prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59) |
---|---|---|
import os
from typing import List
import numpy as np
from numba import njit, float64, int64
from scipy.integrate import quad
import VLEBinaryDiagrams
from EOSParametersBehavior.ParametersBehaviorInterface import (
BiBehavior,
DeltaiBehavior,
ThetaiBehavior,
EpsiloniBehavior,
)
from MixtureRules.MixtureRulesInterface import (
DeltaMixtureRuleBehavior,
EpsilonMixtureRuleBehavior,
MixtureRuleBehavior,
)
from Models.LiquidModel import UNIFAC, has_unifac_in_db
from Properties import DeltaProp, Props
from compounds import MixtureProp
from compounds import SubstanceProp
from constants import R_IG, DBL_EPSILON
from polyEqSolver import solve_cubic
from units import conv_unit
x_vec_for_plot = [
0,
0.01,
0.02,
0.03,
0.04,
0.06,
0.08,
0.1,
0.15,
0.2,
0.25,
0.3,
0.35,
0.4,
0.45,
0.50,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.85,
0.9,
0.92,
0.94,
0.96,
0.97,
0.98,
0.99,
1,
]
calc_options = {
"Bubble-point Pressure": "bubbleP",
"Dew-point Pressure": "dewP",
"Bubble-point Temperature": "bubbleT",
"Dew-point Temperature": "dewT",
"Flash": "flash",
}
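# calc_options maps the human-readable calculation names (presumably shown in a GUI)
# to short identifiers; each identifier corresponds to a family of EOSMixture methods
# defined below (e.g. "bubbleP" -> getBubblePointPressure, "flash" -> getFlash).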
class EOSMixture:
"""
Main class for modeling a system with multiple substances, using a cubic equation of state.
This is the main class of the software. It is responsible for calculating all properties
and all the vapor-liquid equilibrium data. It uses a generalized cubic equation of state for all
its calculations.
"""
def __init__(self, _subs: List[SubstanceProp], _k):
self.substances = _subs
self.k = _k
self.eosname = ""
self.mixRuleBehavior = MixtureRuleBehavior()
self.thetaiBehavior = ThetaiBehavior()
self.biBehavior = BiBehavior()
# TODO remove deltai and epsiloni?
self.deltaiBehavior = DeltaiBehavior()
self.epsiloniBehavior = EpsiloniBehavior()
self.deltaMixBehavior = DeltaMixtureRuleBehavior()
self.epsilonMixBehavior = EpsilonMixtureRuleBehavior()
self.n = len(self.substances)
self.Vcs = np.zeros(self.n)
self.Pcs = np.zeros(self.n)
self.Tcs = np.zeros(self.n)
self.omegas = np.zeros(self.n)
self.subs_ids = self.getSubstancesIDs()
self.vle_method = "phi-phi"
self.has_UNIFAC = self.hasUNIFAC()
if self.has_UNIFAC:
self.unifac_model = UNIFAC(self.subs_ids)
for i in range(self.n):
self.Vcs[i] = self.substances[i].Vc
self.Tcs[i] = self.substances[i].Tc
self.Pcs[i] = self.substances[i].Pc
self.omegas[i] = self.substances[i].omega
def hasUNIFAC(self):
if len(self.subs_ids) < 2:
return False
return has_unifac_in_db(self.subs_ids)
def getZfromPT(self, P: float, T: float, y):
b = self.mixRuleBehavior.bm(y, T, self.biBehavior, self.substances)
theta = self.mixRuleBehavior.thetam(
y, T, self.thetaiBehavior, self.substances, self.k
)
delta = self.deltaMixBehavior.deltam(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilon = self.epsilonMixBehavior.epsilonm(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
return _getZfromPT_helper(b, theta, delta, epsilon, T, P, R_IG)
def getPfromTV(self, T: float, V: float, y) -> float:
b = self.mixRuleBehavior.bm(y, T, self.biBehavior, self.substances)
theta = self.mixRuleBehavior.thetam(
y, T, self.thetaiBehavior, self.substances, self.k
)
delta = self.deltaMixBehavior.deltam(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilon = self.epsilonMixBehavior.epsilonm(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
p = R_IG * T / (V - b) - theta / (V * (V + delta) + epsilon)
return p
def getPhi_i(self, i: int, y, P: float, T: float, Z: float):
bm = self.mixRuleBehavior.bm(y, T, self.biBehavior, self.substances)
thetam = self.mixRuleBehavior.thetam(
y, T, self.thetaiBehavior, self.substances, self.k
)
deltam = self.deltaMixBehavior.deltam(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilonm = self.epsilonMixBehavior.epsilonm(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
# derivatives
diffthetam = self.mixRuleBehavior.diffThetam(
i, y, T, self.thetaiBehavior, self.substances, self.k
)
diffbm = self.mixRuleBehavior.diffBm(i, y, T, self.biBehavior, self.substances)
diffdeltam = self.deltaMixBehavior.diffDeltam(
i, y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
diffepsilonm = self.epsilonMixBehavior.diffEpsilonm(
i, y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
return _getPhi_i_helper(
P,
T,
Z,
R_IG,
bm,
thetam,
deltam,
epsilonm,
diffthetam,
diffbm,
diffdeltam,
diffepsilonm,
DBL_EPSILON,
)
def getFugacity(self, y, _P: float, _T: float, _V: float, _Z: float) -> float:
f = 0.0
for i in range(self.n):
f += y[i] * self.getPhi_i(i, y, _P, _T, _Z)
return f * _P
def getAllProps(
self, y, Tref: float, T: float, Pref: float, P: float
) -> (Props, Props):
log = ""
zs = self.getZfromPT(P, T, y)
zliq, zvap = np.min(zs), np.max(zs)
vliq, vvap = zliq * R_IG * T / P, zvap * R_IG * T / P
MixSubs = MixtureProp(self.substances, y)
avgMolWt = MixSubs.getMolWt()
if avgMolWt:
rholiq, rhovap = avgMolWt * 1e-3 / vliq, avgMolWt * 1e-3 / vvap
else:
rholiq, rhovap = 0, 0
if MixSubs.hasCp():
igprops = MixSubs.getIGProps(Tref, T, Pref, P)
log += MixSubs.getCpLog(Tref, T)
pliq, pvap = self.getCpHSGUA(y, Tref, T, Pref, P)
else:
igprops = 0
pliq, pvap = 0, 0
log += "Couldn't calculate properties: missing Cp paramaters"
fl, fv = (
self.getFugacity(y, P, T, vliq, zliq),
self.getFugacity(y, P, T, vvap, zvap),
)
retPropsliq, retPropsvap = Props(), Props()
retPropsliq.Z, retPropsvap.Z = zliq, zvap
retPropsliq.V, retPropsvap.V = vliq, vvap
retPropsliq.rho, retPropsvap.rho = rholiq, rhovap
retPropsliq.P, retPropsvap.P = P, P
retPropsliq.T, retPropsvap.T = T, T
retPropsliq.Fugacity, retPropsvap.Fugacity = fl, fv
retPropsliq.IGProps, retPropsvap.IGProps = igprops, igprops
retPropsliq.Props, retPropsvap.Props = pliq, pvap
retPropsliq.log, retPropsvap.log = log, log
return retPropsliq, retPropsvap
def getdZdT(self, P: float, T: float, y) -> [float, float]:
h = 1e-5
z_plus_h = self.getZfromPT(P, T + h, y)
z_minus_h = self.getZfromPT(P, T - h, y)
zs = (z_plus_h - z_minus_h) / (2.0 * h)
return np.min(zs), np.max(zs)
# TODO speed up this part with numba
def getDepartureProps(self, y, P, T, V, Z):
def _Zfunc(v, t):
bm = self.mixRuleBehavior.bm(y, t, self.biBehavior, self.substances)
thetam = self.mixRuleBehavior.thetam(
y, t, self.thetaiBehavior, self.substances, self.k
)
delta = self.deltaMixBehavior.deltam(
y, t, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilon = self.epsilonMixBehavior.epsilonm(
y, t, self.biBehavior, self.mixRuleBehavior, self.substances
)
return v / (v - bm) - (thetam / (R_IG * t)) * v / (
v ** 2 + v * delta + epsilon
)
def _dZdT(v, t):
h = 1e-5
return (_Zfunc(v, t + h) - _Zfunc(v, t - h)) / (2.0 * h)
def _URfunc(v, t):
return t * _dZdT(v, t) / v
def _ARfunc(v, t):
return (1.0 - _Zfunc(v, t)) / v
# calculate UR
# nhau = _URfunc(V, T)
UR_RT = quad(_URfunc, V, np.inf, args=(T,))[0]
UR = UR_RT * T * R_IG
# calculate AR
AR_RT = quad(_ARfunc, V, np.inf, args=(T,))[0] + np.log(Z)
AR = AR_RT * T * R_IG
# calculate HR
HR_RT = UR_RT + 1.0 - Z
HR = HR_RT * R_IG * T
# calculate SR
SR_R = UR_RT - AR_RT
SR = SR_R * R_IG
# calculate GR
GR_RT = AR_RT + 1 - Z
GR = GR_RT * R_IG * T
ret = DeltaProp(0, HR, SR, GR, UR, AR)
return ret
def getDeltaDepartureProps(
self,
y,
_Pref: float,
_Tref: float,
_Vref: float,
_Zref: float,
_P: float,
_T: float,
_V: float,
_Z: float,
) -> DeltaProp:
ref = self.getDepartureProps(y, _Pref, _Tref, _Vref, _Zref)
state = self.getDepartureProps(y, _P, _T, _V, _Z)
delta = state.subtract(ref)
return delta
def getCpHSGUA(self, y, Tref: float, T: float, Pref: float, P: float):
zs = self.getZfromPT(P, T, y)
zsref = self.getZfromPT(Pref, Tref, y)
zliq, zvap = np.min(zs), np.max(zs)
zliqref, zvapref = np.min(zsref), np.max(zsref)
vliq, vvap = zliq * R_IG * T / P, zvap * R_IG * T / P
vliqref, vvapref = zliqref * R_IG * Tref / Pref, zvapref * R_IG * Tref / Pref
MixSubs = MixtureProp(self.substances, y)
igprop = MixSubs.getIGProps(
Tref, T, Pref, P
) # make sure that mixture can handle single substances
ddp_liq = self.getDeltaDepartureProps(
y, Pref, Tref, vliqref, zliqref, P, T, vliq, zliq
)
ddp_vap = self.getDeltaDepartureProps(
y, Pref, Tref, vvapref, zvapref, P, T, vvap, zvap
)
pliq = igprop.subtract(ddp_liq)
pvap = igprop.subtract(ddp_vap)
return pliq, pvap
def _getPb_guess(self, x, T):
return _helper_getPb_guess(x, T, self.Pcs, self.Tcs, self.omegas)
def _getPd_guess(self, y, T):
return _helper_getPd_guess(y, T, self.Pcs, self.Tcs, self.omegas)
def getCapPhi_i(self, i: int, y, P: float, T: float) -> float:
zv = np.max(self.getZfromPT(P, T, y))
return self.getPhi_i(i, y, P, T, zv)
def getPSat_i(self, i: int, T: float) -> float:
has_antoine = self.substances[i].hasAntoine()
check_antoine_range = self.substances[i].checkAntoineRange(T)
if has_antoine and check_antoine_range:
P = self.substances[i].getPvpAntoine(T)
else:
P = self.substances[i].getPvpAW(T)
from EOSPureSubstanceInterface import EOSPureSubstanceInterface
system = EOSPureSubstanceInterface([self.substances[i]], self.eosname)
P, it = system.getPvp(T, P)
return P
def getTSat_i(self, i: int, P: float) -> float:
has_antoine = self.substances[i].hasAntoine()
if has_antoine:
t = self.substances[i].getAntoineTsat(P)
else:
t = 300.0 # check this!
return t
def getTsat(self, P: float):
tsat = np.asarray([self.getTSat_i(i, P) for i in range(self.n)])
return tsat
def getCapPhiSat_i(self, i: int, y, T: float) -> float:
P = self.getPSat_i(i, T)
zv = np.max(self.getZfromPT(P, T, y))
return self.getPhi_i(i, y, P, T, zv)
def getDefCapPhi_i(self, i: int, y, P: float, T: float) -> float:
return self.getCapPhi_i(i, y, P, T) / self.getCapPhiSat_i(i, y, T)
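# The eq_12_* helpers below implement the gamma-phi form of modified Raoult's law
# (the 12.x numbering presumably follows the source textbook):
#   y_i = x_i * gamma_i * Psat_i / (CapPhi_i * P)     (12.9)
#   P   = sum_i x_i * gamma_i * Psat_i / CapPhi_i     (12.11)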
def get_y_eq_12_9(self, x, gamma, Psat, CapPhi, P):
return x * gamma * Psat / (CapPhi * P)
def get_P_eq_12_11(self, x, gamma, Psat, CapPhi):
return np.sum(x * gamma * Psat / CapPhi)
def getPhiVap(self, y, P, T):
phivap = np.zeros(self.n, dtype=np.float64)
zsvap = self.getZfromPT(P, T, y)
zvap = np.max(zsvap)
for i in range(self.n):
phivap[i] = self.getPhi_i(i, y, P, T, zvap)
return phivap
def getCapPhi(self, y, P, T):
capphi = np.ones(self.n, dtype=np.float64)
for i in range(self.n):
capphi[i] = self.getCapPhi_i(i, y, P, T)
return capphi
def getBubblePointPressure(self, x, T: float, tol=1e3 * DBL_EPSILON, kmax=1000):
if self.vle_method == "phi-phi":
return self.getBubblePointPressure_phi_phi(x, T, tol=tol, kmax=kmax)
elif self.vle_method == "UNIFAC":
return self.getBubblePointPressure_UNIFAC(x, T, tol=tol, kmax=kmax)
else:
raise NotImplementedError("gamma-phi not implemented")
def getSubstancesIDs(self):
subs_ids = [s.getSubstanceID() for s in self.substances]
return subs_ids
def getPsat(self, T: float):
Psat = np.asarray([self.getPSat_i(i, T) for i in range(self.n)])
return Psat
def getBubblePointPressure_UNIFAC(self, x, T, tol=1e3 * DBL_EPSILON, kmax=100):
assert len(x) == self.n
assert np.sum(x) == 1.0
x = np.atleast_1d(x)
gamma = self.unifac_model.getGamma(x, T)
capphi = np.ones(self.n, dtype=np.float64)
PSat = self.getPsat(T)
pb = self.get_P_eq_12_11(x, gamma, PSat, capphi)
err = 100
ite = 0
while err > tol and ite < kmax:
ite += 1
y = self.get_y_eq_12_9(x, gamma, PSat, capphi, pb)
capphi = self.getCapPhi(y, pb, T)
pb_old = pb
pb = self.get_P_eq_12_11(x, gamma, PSat, capphi)
err = np.abs((pb - pb_old) / pb)
phivap = self.getPhiVap(y, pb, T)
k = self.get_k_gamma_phi(gamma, PSat, pb, capphi)
return y, pb, phivap, gamma, k, ite
def getBubblePointPressure_phi_phi(self, x, T, tol=1e3 * DBL_EPSILON, kmax=1000):
assert len(x) == self.n
assert np.sum(x) == 1.0
x = np.atleast_1d(x)
pb = self._getPb_guess(x, T)
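# Initial K-values from the Wilson-type correlation,
#   K_i = (Pc_i / P) * exp[5.373 * (1 + omega_i) * (1 - Tc_i / T)],
# evaluated here at the guessed bubble pressure pb.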
k = np.exp(
np.log(self.Pcs / pb) + 5.373 * (1 + self.omegas) * (1.0 - self.Tcs / T)
)
y = x * k / np.sum(x * k)
err = 100
ite = 0
phivap = np.empty(self.n, dtype=np.float64)
philiq = np.empty(self.n, dtype=np.float64)
while err > tol and ite < kmax:
ite += 1
zsvap = self.getZfromPT(pb, T, y)
zsliq = self.getZfromPT(pb, T, x)
zvap = np.max(zsvap)
zliq = np.min(zsliq)
for i in range(self.n):
phivap[i] = self.getPhi_i(i, y, pb, T, zvap)
philiq[i] = self.getPhi_i(i, x, pb, T, zliq)
k = philiq / phivap
y = x * k
yt = np.sum(y)
pb = pb * yt
err = np.abs(1.0 - yt)
return y, pb, phivap, philiq, k, ite
####### DEW POINT ###########
def getDewPointPressure(self, y, T: float, tol=1e3 * DBL_EPSILON, kmax=1000):
if self.vle_method == "phi-phi":
return self.getDewPointPressure_phi_phi(y, T, tol=tol, kmax=kmax)
elif self.vle_method == "UNIFAC":
return self.getDewPointPressure_UNIFAC(y, T, tol=tol, kmax=kmax)
else:
raise NotImplementedError("gamma-phi not implemented")
def getDewPointPressure_phi_phi(self, y, T, tol=1e3 * DBL_EPSILON, kmax=1000):
assert len(y) == self.n
assert np.sum(y) == 1.0
y = np.atleast_1d(y)
pd = self._getPd_guess(y, T)
k = np.exp(
np.log(self.Pcs / pd) + 5.373 * (1 + self.omegas) * (1.0 - self.Tcs / T)
)
x = y / k
x = x / np.sum(x)
err = 100
ite = 0
phivap = np.empty(self.n, dtype=np.float64)
philiq = np.empty(self.n, dtype=np.float64)
while err > tol and ite < kmax:
ite += 1
zsvap = self.getZfromPT(pd, T, y)
zsliq = self.getZfromPT(pd, T, x)
zvap = np.max(zsvap)
zliq = np.min(zsliq)
for i in range(self.n):
phivap[i] = self.getPhi_i(i, y, pd, T, zvap)
philiq[i] = self.getPhi_i(i, x, pd, T, zliq)
k = philiq / phivap
x = y / k
xt = np.sum(x)
pd = pd / xt
err = np.abs(1.0 - xt)
x = x / xt
return x, pd, phivap, philiq, k, ite
def getP_eq_12_12(self, y, gamma, Psat, capphi):
return 1.0 / np.sum(y * capphi / (gamma * Psat))
def get_x_eq_12_10(self, y, gamma, Psat, capphi, p):
return y * capphi * p / (gamma * Psat)
def getDewPointPressure_UNIFAC(self, y, T: float, tol=1e3 * DBL_EPSILON, kmax=1000):
assert len(y) == self.n
assert np.sum(y) == 1.0
y = np.atleast_1d(y)
Psat = self.getPsat(T)
capphi = np.ones(self.n, dtype=np.float64)
gamma = np.ones(self.n, dtype=np.float64)
pd = self.getP_eq_12_12(y, gamma, Psat, capphi)
x = self.get_x_eq_12_10(y, gamma, Psat, capphi, pd)
x = x / np.sum(x)
gamma = self.unifac_model.getGamma(x, T)
pd = self.getP_eq_12_12(y, gamma, Psat, capphi)
capphi = self.getCapPhi(y, pd, T)
err = 100
ite = 0
while err > tol and ite < kmax:
ite += 1
capphi = self.getCapPhi(y, pd, T)
err2 = 100
ite2 = 0
while err2 > tol and ite2 < kmax:
ite2 += 1
x = self.get_x_eq_12_10(y, gamma, Psat, capphi, pd)
x = x / np.sum(x)
gamma_old = gamma
gamma = self.unifac_model.getGamma(x, T)
err2 = np.max(np.abs(gamma_old - gamma))
pd_old = pd
pd = self.getP_eq_12_12(y, gamma, Psat, capphi)
err = np.abs((pd - pd_old) / pd)
phivap = self.getPhiVap(y, pd, T)
k = self.get_k_gamma_phi(gamma, Psat, pd, capphi)
return x, pd, phivap, gamma, k, ite
def getBubblePointTemperature(self, x, P: float, tol=1e3 * DBL_EPSILON, kmax=100):
if self.vle_method == "phi-phi":
return self.getBubblePointTemperature_phi_phi(x, P, tol=tol, kmax=kmax)
elif self.vle_method == "UNIFAC":
return self.getBubblePointTemperature_UNIFAC(x, P, tol=tol, kmax=kmax)
else:
raise NotImplementedError("gamma-phi not implemented")
def get_k_gamma_phi(self, gamma, psat, P, capphi):
k = gamma * psat / (P * capphi)
return k
def getBubblePointTemperature_UNIFAC(self, x, P, tol=1e3 * DBL_EPSILON, kmax=100):
assert len(x) == self.n
x = np.atleast_1d(x)
assert np.sum(x) == 1.0
tsat = self.getTsat(P)
tb = np.float(np.sum(x * tsat))
capphi = np.ones(self.n, dtype=np.float64)
psat = self.getPsat(tb)
gamma = self.unifac_model.getGamma(x, tb)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
tb2 = tb
f2 = np.sum(x * k) - 1.0
tb1 = tb * 1.1
y = x * k / np.sum(x * k)
capphi = self.getCapPhi(y, P, tb1)
psat = self.getPsat(tb1)
gamma = self.unifac_model.getGamma(x, tb1)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
f1 = np.sum(x * k) - 1.0
y = x * k / np.sum(x * k)
err = 100
ite = 0
while err > tol and ite < kmax:
ite += 1
tb = tb1 - f1 * ((tb1 - tb2) / (f1 - f2))
capphi = self.getCapPhi(y, P, tb)
psat = self.getPsat(tb)
gamma = self.unifac_model.getGamma(x, tb)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
y = x * k
yt = np.sum(y)
err = np.abs(1.0 - yt)
y = y / yt
tb2 = tb1
tb1 = tb
f2 = f1
f1 = np.sum(k * x) - 1.0
phivap = self.getPhiVap(y, P, tb)
return y, tb, phivap, gamma, k, ite
# TODO optimize this! here, I used the secant method for Tb convergence.
def getBubblePointTemperature_phi_phi(self, x, P, tol=1e3 * DBL_EPSILON, kmax=100):
assert len(x) == self.n
x = np.atleast_1d(x)
assert np.sum(x) == 1.0
Tbi = np.empty(self.n)
for i in range(self.n):
if self.substances[i].Tb > 0:
Tbi[i] = self.substances[i].Tb
else:
Tbi[i] = 100.0
tb = _helper_bubble_T_guess_from_wilson(
x, P, np.sum(x * Tbi), self.Pcs, self.Tcs, self.omegas
)
k = np.exp(
np.log(self.Pcs / P) + 5.373 * (1 + self.omegas) * (1.0 - self.Tcs / tb)
)
err = 100
ite = 0
tb2 = tb
f2 = np.sum(x * k) - 1.0
tb1 = tb * 1.1
k = np.exp(
np.log(self.Pcs / P) + 5.373 * (1 + self.omegas) * (1.0 - self.Tcs / tb1)
)
f1 = np.sum(x * k) - 1.0
y = x * k / np.sum(x * k)
phivap = np.empty(self.n, dtype=np.float64)
philiq = np.empty(self.n, dtype=np.float64)
while err > tol and ite < kmax:
ite += 1
tb = tb1 - f1 * ((tb1 - tb2) / (f1 - f2))
zsvap = self.getZfromPT(P, tb, y)
zsliq = self.getZfromPT(P, tb, x)
zvap = np.max(zsvap)
zliq = np.min(zsliq)
for i in range(self.n):
phivap[i] = self.getPhi_i(i, y, P, tb, zvap)
philiq[i] = self.getPhi_i(i, x, P, tb, zliq)
k = philiq / phivap
y = x * k
yt = np.sum(y)
err = np.abs(1.0 - yt)
y = y / yt
tb2 = tb1
tb1 = tb
f2 = f1
f1 = np.sum(k * x) - 1.0
return y, tb, phivap, philiq, k, ite
def getDewPointTemperature(self, y, P: float, tol=1e3 * DBL_EPSILON, kmax=100):
if self.vle_method == "phi-phi":
return self.getDewPointTemperature_phi_phi(y, P, tol=tol, kmax=kmax)
elif self.vle_method == "UNIFAC":
return self.getDewPointTemperature_UNIFAC(y, P, tol=tol, kmax=kmax)
else:
raise NotImplementedError("gamma-phi not implemented")
def getDewPointTemperature_UNIFAC(
self, y, P: float, tol: float = 1e4 * DBL_EPSILON, kmax: int = 1000
):
assert len(y) == self.n
y = np.atleast_1d(y)
assert np.sum(y) == 1.0
td = float(np.sum(y * self.getTsat(P)))
gamma = np.ones(self.n, dtype=np.float64)
capphi = self.getCapPhi(y, P, td)
psat = self.getPsat(td)
x = self.get_x_eq_12_10(y, gamma, psat, capphi, P)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
x = x / np.sum(x)
td2 = td
f2 = np.sum(y / k) - 1.0
td1 = td * 1.1
capphi = self.getCapPhi(y, P, td1)
psat = self.getPsat(td1)
gamma = self.unifac_model.getGamma(x, td1)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
f1 = np.sum(y / k) - 1.0
x = self.get_x_eq_12_10(y, gamma, psat, capphi, P)
x = x / np.sum(x)
err = 100
ite = 0
while err > tol and ite < kmax:
ite += 1
td = td1 - f1 * ((td1 - td2) / (f1 - f2))
capphi = self.getCapPhi(y, P, td)
psat = self.getPsat(td)
gamma = self.unifac_model.getGamma(x, td)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
x = self.get_x_eq_12_10(y, gamma, psat, capphi, P)
xt = np.sum(x)
err = np.abs(1.0 - xt)
x = x / xt
td2 = td1
td1 = td
f2 = f1
f1 = np.sum(y / k) - 1.0
phivap = self.getPhiVap(y, P, td)
return x, td, phivap, gamma, k, ite
def getDewPointTemperature_phi_phi(
self, y, P: float, tol: float = 1e4 * DBL_EPSILON, kmax: int = 1000
):
assert len(y) == self.n
y = np.atleast_1d(y)
assert np.sum(y) == 1.0
Tdi = np.empty(self.n)
for i in range(self.n):
if self.substances[i].Tb > 0:
Tdi[i] = self.substances[i].Tb
else:
Tdi[i] = 100.0
td = np.sum(y * Tdi)
k = np.exp(
np.log(self.Pcs / P) + 5.373 * (1 + self.omegas) * (1.0 - self.Tcs / td)
)
td2 = td
f2 = np.sum(y / k) - 1.0
td1 = td * 1.1
k = np.exp(
np.log(self.Pcs / P) + 5.373 * (1 + self.omegas) * (1.0 - self.Tcs / td1)
)
f1 = np.sum(y / k) - 1.0
err = 100
ite = 0
# x = np.full(self.n, 1.0 / self.n)
x = (y / k) / np.sum(y / k)
phivap = np.empty(self.n, dtype=np.float64)
philiq = np.empty(self.n, dtype=np.float64)
while err > tol and ite < kmax:
ite += 1
td = td1 - f1 * ((td1 - td2) / (f1 - f2))
zsvap = self.getZfromPT(P, td, y)
zsliq = self.getZfromPT(P, td, x)
zvap = np.max(zsvap)
zliq = np.min(zsliq)
for i in range(self.n):
phivap[i] = self.getPhi_i(i, y, P, td, zvap)
philiq[i] = self.getPhi_i(i, x, P, td, zliq)
k = philiq / phivap
x = y / k
xt = np.sum(x)
err = np.abs(1.0 - xt)
x = x / xt
td2 = td1
td1 = td
f2 = f1
f1 = np.sum(y / k) - 1.0
return x, td, phivap, philiq, k, ite
def getFlash(self, z, P: float, T: float, tol=1e5 * DBL_EPSILON, kmax=1000):
if self.vle_method == "phi-phi":
return self.getFlash_phi_phi(z, P, T, tol=tol, kmax=kmax)
elif self.vle_method == "UNIFAC":
return self.getFlash_UNIFAC(z, P, T, tol=tol, kmax=kmax)
else:
raise NotImplementedError("gamma-phi not implemented")
def getFlash_phi_phi(self, z, P: float, T: float, tol=1e5 * DBL_EPSILON, kmax=1000):
assert self.n == len(z)
z = np.atleast_1d(z)
assert np.sum(z) == 1.0
# check if is flash problem
y, pd, pv, pl, k, ite = self.getDewPointPressure(z, T)
x, pb, pv, pl, k, ite = self.getBubblePointPressure(z, T)
if not (pd <= P <= pb):
raise ValueError("P is not between Pdew and Pbubble")
v = (pb - P) / (pb - pd)
err = 100
ite = 0
phivap = np.empty(self.n, dtype=np.float64)
philiq = np.empty(self.n, dtype=np.float64)
y = np.full(self.n, 1.0 / self.n)
x = np.full(self.n, 1.0 / self.n)
while err > tol and ite < kmax:
ite += 1
zsvap = self.getZfromPT(P, T, y)
zsliq = self.getZfromPT(P, T, x)
zvap = np.max(zsvap)
zliq = np.min(zsliq)
for i in range(self.n):
phivap[i] = self.getPhi_i(i, y, P, T, zvap)
philiq[i] = self.getPhi_i(i, x, P, T, zliq)
k = philiq / phivap
vold = v
v = _RachfordRice(v, k, z, tol=1e-8, kmax=500)
x = z / (1.0 + v * (k - 1.0))
y = k * x
err = np.abs(v - vold)
return x, y, v, phivap, philiq, k, ite
def getFlash_UNIFAC(self, z, P: float, T: float, tol=1e5 * DBL_EPSILON, kmax=1000):
assert self.n == len(z)
z = np.atleast_1d(z)
assert np.sum(z) == 1.0
# check if is flash problem
y, pd, pv, pl, k, ite = self.getDewPointPressure(z, T)
x, pb, pv, pl, k, ite = self.getBubblePointPressure(z, T)
if not (pd <= P <= pb):
raise ValueError("P is not between Pdew and Pbubble")
v = (pb - P) / (pb - pd)
psat = self.getPsat(T)
y = np.full(self.n, 1.0 / self.n)
x = np.full(self.n, 1.0 / self.n)
err = 100
ite = 0
while err > tol and ite < kmax:
ite += 1
phivap = self.getPhiVap(y, P, T)
gamma = self.unifac_model.getGamma(x, T)
capphi = self.getCapPhi(y, P, T)
k = self.get_k_gamma_phi(gamma, psat, P, capphi)
vold = v
v = _RachfordRice(v, k, z, tol=1e-8, kmax=500)
x = z / (1.0 + v * (k - 1.0))
y = k * x
err = np.abs(v - vold)
return x, y, v, phivap, gamma, k, ite
def isobaricBinaryMixtureGenData(self, P, x=None, Punit="Pa", Tunit="K"):
assert self.n == 2
if x is None:
x = x_vec_for_plot
x = np.atleast_1d(x)
xmix = np.empty(2, dtype=np.float64)
y = np.empty(len(x), dtype=np.float64)
T = np.empty(len(x), dtype=np.float64)
kvec = np.empty(len(x), dtype=np.float64)
phi_vap_vec = np.empty(len(x), dtype=np.float64)
phi_liq_vec = np.empty(len(x), dtype=np.float64)
pv = np.empty(len(x), dtype=np.float64)
pl = np.empty(len(x), dtype=np.float64)
k = np.empty(len(x), dtype=np.float64)
for i in range(len(x)):
xmix[0] = x[i]
xmix[1] = 1.0 - x[i]
try:
yres, T[i], pv, pl, k, ite = self.getBubblePointTemperature(xmix, P)
except:
try:
yres = [0, 0]
yres[0] = y[i - 1]
T[i] = T[i - 1]
x[i] = x[i - 1]
pv[0] = phi_vap_vec[i - 1]
pl[0] = phi_liq_vec[i - 1]
k[0] = kvec[i - 1]
except:
yres = [0, 0]
yres[0] = y[i + 1]
T[i] = T[i + 1]
x[i] = x[i + 1]
pv[0] = phi_vap_vec[i + 1]
pl[0] = phi_liq_vec[i + 1]
k[0] = kvec[i + 1]
T[i] = conv_unit(T[i], "K", Tunit)
y[i] = yres[0]
phi_vap_vec[i] = pv[0]
phi_liq_vec[i] = pl[0]
kvec[i] = k[0]
return x, y, T, phi_vap_vec, phi_liq_vec, kvec
def isothermalBinaryMixtureGenData(self, T, x=None, Punit="Pa", Tunit="K"):
assert self.n == 2
if x is None:
x = x_vec_for_plot
x = np.atleast_1d(x)
xmix = np.empty(2, dtype=np.float64)
y = np.empty(len(x), dtype=np.float64)
P = np.empty(len(x), dtype=np.float64)
kvec = np.empty(len(x), dtype=np.float64)
phi_vap_vec = np.empty(len(x), dtype=np.float64)
phi_liq_vec = np.empty(len(x), dtype=np.float64)
phi_vap = np.empty(len(x), dtype=np.float64)
phi_liq = np.empty(len(x), dtype=np.float64)
kv = np.empty(len(x), dtype=np.float64)
for i in range(len(x)):
xmix[0] = x[i]
xmix[1] = 1.0 - x[i]
try:
yres, P[i], phi_vap, phi_liq, kv, ite = self.getBubblePointPressure(
xmix, T, tol=1e-5, kmax=100
)
except:
try:
yres = [0, 0]
yres[0] = y[i - 1]
P[i] = P[i - 1]
x[i] = x[i - 1]
phi_vap[0] = phi_vap_vec[i - 1]
phi_liq[0] = phi_liq_vec[i - 1]
kv[0] = kvec[i - 1]
except:
yres = [0, 0]
yres[0] = y[i + 1]
P[i] = P[i + 1]
x[i] = x[i + 1]
phi_vap[0] = phi_vap_vec[i + 1]
phi_liq[0] = phi_liq_vec[i + 1]
kv[0] = kv[i + 1]
P[i] = conv_unit(P[i], "Pa", Punit)
y[i] = yres[0]
phi_vap_vec[i] = phi_vap[0]
phi_liq_vec[i] = phi_liq[0]
kvec[i] = kv[0]
return x, y, P, phi_vap_vec, phi_liq_vec, kvec
def isobaricBinaryMixturePlot(
self, P, x=None, Punit="Pa", Tunit="K", expfilename="", plottype="both"
):
assert self.n == 2
if x is None:
x = x_vec_for_plot
x, y, T, phiv, phil, kvec = self.isobaricBinaryMixtureGenData(
P, x, Punit=Punit, Tunit=Tunit
)
if self.vle_method == "UNIFAC":
gamma_title = "UNIFAC + "
else:
gamma_title = ""
title = "{} (1) / {} (2) at {:0.3f} {}\n{}Equation of state: {}".format(
self.substances[0].Name,
self.substances[1].Name,
conv_unit(P, "Pa", Punit),
Punit,
gamma_title,
self.eosname,
)
vleplot = VLEBinaryDiagrams.VLEBinaryMixturePlot(
"isobaric", T, x, y, Tunit, title, plottype
)
if os.path.exists(expfilename):
vleplot.expPlot(expfilename)
vleplot.plot()
def setVLEmethod(self, method: str):
if not self.has_UNIFAC:
self.vle_method = "phi-phi"
return
self.vle_method = method
def isothermalBinaryMixturePlot(
self, T, x=None, Punit="Pa", Tunit="K", expfilename="", plottype="both"
):
assert self.n == 2
if x is None:
x = x_vec_for_plot
x, y, P, phiv, phil, kvec = self.isothermalBinaryMixtureGenData(
T, x, Punit=Punit, Tunit=Tunit
)
if self.vle_method == "UNIFAC":
gamma_title = "UNIFAC + "
else:
gamma_title = ""
title = "{} (1) / {} (2) at {:0.3f} {}\n{}Equation of state: {}".format(
self.substances[0].Name,
self.substances[1].Name,
conv_unit(T, "K", Tunit),
Tunit,
gamma_title,
self.eosname,
)
vleplot = VLEBinaryDiagrams.VLEBinaryMixturePlot(
"isothermal", P, x, y, Punit, title, plottype
)
if os.path.exists(expfilename):
vleplot.expPlot(expfilename)
vleplot.plot()
@njit(float64(float64, float64[:], float64[:], float64, int64), cache=True)
def _RachfordRice(v, k, z, tol, kmax):
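# Newton iteration on the Rachford-Rice function
#   f(v) = sum_i z_i * (K_i - 1) / (1 + v * (K_i - 1)),
# solved for the vapour fraction v using the analytic derivative df/dv.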
v0 = v
v1 = 999.0
err = 1000.0
iter = 0
while err > tol and iter < kmax:
iter += 1
f = np.sum(z * (k - 1.0) / (1.0 + v0 * (k - 1.0)))
dfdv = -np.sum(z * (k - 1.0) ** 2 / (1.0 + v0 * (k - 1.0)) ** 2)
v1 = v0 - f / dfdv
err = np.abs(v0 - v1)
v0 = v1
return v1
@njit(float64(float64[:], float64, float64[:], float64[:], float64[:]), cache=True)
def _helper_getPb_guess(x, T, Pcs, Tcs, omegas):
x = np.atleast_1d(x)
return np.sum(x * Pcs * np.exp(5.373 * (1.0 + omegas) * (1.0 - Tcs / T)))
@njit(float64(float64[:], float64, float64[:], float64[:], float64[:]), cache=True)
def _helper_getPd_guess(y, T, Pcs, Tcs, omegas):
y = np.atleast_1d(y)
return 1.0 / np.sum(y / (Pcs * np.exp(5.373 * (1.0 + omegas) * (1.0 - Tcs / T))))
@njit(
float64(float64[:], float64, float64, float64[:], float64[:], float64[:]),
cache=True,
)
def _helper_f_for_temperature_bubble_point_guess(x, P, T, Pcs, Tcs, omegas):
return -P + np.sum(Pcs * x * np.exp(5.373 * (1.0 + omegas) * (1.0 - Tcs / T)))
@njit(
float64(float64[:], float64, float64, float64[:], float64[:], float64[:]),
cache=True,
)
def _helper_diff_f_for_temperature_bubble_point_guess(x, P, T, Pcs, Tcs, omegas):
h = 1e-3
f1 = _helper_f_for_temperature_bubble_point_guess(x, P, T + h, Pcs, Tcs, omegas)
f2 = _helper_f_for_temperature_bubble_point_guess(x, P, T - h, Pcs, Tcs, omegas)
return (f1 - f2) / (2.0 * h)
@njit(
float64(float64[:], float64, float64, float64[:], float64[:], float64[:]),
cache=True,
)
def _helper_bubble_T_guess_from_wilson(x, P, T, Pcs, Tcs, omegas):
tol = 1e-8
kmax = 1000
k = 0
err = 999
while k < kmax and err > tol:
k += 1
told = T - _helper_f_for_temperature_bubble_point_guess(
x, P, T, Pcs, Tcs, omegas
) / _helper_diff_f_for_temperature_bubble_point_guess(x, P, T, Pcs, Tcs, omegas)
err = np.abs(T - told)
T = told
return T
from numba import njit, float64, jit
@jit((float64, float64, float64, float64, float64, float64, float64), cache=True)
def _getZfromPT_helper(
b: float,
theta: float,
delta: float,
epsilon: float,
T: float,
P: float,
R_IG: float,
):
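# Generalized cubic EOS rewritten as a polynomial in the compressibility factor,
#   Z^3 + _b*Z^2 + _c*Z + _d = 0,
# using the dimensionless groups B = bP/(RT), delta' = delta*P/(RT),
# epsilon' = epsilon*(P/(RT))^2 and theta' = theta*P/(RT)^2 computed below.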
Bl = b * P / (R_IG * T)
deltal = delta * P / (R_IG * T)
epsilonl = epsilon * np.power(P / (R_IG * T), 2)
thetal = theta * P / np.power(R_IG * T, 2)
_b = deltal - Bl - 1.0
_c = thetal + epsilonl - deltal * (1.0 + Bl)
_d = -(epsilonl * (Bl + 1.0) + Bl * thetal)
roots = np.array(solve_cubic(1.0, _b, _c, _d))
real_values = roots[roots >= 0]
return real_values
@njit(
(
float64,
float64,
float64,
float64,
float64,
float64,
float64,
float64,
float64,
float64,
float64,
float64,
float64,
),
cache=True,
)
def _getPhi_i_helper(
P: float,
T: float,
Z: float,
R_IG: float,
bm: float,
thetam: float,
deltam: float,
epsilonm: float,
diffthetam: float,
diffbm: float,
diffdeltam: float,
diffepsilonm: float,
DBL_EPSILON: float,
) -> float:
RT = R_IG * T
V = RT * Z / P
deltam2_minus_4epislonm = deltam * deltam - 4.0 * epsilonm
deltaN = deltam * diffdeltam * 2.0 - 4.0 * diffepsilonm
if abs(deltam2_minus_4epislonm) < 100 * DBL_EPSILON:
substitute_term = -1.0 / (V + deltam / 2.0)
first_term = substitute_term * diffthetam / RT
last_term = diffbm / (V - bm) - | np.log((V - bm) / V) | numpy.log |
import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
from shutil import copyfile
import fortranformat as ff
from itertools import zip_longest
from scipy.signal import argrelextrema, argrelmin
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from ast import literal_eval as make_tuple
import pyshtools
from scipy.io import loadmat
from pathlib import Path
from scipy.special import lpmn
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import copy
import cartopy.feature as cfeature
"""
author: <NAME>
contact: <EMAIL>
description: A script containing tools to post-process the orbits, obtained from a forward simulation (and also recovery), from epos-oc.
"""
def create_element_lines(ffp, splitstring):
#get titles
with open(ffp) as f:
LINES = f.readlines()
starts = []
for i,line in enumerate(LINES):
if line.startswith(splitstring):
starts.append(i)
ends=[]
for i in range(len(starts)):
ends.append(starts[i]+16)
blocks = list(zip(starts,ends))
format_float = ff.FortranRecordWriter('(E19.13)')
for block in blocks:
with open(ffp) as fp:
for i, line in enumerate(fp):
if i in range(block[0],block[1]):
if i==block[0]:
outfile = open('%s_ELEMENTSnew.txt' %line.strip(),'w')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-C\n')
if i>block[0]+1:
if line.startswith('Sat'):
outfile.write(' --- End initial elements GRACE-C\n')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-D\n')
if line.startswith('ELEMENT'):
val = line.strip().split()
val[5] = str(format_float.write([np.float(val[5])])).replace('E','e')
val[6] = str(format_float.write([np.float(val[6])])).replace('E', 'e')
if val[7] == '0201201': val[7] = '1804701'
if val[7] == '0201202': val[7] = '1804702'
str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') \
% (val[0], val[1], int(val[2]), int(val[3]),
int(val[4]), val[5], val[6], val[7])
outfile.write('%s\n' %str_new2)
if i==block[1]-1:
outfile.write(' --- End initial elements GRACE-D')
break
#
#
# def create_element_lines(ffp, splitstring):
# #input: Unformatted file that contains orbit elements needed to start each of the runs for GRACE-FO simulation
# #output: Orbit elements that can be used as input for prepare_EPOSIN_4_orbit_integration.sh (located at
# #/GFZ/project/f_grace/NGGM_SIM/SIM_FORWARD )
# with open(ffp) as f:
# lines = f.read().splitlines()
# splits = [i for i in lines if i.startswith(splitstring)]
# print(splits)
# n = 2 # group size
# m = 1 # overlap size
# splits_grouped = [splits[i:i + n] for i in range(0, len(splits), n - m)]
# print(splits_grouped)
#
#
# # # print(lines)
# # split = [i for i in lines if i.startswith('PP')]
# for i in splits_grouped:
# if len(i) > 1:
# start = i[0]
# end = i[1]
# out = '%s_ELEMENT_lines.txt' % (start.strip())
# with open(ffp) as infile, open(out, 'w') as outfile:
# copy = False
# titlewritten0 = False
# titlewritten1 = False
# firsttime6 = False
# linesread = 0
# outfile.write("\n")
#
# for line in infile:
# if line.strip() == start.strip():
# copy = True
# continue
# elif line.strip() == end.strip():
# copy = False
# continue
# elif copy:
# linesread += 1
#
# if not titlewritten0:
# outfile.write(' --- Begin initial elements GRACE-C\n')
# titlewritten0 = True
# if line.startswith(
# 'ELEMENT') and titlewritten0: # if line starts with ELEMENT and the first title has been written
# val = list(filter(None, line.strip().split(' ')))[0:-3]
# format_float = ff.FortranRecordWriter('(E19.13)')
# val5 = str(format_float.write([np.float(val[5])]))
# val6 = str(format_float.write([np.float(val[6])]))
#
# val5 = val5.replace('E', 'e')
# val6 = val6.replace('E', 'e')
#
#
# if val[7] == '0201201': val[7] = '1804701'
# if val[7] == '0201202': val[7] = '1804702'
# str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') % (val[0], val[1], int(val[2]), int(val[3]), int(val[4]), val5, val6, val[7])
#
#
# # outfile.write("\n")
# if int(val[2]) < 6:
# outfile.write(str_new2)
# outfile.write("\n")
#
# if int(val[
# 2]) == 6 and not titlewritten1: # if element six has been reached and no 'end1' has been written yet:
# if not firsttime6:
# titlewritten1 = True
# # titlewritten2 = True
# outfile.write(str_new2)
# outfile.write("\n")
# outfile.write(' --- End initial elements GRACE-C\n\n')
# outfile.write(' --- Begin initial elements GRACE-D\n')
#
# if int(val[2]) == 6:
# print(linesread)
# if linesread > 7:
# outfile.write(str_new2)
# outfile.write("\n")
#
# outfile.write(' --- End initial elements GRACE-D')
# outfile.write("\n")
# outfile.write('\n')
# outfile.close()
# infile.close()
def files(path):
#input: path to a directory
#output: files within the directory (omitting nested directories)
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
yield file
def create_case_directories(fp, fp_out):
#function to prepare the case directories for each of the simulations specified for the GRACE-FO project.
#It will
element_files = []
# current_dir = os.path.dirname(__file__)
for file in files(fp):
element_files.append(file)
IDs = ['PP.1', 'PP.2']
altitudes = [490, 490]
extens = [0, 0]
angles = [89, 89]
seperations = [200, 100]
repeats = [30, 30]
simdirs = ['FD', 'FD']
df = pd.DataFrame(columns=['id', 'altitude', 'extens', 'seperation', 'repeatvals', 'sim_direction'])
df['id'] = IDs
df['altitude'] = altitudes
df['angles'] = angles
df['extens'] = extens
df['seperation'] = seperations
df['repeatvals'] = repeats
df['sim_direction'] = simdirs
df.set_index('id', inplace=True)
for idx in df.index:
dirname = '%s_%i_%i_%i_%i_%id_%s' % (idx, df.loc[idx].altitude,
df.loc[idx].angles,
df.loc[idx].z,
df.loc[idx].seperation,
df.loc[idx].repeatvals,
df.loc[idx].sim_direction
)
if not os.path.exists(dirname):
os.mkdir(dirname)
ef = [f for f in element_files if f.startswith(idx)][0]
dst = os.path.abspath(fp, dirname, 'ELEMENT_lines')
src = os.path.abspath(os.path.join(os.path.dirname(__file__), ef))
copyfile(src, dst)
def serial_date_to_string(srl_no):
new_date = datetime.datetime(2000,1,1) + datetime.timedelta(srl_no+1)
return new_date.strftime("%Y-%m-%d")
def cart_2_kep_matrix(R, V, mu, Re):
# step1
h_bar = np.cross(R, V)
h = np.linalg.norm(h_bar, axis=1)
# step2
r = np.linalg.norm(R, axis=1)
v = np.linalg.norm(V, axis=1)
# step3
E = 0.5 * (v ** 2) - mu / r
# step4
a = -mu / (2 * E)
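# vis-viva relation: E = v^2/2 - mu/r = -mu/(2a), hence a = -mu/(2E); the return value
# below is the altitude above Re, converted to km (assuming SI inputs).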
return (a-Re)/1000.0
def cart_2_kep(r_vec, v_vec, t, mu, Re):
# step1
h_bar = np.cross(r_vec, v_vec)
h = np.linalg.norm(h_bar)
# step2
r = np.linalg.norm(r_vec)
v = np.linalg.norm(v_vec)
# step3
E = 0.5 * (v ** 2) - mu / r
# step4
a = -mu / (2 * E)
# step5
e = np.sqrt(1 - (h ** 2) / (a * mu))
# step6
i = np.arccos(h_bar[2] / h)
# step7
omega_LAN = np.arctan2(h_bar[0], -h_bar[1])
# step8
# beware of division by zero here
lat = np.arctan2(np.divide(r_vec[2], (np.sin(i))), \
(r_vec[0] * np.cos(omega_LAN) + r_vec[1] * np.sin(omega_LAN)))
# step9
p = a * (1 - e ** 2)
nu = np.arctan2(np.sqrt(p / mu) * np.dot(r_vec, v_vec), p - r)
# step10
omega_AP = lat - nu
# step11
EA = 2 * np.arctan(np.sqrt((1 - e) / (1 + e)) * np.tan(nu / 2))
# step12
n = np.sqrt(mu / (a ** 3))
T = t - (1 / n) * (EA - e * np.sin(EA))
return a, e, i, omega_AP, omega_LAN, T, EA
def orbit_altitude(satfile):
mu = G.value * M_earth.value
Re = R_earth.value
with open(satfile) as infile:
"""read all lines from CIS files"""
lines = infile.readlines()
"""set the start and end characters for splitting the lines into X,Y,Z, U,V,W coordinates"""
start0, start1, start2, start3, start4, start5, end = 23, 41, 59, 77, 95, 113, 131
X = np.array([np.float(i[start0:start1]) for i in lines])
Y = np.array([np.float(i[start1:start2]) for i in lines])
Z = np.array([np.float(i[start2:start3]) for i in lines])
X = X.reshape(X.shape[0], 1)
Y = Y.reshape(Y.shape[0], 1)
Z = Z.reshape(Z.shape[0], 1)
R = np.concatenate((X, Y, Z), axis=1)
U = np.array([np.float(i[start3:start4]) for i in lines])
V = np.array([np.float(i[start4:start5]) for i in lines])
W = np.array([np.float(i[start5:end]) for i in lines])
U = U.reshape(U.shape[0], 1)
V = V.reshape(V.shape[0], 1)
W = W.reshape(W.shape[0], 1)
V = np.concatenate((U, V, W), axis=1)
"""calculate orbit altitude and convert to km"""
ALTITUDE_sec = cart_2_kep_matrix(R, V, mu, Re)
"""read the days and seconds """
daysStart, daysEnd, secondsStart, secondsEnd = 4, 11, 11, 23
seconds = np.array([np.float(i[secondsStart:secondsEnd]) for i in lines])
days = np.array([np.float(i[daysStart:daysEnd]) for i in lines])
"""calculate the decimal format for the days and subtract 51.184 to convert to correct time format"""
decimalDays = days + (seconds-51.184)/(24.0*60.*60.)
"""convert decimal days to a date"""
YMDHMS = [datetime.datetime(2000, 1, 1, 12) + datetime.timedelta(day) for day in decimalDays]
"""create an empty Pandas dataframe, called df"""
df = pd.DataFrame()
"""add the dates, decimal days and separation to the dataframe"""
df['date'] = pd.to_datetime(YMDHMS)
df['decimalDays'] = decimalDays
df['altitude_raw'] = ALTITUDE_sec
"""set the index of the dataframe to the date values"""
df.set_index('date', inplace=True)
df.drop(df.tail(1).index, inplace=True)
"""calculate the daily average"""
# df_daily = df.resample('D').mean()
df_daily = df.resample('24H', base=0, closed='left').mean()
print(df_daily.tail(10))
# print(df_daily.tail(1000))
# df_daily = df.resample('D').mean(min_count=200)
# print(df_daily.tail(10))
"""calculate extremas"""
m = argrelextrema(df_daily['altitude_raw'].values, np.greater)[0]
n = argrelextrema(df_daily['altitude_raw'].values, np.less)[0]
rawsignal = df_daily['altitude_raw'].values
extrema = np.empty(rawsignal.shape)
extrema[:] = np.nan
extrema1 = np.empty(rawsignal.shape)
extrema1[:] = np.nan
df_daily['extrema'] = extrema
df_daily['extrema1'] = extrema1
for ind in m:
df_daily['extrema'].iloc[ind] = rawsignal[ind]
for ind in n:
df_daily['extrema1'].iloc[ind] = rawsignal[ind]
"""interpolate extremas where they do not exist"""
df_daily.interpolate(inplace=True)
"""calculate the average of the two extrema columns"""
df_daily['altitude'] = df_daily[['extrema', 'extrema1']].mean(axis=1)
df_daily.to_csv('%s.csv' %satfile)
df_daily['altitude'].plot(figsize=(10,4))
# df_daily.drop(df_daily.tail(1).index, inplace=True)
"""set the x,y labels and x, y ticks to a readable fontsize and make layout tight"""
plt.xlabel('Date', fontsize=18)
plt.ylabel('Altitude [km]', fontsize=18)
plt.xticks(fontsize=12, rotation=70)
plt.yticks(fontsize=12)
plt.legend(fontsize=14)
plt.tight_layout()
# plt.show()
"""save plot to file"""
plt.savefig('altitude_%s.png' % (satfile))
plt.figure()
ax = df_daily['altitude_raw'].plot(marker='.', figsize=(10, 3))
"""set the x,y labels and x, y ticks to a readable fontsize and make layout tight"""
plt.xlabel('Date', fontsize=18)
plt.ylabel('Altitude [km]', fontsize=18)
plt.xticks(fontsize=12, rotation=70)
plt.yticks(fontsize=12)
plt.legend(fontsize=14)
ax.legend(['altitude']) # replacing legend entry 'altitude_raw' with 'altitude'
plt.tight_layout()
plt.savefig('altitude_raw%s.png' % (satfile))
# plt.show()
def sat_separation(satA, satB):
with open(satA) as infileA, open(satB) as infileB:
"""read all lines from CIS files"""
linesA = infileA.readlines()
linesB = infileB.readlines()
"""set the start and end characters for splitting the lines into X,Y,Z coordinates"""
start0, start1, start2, end = 23, 41, 59, 77
Xa = np.array([np.float(i[start0:start1]) for i in linesA])
Ya = np.array([np.float(i[start1:start2]) for i in linesA])
Za = np.array([np.float(i[start2:end]) for i in linesA])
Xb = np.array([np.float(i[start0:start1]) for i in linesB])
Yb = np.array([np.float(i[start1:start2]) for i in linesB])
Zb = np.array([np.float(i[start2:end]) for i in linesB])
"""calculate distance between satellites and convert to km"""
SEP_AB = np.sqrt((Xa - Xb)**2. + (Ya - Yb)**2. + (Za - Zb)**2.)/1000.
"""read the days and seconds """
daysStart, daysEnd, secondsStart, secondsEnd = 4, 11, 11, 23
seconds = np.array([np.float(i[secondsStart:secondsEnd]) for i in linesA])
days = np.array([np.float(i[daysStart:daysEnd]) for i in linesA])
"""calculate the decimal format for the days and subtract 51.184 to convert to correct time format"""
decimalDays = days + (seconds-51.184)/(24.0*60.*60.)
"""convert decimal days to a date"""
YMDHMS = [datetime.datetime(2000, 1, 1) + datetime.timedelta(day+1) for day in decimalDays]
"""create an empty Pandas dataframe, called df"""
df = pd.DataFrame()
"""add the dates, decimal days and separation to the dataframe"""
df['date'] = pd.to_datetime(YMDHMS)
df['decimalDays'] = decimalDays
df['separation'] = SEP_AB
"""set the index of the dataframe to the date values"""
df.set_index('date', inplace=True)
"""calculate the daily average"""
df_daily = df.resample('D').mean()
"""save the dataframe to file"""
df_daily.to_csv('separation_AB.csv')
"""For plotting purposes, drop the decimalDays from the dataframe"""
df_daily.drop(['decimalDays'], axis=1, inplace=True)
"""plot the dataframe"""
df_daily.plot(figsize=(10,4))
"""set the x,y labels and x, y ticks to a readable fontsize and make layout tight"""
plt.xlabel('Date', fontsize=18)
plt.ylabel('Separation [km]', fontsize=18)
plt.xticks(fontsize=12, rotation=70)
plt.yticks(fontsize=12)
plt.legend(fontsize=14)
plt.tight_layout()
"""save plot to file"""
plt.savefig('separation_%s_%s.png' %(satA, satB))
def kep_2_cart(a, e, i, omega_AP, omega_LAN, T, EA):
# step1
n = np.sqrt(mu / (a ** 3))
M = n * (t - T)
# step2
MA = EA - e * np.sin(EA)
# step3
#
nu = 2 * np.arctan(np.sqrt((1 + e) / (1 - e)) * np.tan(EA / 2))
# step4
r = a * (1 - e * np.cos(EA))
# step5
h = np.sqrt(mu * a * (1 - e ** 2))
# step6
Om = omega_LAN
w = omega_AP
X = r * (np.cos(Om) * np.cos(w + nu) - np.sin(Om) * np.sin(w + nu) * np.cos(i))
Y = r * ( | np.sin(Om) | numpy.sin |
"""Base station positing calibration.
Calibrating position and rotation for both base stations based on angle-data
fom 4 point measrunments.
"""
# Importing dependencies
import math
import numpy as np
# General Support Methods
def norm(x):
"""Wrapp for numpy vector norm."""
return np.linalg.norm(x)
def normVec(v):
"""Compute normal vector."""
return v/np.linalg.norm(v)
def dot(x, y):
"""Wrapp for numpy dot product."""
return np.dot(x, y)
# Rotation matrices
def rotate_x(ang):
"""Return rotation matrix for given Euler rotation around x-axis."""
return np.array([[1, 0, 0],
[0, np.cos(ang), -np.sin(ang)],
[0, np.sin(ang), np.cos(ang)]])
def rotate_y(ang):
"""Return rotation matrix for given Euler rotation around y-axis."""
return np.array([[np.cos(ang), 0, np.sin(ang)],
[0, 1, 0],
[-np.sin(ang), 0, np.cos(ang)]])
def rotate_z(ang):
"""Return rotation matrix for given Euler rotation around z-axis."""
return np.array([[np.cos(ang), -np.sin(ang), 0],
[np.sin(ang), np.cos(ang), 0],
[0, 0, 1]])
def rotate_zyx(x, y, z):
"""Construct compound rotation matrix for given x, y, z rotations.
Order of operation: z,y,x //TODO: Verify!
"""
return np.matmul(rotate_x(x), np.matmul(rotate_y(y), rotate_z(z)))
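# Sanity check (illustrative): rotate_zyx(0, 0, np.pi/2) @ np.array([1.0, 0.0, 0.0])
# is approximately [0, 1, 0], i.e. the z-rotation acts on the vector first,
# then y, then x, consistent with the docstring above.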
def transform_vector_to_pose(vec, pose):
"""Rotate vector from i',j',k' coordinate system of base station to
global i,j,k coordinates."""
return np.matmul(rotate_zyx(pose[3], pose[4], pose[5]), vec)
def rotate_zyx_to_angles(R):
"""Extract Euler rotation angles about i,j,k from rotation matrix."""
x = -math.atan2(R[1, 2], R[2, 2])
y = math.asin(R[0, 2])
z = -math.atan2(R[0, 1], R[0, 0])
return np.array([x, y, z])
# Solving for Roots
class ApplicationFunction(object):
"""Generate equations for lines and solves for multiple roots."""
# Parameters for rootfinding
accuracy = 0.000000001
delta = 0.001
maxStep = 0.02
def __init__(self, b1, b2, b3):
"""Initialize function."""
self.b1 = b1
self.b2 = b2
self.b3 = b3
self.t1 = []
self.t2 = []
self.t3 = []
self.error = False
self.findRoots()
def tn(self, t1, bx, m=False):
"""Compute values for t2 and t3 given t1 and m.
Parameters
- t1: value of t1
- bx: b2 for computing t2 and b3 for computing t3
- m: minus, choice of root for second order polynomial
"""
a = norm(self.b1)**2
b = -2*t1*dot(self.b1, bx)
c = t1**2 * norm(self.b1) - 1
d = b**2-4*a*c # Discriminant
if d < 0:
self.error = True
return 0
self.error = False
return (-b + np.sqrt(d))/(2*a) if m else (-b - np.sqrt(d))/(2*a)
def t2f(self, t1, p):
"""Wrapp for comuting t2."""
return self.tn(t1, self.b2, p)
def t3f(self, t1, p):
"""Wrapp for computing t3."""
return self.tn(t1, self.b3, p)
def eq(self, t1, m2, m3):
"""Compute all three roots."""
t2 = self.t2f(t1, m2)
er2 = self.error
t3 = self.t3f(t1, m3)
er3 = self.error
if er2 or er3:
self.error = True
return 0
self.error = False
return t2*t3*dot(self.b2, self.b3) - t2*t1*dot(self.b2, self.b1) - \
t3*t1*dot(self.b3, self.b1) + (t1*norm(self.b1))**2
# Generate all equation combinations
def eq1(self, t1):
"""Wrapp for different root combinations."""
return self.eq(t1, True, True)
def eq2(self, t1):
"""Wrapp for different root combinations."""
return self.eq(t1, True, False)
def eq3(self, t1):
"""Wrapp for different root combinations."""
return self.eq(t1, False, True)
def eq4(self, t1):
"""Wrapp for different root combinations."""
return self.eq(t1, False, False)
def newtonsMethod(self, x, equation):
"""
Recursive method for determining root of equation.
Parameters
- x: evaluate function at this x-value
- equation: function to be evaluated
"""
val = equation(x)
err = self.error
dVal = equation(x-self.delta)
dErr = self.error
if err or dErr:
return 0
a = (val-dVal)/self.delta
b = val - a*x
intersept = -b/a
movement = intersept - x
xNew = intersept if movement < self.maxStep else x + self.maxStep * movement/abs(movement)
return x if abs(val) < self.accuracy else self.newtonsMethod(xNew, equation)
def findRoots(self):
"""Find both roots of function.
Side-effects: stores t1Value and equaiton index
"""
# Determine zero of function using Newtons method:
x0 = 2
t1Va = self.newtonsMethod(x0, self.eq1)
t1Vb = self.newtonsMethod(x0, self.eq2)
t1Vc = self.newtonsMethod(x0, self.eq3)
t1Vd = self.newtonsMethod(x0, self.eq4)
# print "t1Vs = ", t1Va, t1Vb, t1Vc, t1Vd
if t1Va != 0:
self.t1.append(t1Va)
self.t2.append(self.t2f(t1Va, True))
self.t3.append(self.t3f(t1Va, True))
if t1Vb != 0:
self.t1.append(t1Vb)
self.t2.append(self.t2f(t1Vb, True))
self.t3.append(self.t3f(t1Vb, False))
if t1Vc != 0:
self.t1.append(t1Vc)
self.t2.append(self.t2f(t1Vc, False))
self.t3.append(self.t3f(t1Vc, True))
if t1Vd != 0:
self.t1.append(t1Vd)
self.t2.append(self.t2f(t1Vd, False))
self.t3.append(self.t3f(t1Vd, False))
# Application Main
def measuredAnglesToVector(hAngle, vAngle):
"""Compute the vector along the line of intersection of the two planes.
Angles 0,0 will correspond to a vector (0,0,1) -> along z axies
"""
y = np.sin(hAngle)
x = np.sin(vAngle)
z = (1-np.sin(hAngle)**2- | np.sin(vAngle) | numpy.sin |
import numpy as np
from .functional import *
from .layers import Function
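# The activation classes below all follow the same pattern: forward() computes the
# activation, local_grad() returns d(output)/d(input) (presumably cached by the
# Function base class in self.grad), and backward(dY) applies the chain rule by
# multiplying the upstream gradient elementwise with that cached local gradient.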
class Sigmoid(Function):
def forward(self, X):
return sigmoid(X)
def backward(self, dY):
return dY * self.grad["X"]
def local_grad(self, X):
grads = {"X": sigmoid_prime(X)}
return grads
class ReLU(Function):
def forward(self, X):
return relu(X)
def backward(self, dY):
return dY * self.grad["X"]
def local_grad(self, X):
grads = {"X": relu_prime(X)}
return grads
class LeakyReLU(Function):
def forward(self, X):
return leaky_relu(X)
def backward(self, dY):
return dY * self.grad["X"]
def local_grad(self, X):
grads = {"X": leaky_relu_prime(X)}
return grads
class Softmax(Function):
def forward(self, X):
exp_x = np.exp(X)
probs = exp_x / | np.sum(exp_x, axis=1, keepdims=True) | numpy.sum |
import os
import zipfile
import argparse
import numpy as np
import _pickle as cp
import urllib.request
from io import BytesIO
from pandas import Series
NORM_MAX_THRESHOLDS = [
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
250, 25, 200, 5000, 5000, 5000, 5000, 5000, 5000,
10000, 10000, 10000, 10000, 10000, 10000, 250, 250, 25,
200, 5000, 5000, 5000, 5000, 5000, 5000, 10000, 10000,
10000, 10000, 10000, 10000, 250
]
NORM_MIN_THRESHOLDS = [
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-250, -100, -200, -5000, -5000, -5000, -5000, -5000, -5000,
-10000, -10000, -10000, -10000, -10000, -10000, -250, -250, -100,
-200, -5000, -5000, -5000, -5000, -5000, -5000, -10000, -10000,
-10000, -10000, -10000, -10000, -250
]
IMU_MAX_THRESHOLDS = [
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500
]
IMU_MIN_THRESHOLDS = [
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000
]
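# The four threshold lists above give per-channel clipping bounds; presumably they are
# used later in the preprocessing pipeline to clip and rescale the raw sensor channels
# (the "_opp" suffix below suggests the OPPORTUNITY activity-recognition dataset).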
def select_columns_opp(data,mode='fullsensor'):
if mode == 'fullsensor':
# included-excluded
features_delete = np.arange(46, 50)
features_delete = np.concatenate([features_delete, | np.arange(59, 63) | numpy.arange |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cellPoseUI.ui'
import numpy as np
import sys, os, pathlib, warnings, datetime, tempfile, glob, time, threading
from natsort import natsorted
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
import pyqtgraph as pg
import cv2
from scellseg.guis import guiparts, iopart, menus, plot
from scellseg import models, utils, transforms, dynamics, dataset, io
from scellseg.dataset import DatasetShot, DatasetQuery
from scellseg.contrast_learning.dataset import DatasetPairEval
from skimage.measure import regionprops
from tqdm import trange
from math import floor, ceil
from torch.utils.data import DataLoader
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
class Ui_MainWindow(QtGui.QMainWindow):
"""UI Widget Initialize and UI Layout Initialize,
With any bug or problem, please do connact us from Github Issue"""
def __init__(self, image=None):
super(Ui_MainWindow, self).__init__()
if image is not None:
self.filename = image
iopart._load_image(self, self.filename)
self.now_pyfile_path = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/')
def setupUi(self, MainWindow, image=None):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1420, 800)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(self.now_pyfile_path + "/assets/logo.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
menus.mainmenu(self)
menus.editmenu(self)
menus.helpmenu(self)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter2 = QtWidgets.QSplitter()
self.splitter2.setOrientation(QtCore.Qt.Horizontal)
self.splitter2.setObjectName("splitter2")
self.scrollArea = QtWidgets.QScrollArea(self.splitter)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
# self.scrollAreaWidgetContents.setFixedWidth(500)
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1500, 848))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
# self.TableModel = QtGui.QStandardItemModel(self.tableRow, self.tableCol)
# self.TableModel.setHorizontalHeaderLabels(["INDEX", "NAME"])
# self.TableView = QtGui.QTableView()
# self.TableView.setModel(self.TableModel)
self.mainLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.mainLayout.setSpacing(0)
self.mainLayout.setObjectName("mainLayout")
self.previous_button = QtWidgets.QPushButton("previous image [Ctrl + ←]")
self.load_folder = QtWidgets.QPushButton("load image folder ")
self.next_button = QtWidgets.QPushButton("next image [Ctrl + →]")
self.previous_button.setShortcut(Qt.QKeySequence.MoveToPreviousWord)
self.next_button.setShortcut(Qt.QKeySequence.MoveToNextWord)
self.mainLayout.addWidget(self.previous_button, 1, 1, 1, 1)
self.mainLayout.addWidget(self.load_folder, 1, 2, 1, 1)
self.mainLayout.addWidget(self.next_button, 1, 3, 1, 1)
self.previous_button.clicked.connect(self.PreImBntClicked)
self.next_button.clicked.connect(self.NextImBntClicked)
self.load_folder.clicked.connect(self.OpenDirBntClicked)
# leftside cell list widget
self.listView = QtWidgets.QTableView()
self.myCellList = []
self.listmodel = Qt.QStandardItemModel(0,1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
# self.listmodel.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem())
self.listView.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignLeft)
# self.listView.horizontalHeader().setStyle("background-color: #F0F0F0")
# self.listView.horizontalHeader().setVisible(False)
self.listView.verticalHeader().setVisible(False)
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.horizontalHeader().setDefaultSectionSize(140)
self.listView.setMaximumWidth(120)
self.listView.setModel(self.listmodel)
self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.listView.AdjustToContents
self.listView.customContextMenuRequested.connect(self.show_menu)
# self.listView.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listView.clicked.connect(self.showChoosen)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.toolBox = QtWidgets.QToolBox(self.splitter)
self.toolBox.setObjectName("toolBox")
self.toolBox.setMaximumWidth(340)
self.page = QtWidgets.QWidget()
self.page.setFixedWidth(340)
self.page.setObjectName("page")
self.gridLayout = QtWidgets.QGridLayout(self.page)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
# cross-hair/Draw area
self.vLine = pg.InfiniteLine(angle=90, movable=False)
self.hLine = pg.InfiniteLine(angle=0, movable=False)
self.layer_off = False
self.masksOn = True
self.win = pg.GraphicsLayoutWidget()
self.state_label = pg.LabelItem("Scellseg has been initialized!")
self.win.addItem(self.state_label, 3, 0)
self.win.scene().sigMouseClicked.connect(self.plot_clicked)
self.win.scene().sigMouseMoved.connect(self.mouse_moved)
self.make_viewbox()
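# build display look-up tables: a blue-white-red map, a spectral map, three single-channel maps,
# and a random (or matplotlib gist_ncar) colormap used to colour the predicted masks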
bwrmap = make_bwr()
self.bwr = bwrmap.getLookupTable(start=0.0, stop=255.0, alpha=False)
self.cmap = []
# spectral colormap
self.cmap.append(make_spectral().getLookupTable(start=0.0, stop=255.0, alpha=False))
# single channel colormaps
for i in range(3):
self.cmap.append(make_cmap(i).getLookupTable(start=0.0, stop=255.0, alpha=False))
if MATPLOTLIB:
self.colormap = (plt.get_cmap('gist_ncar')(np.linspace(0.0, .9, 1000)) * 255).astype(np.uint8)
else:
self.colormap = ((np.random.rand(1000, 3) * 0.8 + 0.1) * 255).astype(np.uint8)
self.is_stack = True # always loading images of same FOV
# if called with image, load it
# if image is not None:
# self.filename = image
# iopart._load_image(self, self.filename)
self.setAcceptDrops(True)
self.win.show()
self.show()
self.splitter2.addWidget(self.listView)
self.splitter2.addWidget(self.win)
self.mainLayout.addWidget(self.splitter2,0,1,1,3)
self.label_2 = QtWidgets.QLabel(self.page)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 7, 0, 1, 1)
self.brush_size = 3
self.BrushChoose = QtWidgets.QComboBox()
self.BrushChoose.addItems(["1", "3", "5", "7", "9", "11", "13", "15", "17", "19"])
self.BrushChoose.currentIndexChanged.connect(self.brush_choose)
self.gridLayout.addWidget(self.BrushChoose, 7, 1, 1, 1)
# turn on single stroke mode
self.sstroke_On = True
self.SSCheckBox = QtWidgets.QCheckBox(self.page)
self.SSCheckBox.setObjectName("SSCheckBox")
self.SSCheckBox.setChecked(True)
self.SSCheckBox.toggled.connect(self.toggle_sstroke)
self.gridLayout.addWidget(self.SSCheckBox, 8, 0, 1, 1)
self.eraser_button = QtWidgets.QCheckBox(self.page)
self.eraser_button.setObjectName("Edit mask")
self.eraser_button.setChecked(False)
self.eraser_button.toggled.connect(self.eraser_model_change)
self.eraser_button.setToolTip("Right-click to add pixels\nShift+Right-click to delete pixels")
self.gridLayout.addWidget(self.eraser_button, 9, 0, 1, 1)
self.CHCheckBox = QtWidgets.QCheckBox(self.page)
self.CHCheckBox.setObjectName("CHCheckBox")
self.CHCheckBox.toggled.connect(self.cross_hairs)
self.gridLayout.addWidget(self.CHCheckBox, 10, 0, 1, 1)
self.MCheckBox = QtWidgets.QCheckBox(self.page)
self.MCheckBox.setChecked(True)
self.MCheckBox.setObjectName("MCheckBox")
self.MCheckBox.toggled.connect(self.toggle_masks)
self.gridLayout.addWidget(self.MCheckBox, 11, 0, 1, 1)
self.OCheckBox = QtWidgets.QCheckBox(self.page)
self.outlinesOn = True
self.OCheckBox.setChecked(True)
self.OCheckBox.setObjectName("OCheckBox")
self.OCheckBox.toggled.connect(self.toggle_masks)
self.gridLayout.addWidget(self.OCheckBox, 12, 0, 1, 1)
self.scale_on = True
self.SCheckBox = QtWidgets.QCheckBox(self.page)
self.SCheckBox.setObjectName("SCheckBox")
self.SCheckBox.setChecked(True)
self.SCheckBox.toggled.connect(self.toggle_scale)
self.gridLayout.addWidget(self.SCheckBox, 13, 0, 1, 1)
self.autosaveOn = True
self.ASCheckBox = QtWidgets.QCheckBox(self.page)
self.ASCheckBox.setObjectName("ASCheckBox")
self.ASCheckBox.setChecked(True)
self.ASCheckBox.toggled.connect(self.toggle_autosave)
self.ASCheckBox.setToolTip("If ON, masks/npy/list will be autosaved")
self.gridLayout.addWidget(self.ASCheckBox, 14, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 15, 0, 1, 2)
# self.eraser_combobox = QtWidgets.QComboBox()
# self.eraser_combobox.addItems(["Pixel delete", "Pixel add"])
# self.gridLayout.addWidget(self.eraser_combobox, 8, 1, 1, 1)
self.RGBChoose = guiparts.RGBRadioButtons(self, 3, 1)
self.RGBDropDown = QtGui.QComboBox()
self.RGBDropDown.addItems(["rgb", "gray", "spectral", "red", "green", "blue"])
self.RGBDropDown.currentIndexChanged.connect(self.color_choose)
self.gridLayout.addWidget(self.RGBDropDown, 3, 0, 1, 1)
self.saturation_label = QtWidgets.QLabel("Saturation")
self.gridLayout.addWidget(self.saturation_label, 0, 0, 1, 1)
self.autobtn = QtGui.QCheckBox('Auto-adjust')
self.autobtn.setChecked(True)
self.autobtn.toggled.connect(self.toggle_autosaturation)
self.gridLayout.addWidget(self.autobtn, 0, 1, 1, 1)
self.currentZ = 0
self.zpos = QtGui.QLineEdit()
self.zpos.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.zpos.setText(str(self.currentZ))
self.zpos.returnPressed.connect(self.compute_scale)
self.zpos.setFixedWidth(20)
# self.gridLayout.addWidget(self.zpos, 0, 2, 1, 1)
self.slider = guiparts.RangeSlider(self)
self.slider.setMaximum(255)
self.slider.setMinimum(0)
self.slider.setHigh(255)
self.slider.setLow(0)
self.gridLayout.addWidget(self.slider, 2, 0, 1, 4)
self.slider.setObjectName("rangeslider")
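# inference page (page_2): model/channel selection, diameter calibration, run-segmentation and batch controls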
self.page_2 = QtWidgets.QWidget()
self.page_2.setFixedWidth(340)
self.page_2.setObjectName("page_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.page_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
page2_l = 0
self.useGPU = QtWidgets.QCheckBox(self.page_2)
self.useGPU.setObjectName("useGPU")
self.gridLayout_2.addWidget(self.useGPU, page2_l, 0, 1, 1)
self.check_gpu()
page2_l += 1
self.label_4 = QtWidgets.QLabel(self.page_2)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, page2_l, 0, 1, 1)
self.ModelChoose = QtWidgets.QComboBox(self.page_2)
self.ModelChoose.setObjectName("ModelChoose")
self.project_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.path.sep + ".")
self.model_dir = os.path.join(self.project_path, 'assets', 'pretrained_models')
print('self.model_dir', self.model_dir)
self.ModelChoose.addItem("")
self.ModelChoose.addItem("")
self.ModelChoose.addItem("")
self.gridLayout_2.addWidget(self.ModelChoose, page2_l, 1, 1, 1)
page2_l += 1
self.label_5 = QtWidgets.QLabel(self.page_2)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, page2_l, 0, 1, 1)
self.jCBChanToSegment = QtWidgets.QComboBox(self.page_2)
self.jCBChanToSegment.setObjectName("jCBChanToSegment")
self.jCBChanToSegment.addItems(["gray", "red", "green", "blue"])
self.jCBChanToSegment.setCurrentIndex(0)
self.gridLayout_2.addWidget(self.jCBChanToSegment, page2_l, 1, 1, 1)
page2_l += 1
self.label_6 = QtWidgets.QLabel(self.page_2)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, page2_l, 0, 1, 1)
self.jCBChan2 = QtWidgets.QComboBox(self.page_2)
self.jCBChan2.setObjectName("jCBChan2")
self.jCBChan2.addItems(["none", "red", "green", "blue"])
self.jCBChan2.setCurrentIndex(0)
self.gridLayout_2.addWidget(self.jCBChan2, page2_l, 1, 1, 1)
page2_l += 1
self.model_choose_btn = QtWidgets.QPushButton("Model file")
self.model_choose_btn.clicked.connect(self.model_file_dir_choose)
self.gridLayout_2.addWidget(self.model_choose_btn, page2_l, 0, 1, 1)
self.model_choose_btn = QtWidgets.QPushButton("Reset pre-trained")
self.model_choose_btn.clicked.connect(self.reset_pretrain_model)
self.gridLayout_2.addWidget(self.model_choose_btn, page2_l, 1, 1, 1)
page2_l += 1
self.label_null = QtWidgets.QLabel("")
self.gridLayout_2.addWidget(self.label_null, page2_l, 0, 1, 1)
slider_image_path = self.now_pyfile_path + '/assets/slider_handle.png'
self.sliderSheet = [
'QSlider::groove:vertical {',
'background-color: #D3D3D3;',
'position: absolute;',
'left: 4px; right: 4px;',
'}',
'',
'QSlider::groove:horizontal{',
'background-color:#D3D3D3;',
'position: absolute;',
'top: 4px; bottom: 4px;',
'}',
'',
'QSlider::handle:vertical {',
'height: 10px;',
'background-color: {0:s};'.format('#A9A9A9'),
'margin: 0 -4px;',
'}',
'',
'QSlider::handle:horizontal{',
'width: 10px;',
'border-image: url({0:s});'.format(slider_image_path),
'margin: -4px 0px -4px 0px;',
'}',
'QSlider::sub-page:horizontal',
'{',
'background-color: {0:s};'.format('#A9A9A9'),
'}',
'',
'QSlider::add-page {',
'background-color: {0:s};'.format('#D3D3D3'),
'}',
'',
'QSlider::sub-page {',
'background-color: {0:s};'.format('#D3D3D3'),
'}',
]
page2_l += 1
self.label_seg = QtWidgets.QLabel("Run seg for image in window")
self.gridLayout_2.addWidget(self.label_seg, page2_l, 0, 1, 4)
self.label_seg.setObjectName('label_seg')
page2_l += 1
self.label_3 = QtWidgets.QLabel(self.page_2)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, page2_l, 0, 1, 4)
page2_l += 1
self.prev_selected = 0
self.diameter = 30
# self.Diameter = QtWidgets.QSpinBox(self.page_2)
self.Diameter = QtWidgets.QLineEdit(self.page_2)
self.Diameter.setObjectName("Diameter")
self.Diameter.setText(str(self.diameter))
self.Diameter.setFixedWidth(100)
self.Diameter.editingFinished.connect(self.compute_scale)
self.gridLayout_2.addWidget(self.Diameter, page2_l, 0, 1, 2)
self.SizeButton = QtWidgets.QPushButton(self.page_2)
self.SizeButton.setObjectName("SizeButton")
self.gridLayout_2.addWidget(self.SizeButton, page2_l, 1, 1, 1)
self.SizeButton.clicked.connect(self.calibrate_size)
self.SizeButton.setEnabled(False)
page2_l += 1
self.label_mode = QtWidgets.QLabel("Inference mode")
self.gridLayout_2.addWidget(self.label_mode, page2_l, 0, 1, 1)
self.NetAvg = QtWidgets.QComboBox(self.page_2)
self.NetAvg.setObjectName("NetAvg")
self.NetAvg.addItems(["run 1 net (fast)", "+ resample (slow)"])
self.gridLayout_2.addWidget(self.NetAvg, page2_l, 1, 1, 1)
page2_l += 1
self.invert = QtWidgets.QCheckBox(self.page_2)
self.invert.setObjectName("invert")
self.gridLayout_2.addWidget(self.invert, page2_l, 0, 1, 1)
page2_l += 1
self.ModelButton = QtWidgets.QPushButton(' Run segmentation ')
self.ModelButton.setObjectName("runsegbtn")
self.ModelButton.clicked.connect(self.compute_model)
self.gridLayout_2.addWidget(self.ModelButton, page2_l, 0, 1, 2)
self.ModelButton.setEnabled(False)
page2_l += 1
self.label_7 = QtWidgets.QLabel(self.page_2)
self.label_7.setObjectName("label_7")
self.gridLayout_2.addWidget(self.label_7, page2_l, 0, 1, 1)
self.threshold = 0.4
self.threshslider = QtWidgets.QSlider(self.page_2)
self.threshslider.setOrientation(QtCore.Qt.Horizontal)
self.threshslider.setObjectName("threshslider")
self.threshslider.setMinimum(1.0)
self.threshslider.setMaximum(30.0)
self.threshslider.setValue(31 - 4)
self.threshslider.valueChanged.connect(self.compute_cprob)
self.threshslider.setEnabled(False)
self.threshslider.setStyleSheet('\n'.join(self.sliderSheet))
self.gridLayout_2.addWidget(self.threshslider, page2_l, 1, 1, 1)
self.threshslider.setToolTip("Value: " + str(self.threshold))
page2_l += 1
self.label_8 = QtWidgets.QLabel(self.page_2)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, page2_l, 0, 1, 1)
self.probslider = QtWidgets.QSlider(self.page_2)
self.probslider.setOrientation(QtCore.Qt.Horizontal)
self.probslider.setObjectName("probslider")
self.probslider.setStyleSheet('\n'.join(self.sliderSheet))
self.gridLayout_2.addWidget(self.probslider, page2_l, 1, 1, 1)
self.probslider.setMinimum(-6.0)
self.probslider.setMaximum(6.0)
self.probslider.setValue(0.0)
self.cellprob = 0.5
self.probslider.valueChanged.connect(self.compute_cprob)
self.probslider.setEnabled(False)
self.probslider.setToolTip("Value: " + str(self.cellprob))
page2_l += 1
self.label_batchseg = QtWidgets.QLabel("Batch segmentation")
self.label_batchseg.setObjectName('label_batchseg')
self.gridLayout_2.addWidget(self.label_batchseg, page2_l, 0, 1, 4)
page2_l += 1
self.label_bz = QtWidgets.QLabel("Batch size")
self.gridLayout_2.addWidget(self.label_bz, page2_l, 0, 1, 1)
self.bz_line = QtWidgets.QLineEdit()
self.bz_line.setPlaceholderText('Default: 8')
self.bz_line.setFixedWidth(120)
self.gridLayout_2.addWidget(self.bz_line, page2_l, 1, 1, 1)
page2_l += 1
self.dataset_inference_bnt = QtWidgets.QPushButton("Data path")
self.gridLayout_2.addWidget(self.dataset_inference_bnt, page2_l, 0, 1, 1)
self.dataset_inference_bnt.clicked.connect(self.batch_inference_dir_choose)
self.batch_inference_bnt = QtWidgets.QPushButton("Run batch")
self.batch_inference_bnt.setObjectName("binferbnt")
self.batch_inference_bnt.clicked.connect(self.batch_inference)
self.gridLayout_2.addWidget(self.batch_inference_bnt, page2_l, 1, 1, 1)
self.batch_inference_bnt.setEnabled(False)
page2_l += 1
self.label_getsingle = QtWidgets.QLabel("Get single instance")
self.label_getsingle.setObjectName('label_getsingle')
self.gridLayout_2.addWidget(self.label_getsingle, page2_l,0,1,2)
page2_l += 1
self.single_dir_bnt = QtWidgets.QPushButton("Data path")
self.single_dir_bnt.clicked.connect(self.single_dir_choose)
self.gridLayout_2.addWidget(self.single_dir_bnt, page2_l,0,1,1)
self.single_cell_btn = QtWidgets.QPushButton("Run batch")
self.single_cell_btn.setObjectName('single_cell_btn')
self.single_cell_btn.clicked.connect(self.get_single_cell)
self.gridLayout_2.addWidget(self.single_cell_btn, page2_l,1,1,1)
self.single_cell_btn.setEnabled(False)
page2_l += 1
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem2, page2_l, 0, 1, 2)
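# fine-tune page (page_3): dataset path, architecture, channels, fine-tune strategy and training options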
self.page_3 = QtWidgets.QWidget()
self.page_3.setFixedWidth(340)
self.page_3.setObjectName("page_3")
self.progress = QtWidgets.QProgressBar()
self.progress.setProperty("value", 0)
self.progress.setAlignment(QtCore.Qt.AlignCenter)
self.progress.setObjectName("progress")
self.gridLayout_3 = QtWidgets.QGridLayout(self.page_3)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.ftuseGPU = QtWidgets.QCheckBox("Use GPU")
self.ftuseGPU.setObjectName("ftuseGPU")
self.gridLayout_3.addWidget(self.ftuseGPU, 0, 0, 1, 2)
self.check_ftgpu()
self.ftdirbtn = QtWidgets.QPushButton("Dataset path")
self.ftdirbtn.clicked.connect(self.fine_tune_dir_choose)
self.gridLayout_3.addWidget(self.ftdirbtn, 0, 2, 1, 2)
self.label_10 = QtWidgets.QLabel("Model architecture")
self.gridLayout_3.addWidget(self.label_10, 1, 0, 1, 2)
self.ftmodelchooseBnt = QtWidgets.QComboBox()
self.ftmodelchooseBnt.addItems(["scellseg", "cellpose", "hover"])
self.gridLayout_3.addWidget(self.ftmodelchooseBnt, 1, 2, 1, 2)
self.label_11 = QtWidgets.QLabel("Chan to segment")
self.gridLayout_3.addWidget(self.label_11, 2, 0, 1, 2)
self.chan1chooseBnt = QtWidgets.QComboBox()
self.chan1chooseBnt.addItems(["gray", "red", "green", "blue"])
self.chan1chooseBnt.setCurrentIndex(0)
self.gridLayout_3.addWidget(self.chan1chooseBnt, 2, 2, 1, 2)
self.label_12 = QtWidgets.QLabel("Chan2 (optional)")
self.gridLayout_3.addWidget(self.label_12, 3, 0, 1, 2)
self.chan2chooseBnt = QtWidgets.QComboBox()
self.chan2chooseBnt.addItems(["none", "red", "green", "blue"])
self.chan2chooseBnt.setCurrentIndex(0)
self.gridLayout_3.addWidget(self.chan2chooseBnt, 3, 2, 1, 2)
self.label_13 = QtWidgets.QLabel("Fine-tune strategy")
self.gridLayout_3.addWidget(self.label_13, 4, 0, 1, 2)
self.stmodelchooseBnt = QtWidgets.QComboBox()
self.stmodelchooseBnt.addItems(["contrastive", "classic"])
self.gridLayout_3.addWidget(self.stmodelchooseBnt, 4, 2, 1, 2)
self.label_14 = QtWidgets.QLabel("Epoch")
self.gridLayout_3.addWidget(self.label_14, 5, 0, 1, 2)
self.epoch_line = QtWidgets.QLineEdit()
self.epoch_line.setPlaceholderText('Default: 100')
self.gridLayout_3.addWidget(self.epoch_line, 5, 2, 1, 2)
self.label_ftbz = QtWidgets.QLabel("Batch size")
self.gridLayout_3.addWidget(self.label_ftbz, 6, 0, 1, 2)
self.ftbz_line = QtWidgets.QLineEdit()
self.ftbz_line.setPlaceholderText('Default: 8')
self.gridLayout_3.addWidget(self.ftbz_line, 6, 2, 1, 2)
self.ftbnt = QtWidgets.QPushButton("Start fine-tuning")
self.ftbnt.setObjectName('ftbnt')
self.ftbnt.clicked.connect(self.fine_tune)
self.gridLayout_3.addWidget(self.ftbnt, 7, 0, 1, 4)
self.ftbnt.setEnabled(False)
spacerItem3 = QtWidgets.QSpacerItem(20, 320, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 8, 0, 1, 1)
#initialize scroll size
self.scroll = QtGui.QScrollBar(QtCore.Qt.Horizontal)
# self.scroll.setMaximum(10)
# self.scroll.valueChanged.connect(self.move_in_Z)
# self.gridLayout_3.addWidget(self.scroll)
spacerItem2 = QtWidgets.QSpacerItem(20, 320, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem2)
self.toolBox.addItem(self.page, "")
self.toolBox.addItem(self.page_3, "")
self.toolBox.addItem(self.page_2, "")
self.verticalLayout_2.addWidget(self.splitter)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.toolBox.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.centralwidget.setFocusPolicy(QtCore.Qt.StrongFocus)
self.reset()
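# context menu of the annotation list: right-clicking a row lets the user delete or edit that cell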
def show_menu(self, point):
# print(point.x())
# item = self.listView.itemAt(point)
# print(item)
temp_cell_idx = self.listView.rowAt(point.y())
self.list_select_cell(temp_cell_idx+1)
# print(self.myCellList[temp_cell_idx])
if self.listView.rowAt(point.y()) >= 0:
self.contextMenu = QtWidgets.QMenu()
self.actionA = QtGui.QAction("Delete this cell", self)
self.actionB = QtGui.QAction("Edit this cell", self)
self.contextMenu.addAction(self.actionA)
self.contextMenu.addAction(self.actionB)
self.contextMenu.popup(QtGui.QCursor.pos())
self.actionA.triggered.connect(lambda: self.remove_cell(temp_cell_idx + 1))
self.actionB.triggered.connect(lambda: self.edit_cell(temp_cell_idx + 1))
self.contextMenu.show()
def edit_cell(self, index):
self.select_cell(index)
self.eraser_button.setChecked(True)
self.toolBox.setCurrentIndex(0)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Scellseg"))
self.CHCheckBox.setText(_translate("MainWindow", "Crosshair on [C]"))
self.MCheckBox.setText(_translate("MainWindow", "Masks on [X]"))
self.label_2.setText(_translate("MainWindow", "Brush size"))
self.OCheckBox.setText(_translate("MainWindow", "Outlines on [Z]"))
# self.ServerButton.setText(_translate("MainWindow", "send manual seg. to server"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page), _translate("MainWindow", "View and Draw"))
self.SizeButton.setText(_translate("MainWindow", "Calibrate diam"))
self.label_3.setText(_translate("MainWindow", "Cell diameter (pixels):"))
self.useGPU.setText(_translate("MainWindow", "Use GPU"))
self.SCheckBox.setText(_translate("MainWindow", "Scale disk on [S]"))
self.ASCheckBox.setText(_translate("MainWindow", "Autosave [P]"))
self.SSCheckBox.setText(_translate("MainWindow", "Single stroke"))
self.eraser_button.setText(_translate("MainWindow", "Edit mask [E]"))
self.ModelChoose.setItemText(0, _translate("MainWindow", "scellseg"))
self.ModelChoose.setItemText(1, _translate("MainWindow", "cellpose"))
self.ModelChoose.setItemText(2, _translate("MainWindow", "hover"))
self.invert.setText(_translate("MainWindow", "Invert grayscale"))
self.label_4.setText(_translate("MainWindow", "Model architecture"))
self.label_5.setText(_translate("MainWindow", "Chan to segment"))
self.label_6.setText(_translate("MainWindow", "Chan2 (optional)"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Inference"))
self.label_7.setText(_translate("MainWindow", "Model match TH"))
self.label_8.setText(_translate("MainWindow", "Cell prob TH"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_3), _translate("MainWindow", "Fine-tune"))
# self.menuFile.setTitle(_translate("MainWindow", "File"))
# self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
# self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.ImFolder = ''
self.ImNameSet = []
self.CurImId = 0
self.CurFolder = os.getcwd()
self.DefaultImFolder = self.CurFolder
def setWinTop(self):
print('get')
def OpenDirDropped(self, curFile=None):
# dir dropped callback func
if self.ImFolder != '':
self.ImNameSet = []
self.ImNameRowSet = os.listdir(self.ImFolder)
# print(self.ImNameRowSet)
for tmp in self.ImNameRowSet:
ext = os.path.splitext(tmp)[-1]
if ext in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif'] and '_mask' not in tmp:
self.ImNameSet.append(tmp)
self.ImNameSet.sort()
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[0]
ImNameSetNosuffix = [os.path.splitext(imNameSeti)[0] for imNameSeti in self.ImNameSet]
# pix = QtGui.QPixmap(self.ImPath)
# self.ImShowLabel.setPixmap(pix)
if curFile is not None:
curFile = os.path.splitext(curFile)[0]
try:
self.CurImId = ImNameSetNosuffix.index(curFile)
print(self.CurImId)
except ValueError:
curFile = curFile.replace('_cp_masks', '')
curFile = curFile.replace('_masks', '')
self.CurImId = ImNameSetNosuffix.index(curFile)
print(self.CurImId)
return
# self.state_label.setText("", color='#FF6A56')
else:
self.CurImId = 0
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
else:
print('Please choose another image folder')
def OpenDirBntClicked(self):
# dir choosing callback function
self.ImFolder = QtWidgets.QFileDialog.getExistingDirectory(None, "select folder", self.DefaultImFolder)
if self.ImFolder != '':
self.ImNameSet = []
self.ImNameRowSet = os.listdir(self.ImFolder)
# print(self.ImNameRowSet)
for tmp in self.ImNameRowSet:
ext = os.path.splitext(tmp)[-1]
if ext in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif'] and '_mask' not in tmp:
self.ImNameSet.append(tmp)
self.ImNameSet.sort()
print(self.ImNameSet)
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[0]
# pix = QtGui.QPixmap(self.ImPath)
# self.ImShowLabel.setPixmap(pix)
self.CurImId = 0
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
else:
print('Please choose another image folder')
def PreImBntClicked(self):
self.auto_save()
# show previous image
self.ImNum = len(self.ImNameSet)
print(self.ImFolder, self.ImNameSet)
self.CurImId = self.CurImId - 1
if self.CurImId >= 0:  # the first image has no previous one
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
if self.CurImId < 0:
self.CurImId = 0
self.state_label.setText("This is the first image", color='#FF6A56')
def NextImBntClicked(self):
self.auto_save()
# show next image
self.ImNum = len(self.ImNameSet)
if self.CurImId < self.ImNum - 1:
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId + 1]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
self.CurImId = self.CurImId + 1
else:
self.state_label.setText("This is the last image", color='#FF6A56')
def eraser_model_change(self):
if self.eraser_button.isChecked():
self.outlinesOn = False
self.OCheckBox.setChecked(False)
# self.OCheckBox.setEnabled(False)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
# self.cur_size = self.brush_size * 6
# cursor = Qt.QPixmap("./assets/eraser.png")
# cursor_scaled = cursor.scaled(self.cur_size, self.cur_size)
# cursor_set = Qt.QCursor(cursor_scaled, self.cur_size/2, self.cur_size/2)
# QtWidgets.QApplication.setOverrideCursor(cursor_set)
self.update_plot()
else:
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
def showChoosen(self, item):
temp_cell_idx = int(item.row())
self.list_select_cell(int(temp_cell_idx) + 1)
def save_cell_list(self):
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.cell_list_name = os.path.splitext(self.filename)[0] + "_instance_list.txt"
np.savetxt(self.cell_list_name, np.array(self.myCellList), fmt="%s")
self.listView.clearSelection()
def save_cell_list_menu(self):
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.cell_list_name = os.path.splitext(self.filename)[0] + "_instance_list.txt"
np.savetxt(self.cell_list_name, np.array(self.myCellList), fmt="%s")
self.state_label.setText("Saved outlines", color='#39B54A')
self.listView.clearSelection()
def help_window(self):
HW = guiparts.HelpWindow(self)
HW.show()
def gui_window(self):
EG = guiparts.ExampleGUI(self)
EG.show()
def toggle_autosave(self):
if self.ASCheckBox.isChecked():
self.autosaveOn = True
else:
self.autosaveOn = False
print('self.autosaveOn', self.autosaveOn)
def toggle_sstroke(self):
if self.SSCheckBox.isChecked():
self.sstroke_On = True
else:
self.sstroke_On = False
print('self.sstroke_On', self.sstroke_On)
def toggle_autosaturation(self):
if self.autobtn.isChecked():
self.compute_saturation()
self.update_plot()
def cross_hairs(self):
if self.CHCheckBox.isChecked():
self.p0.addItem(self.vLine, ignoreBounds=True)
self.p0.addItem(self.hLine, ignoreBounds=True)
else:
self.p0.removeItem(self.vLine)
self.p0.removeItem(self.hLine)
def plot_clicked(self, event):
if event.double():
if event.button() == QtCore.Qt.LeftButton:
print("will initialize the range")
if (event.modifiers() != QtCore.Qt.ShiftModifier and
event.modifiers() != QtCore.Qt.AltModifier):
try:
self.p0.setYRange(0,self.Ly+self.pr)
except:
self.p0.setYRange(0,self.Ly)
self.p0.setXRange(0,self.Lx)
def mouse_moved(self, pos):
# print('moved')
items = self.win.scene().items(pos)
for x in items:
if x == self.p0:
mousePoint = self.p0.mapSceneToView(pos)
if self.CHCheckBox.isChecked():
self.vLine.setPos(mousePoint.x())
self.hLine.setPos(mousePoint.y())
# else:
# QtWidgets.QApplication.restoreOverrideCursor()
# QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.DefaultCursor)
def color_choose(self):
self.color = self.RGBDropDown.currentIndex()
self.view = 0
self.RGBChoose.button(self.view).setChecked(True)
self.update_plot()
def update_ztext(self):
zpos = self.currentZ
try:
zpos = int(self.zpos.text())
except ValueError:
print('ERROR: z position is not a number')
self.currentZ = max(0, min(self.NZ - 1, zpos))
self.zpos.setText(str(self.currentZ))
self.scroll.setValue(self.currentZ)
def calibrate_size(self):
model_type = self.ModelChoose.currentText()
pretrained_model = os.path.join(self.model_dir, model_type)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=model_type)
diams, _ = self.model.sz.eval(self.stack[self.currentZ].copy(), invert=self.invert.isChecked(),
channels=self.get_channels(), progress=self.progress)
diams = np.maximum(5.0, diams)
print('estimated diameter of cells using %s model = %0.1f pixels' %
(self.current_model, diams))
self.state_label.setText('Estimated diameter of cells using %s model = %0.1f pixels' %
(self.current_model, diams), color='#969696')
self.Diameter.setText('%0.1f'%diams)
self.diameter = diams
self.compute_scale()
self.progress.setValue(100)
def enable_buttons(self):
# self.X2Up.setEnabled(True)
# self.X2Down.setEnabled(True)
self.ModelButton.setEnabled(True)
self.SizeButton.setEnabled(True)
self.saveSet.setEnabled(True)
self.savePNG.setEnabled(True)
self.saveOutlines.setEnabled(True)
self.saveCellList.setEnabled(True)
self.saveAll.setEnabled(True)
self.loadMasks.setEnabled(True)
self.loadManual.setEnabled(True)
self.loadCellList.setEnabled(True)
self.toggle_mask_ops()
self.update_plot()
self.setWindowTitle('Scellseg @ ' + self.filename)
def add_set(self):
if len(self.current_point_set) > 0:
# print(self.current_point_set)
# print(np.array(self.current_point_set).shape)
self.current_point_set = np.array(self.current_point_set)
while len(self.strokes) > 0:
self.remove_stroke(delete_points=False)
if len(self.current_point_set) > 8:
col_rand = np.random.randint(1000)
color = self.colormap[col_rand, :3]
median = self.add_mask(points=self.current_point_set, color=color)
if median is not None:
self.removed_cell = []
self.toggle_mask_ops()
self.cellcolors.append(color)
self.ncells += 1
self.add_list_item()
self.ismanual = np.append(self.ismanual, True)
# if self.NZ == 1:
# # only save after each cell if single image
# iopart._save_sets(self)
self.current_stroke = []
self.strokes = []
self.current_point_set = []
self.update_plot()
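# rasterize the drawn points into a filled mask per z-plane, drop pixels overlapping existing cells,
# and keep the dense outline; strokes spanning several planes are interpolated with interpZ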
def add_mask(self, points=None, color=None):
# loop over z values
median = []
if points.shape[1] < 3:
points = np.concatenate((np.zeros((points.shape[0], 1), np.int32), points), axis=1)
zdraw = np.unique(points[:, 0])
zrange = np.arange(zdraw.min(), zdraw.max() + 1, 1, int)
zmin = zdraw.min()
pix = np.zeros((2, 0), np.uint16)
mall = np.zeros((len(zrange), self.Ly, self.Lx), bool)
k = 0
for z in zdraw:
iz = points[:, 0] == z
vr = points[iz, 1]
vc = points[iz, 2]
# get points inside drawn points
mask = np.zeros((np.ptp(vr) + 4, np.ptp(vc) + 4), np.uint8)
pts = np.stack((vc - vc.min() + 2, vr - vr.min() + 2), axis=-1)[:, np.newaxis, :]
mask = cv2.fillPoly(mask, [pts], (255, 0, 0))
ar, ac = np.nonzero(mask)
ar, ac = ar + vr.min() - 2, ac + vc.min() - 2
# get dense outline
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + vr.min() - 2, pvc + vc.min() - 2
# concatenate all points
ar, ac = np.hstack((np.vstack((vr, vc)), np.vstack((ar, ac))))
# if these pixels are overlapping with another cell, reassign them
ioverlap = self.cellpix[z][ar, ac] > 0
if (~ioverlap).sum() < 8:
print('ERROR: cell too small without overlaps, not drawn')
return None
elif ioverlap.sum() > 0:
ar, ac = ar[~ioverlap], ac[~ioverlap]
# compute outline of new mask
mask = np.zeros((np.ptp(ar) + 4, np.ptp(ac) + 4), np.uint8)
mask[ar - ar.min() + 2, ac - ac.min() + 2] = 1
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + ar.min() - 2, pvc + ac.min() - 2
self.draw_mask(z, ar, ac, vr, vc, color)
median.append(np.array([np.median(ar), np.median(ac)]))
mall[z - zmin, ar, ac] = True
pix = np.append(pix, np.vstack((ar, ac)), axis=-1)
mall = mall[:, pix[0].min():pix[0].max() + 1, pix[1].min():pix[1].max() + 1].astype(np.float32)
ymin, xmin = pix[0].min(), pix[1].min()
if len(zdraw) > 1:
mall, zfill = interpZ(mall, zdraw - zmin)
for z in zfill:
mask = mall[z].copy()
ar, ac = np.nonzero(mask)
ioverlap = self.cellpix[z + zmin][ar + ymin, ac + xmin] > 0
if (~ioverlap).sum() < 5:
print('WARNING: stroke on plane %d not included due to overlaps' % z)
elif ioverlap.sum() > 0:
mask[ar[ioverlap], ac[ioverlap]] = 0
ar, ac = ar[~ioverlap], ac[~ioverlap]
# compute outline of mask
outlines = utils.masks_to_outlines(mask)
vr, vc = np.nonzero(outlines)
vr, vc = vr + ymin, vc + xmin
ar, ac = ar + ymin, ac + xmin
self.draw_mask(z + zmin, ar, ac, vr, vc, color)
self.zdraw.append(zdraw)
return median
def move_in_Z(self):
if self.loaded:
self.currentZ = min(self.NZ, max(0, int(self.scroll.value())))
self.zpos.setText(str(self.currentZ))
self.update_plot()
def make_viewbox(self):
# initialize the main viewport widget
# print("making viewbox")
self.p0 = guiparts.ViewBoxNoRightDrag(
parent=self,
lockAspect=True,
name="plot1",
border=[100, 100, 100],
invertY=True
)
# self.p0.setBackgroundColor(color='#292929')
self.brush_size = 3
self.win.addItem(self.p0, 0, 0)
self.p0.setMenuEnabled(False)
self.p0.setMouseEnabled(x=True, y=True)
self.img = pg.ImageItem(viewbox=self.p0, parent=self, axisOrder='row-major')
self.img.autoDownsample = False
# self.null_image = np.ones((200,200))
# self.img.setImage(self.null_image)
self.layer = guiparts.ImageDraw(viewbox=self.p0, parent=self)
self.layer.setLevels([0, 255])
self.scale = pg.ImageItem(viewbox=self.p0, parent=self)
self.scale.setLevels([0, 255])
self.p0.scene().contextMenuItem = self.p0
# self.p0.setMouseEnabled(x=False,y=False)
self.Ly, self.Lx = 512, 512
self.p0.addItem(self.img)
self.p0.addItem(self.layer)
self.p0.addItem(self.scale)
# guiparts.make_quadrants(self)
def get_channels(self):
channels = [self.jCBChanToSegment.currentIndex(), self.jCBChan2.currentIndex()]
return channels
def compute_saturation(self):
# compute percentiles from stack
self.saturation = []
self.slider._low = np.percentile(self.stack[0].astype(np.float32), 1)
self.slider._high = np.percentile(self.stack[0].astype(np.float32), 99)
for n in range(len(self.stack)):
print('n,', n)
self.saturation.append([np.percentile(self.stack[n].astype(np.float32), 1),
np.percentile(self.stack[n].astype(np.float32), 99)])
def keyReleaseEvent(self, event):
# print('self.loaded', self.loaded)
if self.loaded:
# self.p0.setMouseEnabled(x=True, y=True)
if (event.modifiers() != QtCore.Qt.ControlModifier and
event.modifiers() != QtCore.Qt.ShiftModifier and
event.modifiers() != QtCore.Qt.AltModifier) and not self.in_stroke:
updated = False
if len(self.current_point_set) > 0:
if event.key() == QtCore.Qt.Key_Return:
self.add_set()
if self.NZ > 1:
if event.key() == QtCore.Qt.Key_Left:
self.currentZ = max(0, self.currentZ - 1)
self.zpos.setText(str(self.currentZ))
elif event.key() == QtCore.Qt.Key_Right:
self.currentZ = min(self.NZ - 1, self.currentZ + 1)
self.zpos.setText(str(self.currentZ))
else:
if event.key() == QtCore.Qt.Key_M:
self.MCheckBox.toggle()
if event.key() == QtCore.Qt.Key_O:
self.OCheckBox.toggle()
if event.key() == QtCore.Qt.Key_C:
self.CHCheckBox.toggle()
if event.key() == QtCore.Qt.Key_S:
self.SCheckBox.toggle()
if event.key() == QtCore.Qt.Key_E:
self.eraser_button.toggle()
self.toolBox.setCurrentIndex(0)
if event.key() == QtCore.Qt.Key_P:
self.ASCheckBox.toggle()
if event.key() == QtCore.Qt.Key_PageDown:
self.view = (self.view + 1) % (len(self.RGBChoose.bstr))
print('self.view ', self.view)
self.RGBChoose.button(self.view).setChecked(True)
elif event.key() == QtCore.Qt.Key_PageUp:
self.view = (self.view - 1) % (len(self.RGBChoose.bstr))
print('self.view ', self.view)
self.RGBChoose.button(self.view).setChecked(True)
# can change background or stroke size if cell not finished
if event.key() == QtCore.Qt.Key_Up:
self.color = (self.color - 1) % (6)
print('self.color', self.color)
self.RGBDropDown.setCurrentIndex(self.color)
elif event.key() == QtCore.Qt.Key_Down:
self.color = (self.color + 1) % (6)
print('self.color', self.color)
self.RGBDropDown.setCurrentIndex(self.color)
if (event.key() == QtCore.Qt.Key_BracketLeft or
event.key() == QtCore.Qt.Key_BracketRight):
count = self.BrushChoose.count()
gci = self.BrushChoose.currentIndex()
if event.key() == QtCore.Qt.Key_BracketLeft:
gci = max(0, gci - 1)
else:
gci = min(count - 1, gci + 1)
self.BrushChoose.setCurrentIndex(gci)
self.brush_choose()
self.state_label.setText("Brush size: %s"%(2*gci+1), color='#969696')
if not updated:
self.update_plot()
elif event.modifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_Z:
self.undo_action()
if event.key() == QtCore.Qt.Key_0:
self.clear_all()
def keyPressEvent(self, event):
if event.modifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_1:
self.toolBox.setCurrentIndex(0)
if event.key() == QtCore.Qt.Key_2:
self.toolBox.setCurrentIndex(1)
if event.key() == QtCore.Qt.Key_3:
self.toolBox.setCurrentIndex(2)
if event.key() == QtCore.Qt.Key_Minus or event.key() == QtCore.Qt.Key_Equal:
self.p0.keyPressEvent(event)
def chanchoose(self, image):
if image.ndim > 2:
if self.jCBChanToSegment.currentIndex() == 0:
image = image.astype(np.float32).mean(axis=-1)[..., np.newaxis]
else:
chanid = [self.jCBChanToSegment.currentIndex() - 1]
if self.jCBChan2.currentIndex() > 0:
chanid.append(self.jCBChan2.currentIndex() - 1)
image = image[:, :, chanid].astype(np.float32)
return image
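# build the sCellSeg model; keyword arguments are passed through to models.sCellSeg unchanged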
def initialize_model(self, gpu=False, pretrained_model=False, model_type='scellseg',
diam_mean=30., net_avg=False, device=None, nclasses=3,
residual_on=True, style_on=True, concatenation=False, update_step=1,
last_conv_on=True, attn_on=False, dense_on=False, style_scale_on=True,
task_mode='cellpose', model=None):
self.current_model = model_type
self.model = models.sCellSeg(gpu=gpu, pretrained_model=pretrained_model, model_type=model_type,
diam_mean=diam_mean, net_avg=net_avg, device=device, nclasses=nclasses,
residual_on=residual_on, style_on=style_on, concatenation=concatenation, update_step=update_step,
last_conv_on=last_conv_on, attn_on=attn_on, dense_on=dense_on, style_scale_on=style_scale_on,
task_mode=task_mode, model=model)
def set_compute_thread(self):
self.seg_thread = threading.Thread(target=self.compute_model)
self.seg_thread.daemon = True
self.seg_thread.start()
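# run segmentation on the image currently shown in the window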
def compute_model(self):
self.progress.setValue(0)
self.update_plot()
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
if True:
tic = time.time()
self.clear_all()
self.flows = [[], [], []]
pretrained_model = os.path.join(self.model_dir, self.ModelChoose.currentText())
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=self.ModelChoose.currentText())
print('using model %s' % self.current_model)
self.progress.setValue(10)
do_3D = False
if self.NZ > 1:
do_3D = True
data = self.stack.copy()
else:
data = self.stack[0].copy()
channels = self.get_channels()
# print(channels)
self.diameter = float(self.Diameter.text())
self.update_plot()
try:
# net_avg = self.NetAvg.currentIndex() == 0
resample = self.NetAvg.currentIndex() == 1  # NOTE: we may need to modify this part
min_size = ((30. // 2) ** 2) * np.pi * 0.05
try:
finetune_model = self.model_file_path[0]
print('ft_model', finetune_model)
except:
finetune_model = None
# inference
masks, flows, _ = self.model.inference(finetune_model=finetune_model, net_avg=False,
query_images=data, channel=channels,
diameter=self.diameter,
resample=resample, flow_threshold=self.threshold,
cellprob_threshold=self.cellprob,
min_size=min_size, eval_batch_size=8,
postproc_mode=self.model.postproc_mode,
progress=self.progress)
self.state_label.setText(
'%d cells found with scellseg net in %0.3fs' % (
len(np.unique(masks)[1:]), time.time() - tic),
color='#39B54A')
# self.state_label.setStyleSheet("color:green;")
self.update_plot()
self.progress.setValue(75)
self.flows[0] = flows[0].copy()
self.flows[1] = (np.clip(utils.normalize99(flows[2].copy()), 0, 1) * 255).astype(np.uint8)
if not do_3D:
masks = masks[np.newaxis, ...]
self.flows[0] = transforms.resize_image(self.flows[0], masks.shape[-2], masks.shape[-1],
interpolation=cv2.INTER_NEAREST)
self.flows[1] = transforms.resize_image(self.flows[1], masks.shape[-2], masks.shape[-1])
if not do_3D:
self.flows[2] = np.zeros(masks.shape[1:], dtype=np.uint8)
self.flows = [self.flows[n][np.newaxis, ...] for n in range(len(self.flows))]
else:
self.flows[2] = (flows[1][0] / 10 * 127 + 127).astype(np.uint8)
if len(flows) > 2:
self.flows.append(flows[3])
self.flows.append(np.concatenate((flows[1], flows[2][np.newaxis, ...]), axis=0))
print()
self.progress.setValue(80)
z = 0
self.masksOn = True
self.outlinesOn = True
self.MCheckBox.setChecked(True)
self.OCheckBox.setChecked(True)
iopart._masks_to_gui(self, masks, outlines=None)
self.progress.setValue(100)
self.first_load_listView()
# self.toggle_server(off=True)
if not do_3D:
self.threshslider.setEnabled(True)
self.probslider.setEnabled(True)
self.masks_for_save = masks
except Exception as e:
print('NET ERROR: %s' % e)
self.progress.setValue(0)
return
else: # except Exception as e:
print('ERROR: %s' % e)
print('Finished inference')
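# batch inference over a folder: '_img'/'_masks' shot images set the style and mean diameter,
# query images are segmented and the results saved with io.masks_flows_to_seg / io.save_to_png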
def batch_inference(self):
self.progress.setValue(0)
# print('threshold', self.threshold, self.cellprob)
# self.update_plot()
if True:
tic = time.time()
self.clear_all()
model_type = self.ModelChoose.currentText()
pretrained_model = os.path.join(self.model_dir, model_type)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=model_type)
print('using model %s' % self.current_model)
self.progress.setValue(10)
channels = self.get_channels()
self.diameter = float(self.Diameter.text())
try:
# net_avg = self.NetAvg.currentIndex() < 2
# resample = self.NetAvg.currentIndex() == 1
min_size = ((30. // 2) ** 2) * np.pi * 0.05
try:
finetune_model = self.model_file_path[0]
print('ft_model', finetune_model)
except:
finetune_model = None
try:
dataset_path = self.batch_inference_dir
except:
dataset_path = None
# batch inference
bz = 8 if self.bz_line.text() == '' else int(self.bz_line.text())
save_name = self.current_model + '_' + os.path.basename(dataset_path)
utils.set_manual_seed(5)
try:
shotset = dataset.DatasetShot(eval_dir=dataset_path, class_name=None, image_filter='_img',
mask_filter='_masks',
channels=channels, task_mode=self.model.task_mode, active_ind=None,
rescale=True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'),
resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'),
resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.batch_inference_bnt.setEnabled(False)
return
queryset = dataset.DatasetQuery(dataset_path, class_name=None, image_filter='_img',
mask_filter='_masks')
query_image_names = queryset.query_image_names
diameter = shotset.md
print('>>>> mean diameter of this style,', round(diameter, 3))
self.model.net.save_name = save_name
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
# flow_threshold was set to 0.4, and cellprob_threshold was set to 0.5
try:
masks, flows, _ = self.model.inference(finetune_model=finetune_model, net_avg=False,
query_image_names=query_image_names, channel=channels,
diameter=diameter,
resample=False, flow_threshold=0.4,
cellprob_threshold=0.5,
min_size=min_size, eval_batch_size=bz,
postproc_mode=self.model.postproc_mode,
progress=self.progress)
except RuntimeError:
iopart._initialize_image_portable(self,
iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'),
resize=self.resize, X2=0)
self.state_label.setText("Batch size is too big, please set smaller",
color='#FF6A56')
print("Batch size is too big, please set smaller")
return
# save output images
diams = np.ones(len(query_image_names)) * diameter
imgs = [io.imread(query_image_name) for query_image_name in query_image_names]
io.masks_flows_to_seg(imgs, masks, flows, diams, query_image_names,
[channels for i in range(len(query_image_names))])
io.save_to_png(imgs, masks, flows, query_image_names, labels=None, aps=None,
task_mode=self.model.task_mode)
self.masks_for_save = masks
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize,
X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
return
else: # except Exception as e:
print('ERROR: %s' % e)
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading3.png'), autoLevels=False, lut=None)
self.state_label.setText('Finished inference in %0.3fs!'%(time.time() - tic), color='#39B54A')
self.batch_inference_bnt.setEnabled(False)
def compute_cprob(self):
rerun = False
if self.cellprob != self.probslider.value():
rerun = True
self.cellprob = self.probslider.value()
if self.threshold != (31 - self.threshslider.value()) / 10.:
rerun = True
self.threshold = (31 - self.threshslider.value()) / 10.
if not rerun:
return
if self.threshold == 3.0 or self.NZ > 1:
thresh = None
print('computing masks with cell prob=%0.3f, no flow error threshold' %
(self.cellprob))
else:
thresh = self.threshold
print('computing masks with cell prob=%0.3f, flow error threshold=%0.3f' %
(self.cellprob, thresh))
maski = dynamics.get_masks(self.flows[3].copy(), iscell=(self.flows[4][-1] > self.cellprob),
flows=self.flows[4][:-1], threshold=thresh)
if self.NZ == 1:
maski = utils.fill_holes_and_remove_small_masks(maski)
maski = transforms.resize_image(maski, self.cellpix.shape[-2], self.cellpix.shape[-1],
interpolation=cv2.INTER_NEAREST)
self.masksOn = True
self.outlinesOn = True
self.MCheckBox.setChecked(True)
self.OCheckBox.setChecked(True)
if maski.ndim < 3:
maski = maski[np.newaxis, ...]
print('%d cells found' % (len(np.unique(maski)[1:])))
iopart._masks_to_gui(self, maski, outlines=None)
self.threshslider.setToolTip("Value: " + str(self.threshold))
self.probslider.setToolTip("Value: " + str(self.cellprob))
self.first_load_listView()
self.show()
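# reset all GUI state (selection, strokes, masks, colours) back to an empty 512x512 stack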
def reset(self):
# ---- start sets of points ---- #
self.selected = 0
self.X2 = 0
self.resize = -1
self.onechan = False
self.loaded = False
self.channel = [0, 1]
self.current_point_set = []
self.in_stroke = False
self.strokes = []
self.stroke_appended = True
self.ncells = 0
self.zdraw = []
self.removed_cell = []
self.cellcolors = [np.array([255, 255, 255])]
# -- set menus to default -- #
self.color = 0
self.RGBDropDown.setCurrentIndex(self.color)
self.view = 0
self.RGBChoose.button(self.view).setChecked(True)
self.BrushChoose.setCurrentIndex(1)
self.CHCheckBox.setChecked(False)
self.OCheckBox.setEnabled(True)
self.SSCheckBox.setChecked(True)
# -- zero out image stack -- #
self.opacity = 128 # how opaque masks should be
self.outcolor = [200, 200, 255, 200]
self.NZ, self.Ly, self.Lx = 1, 512, 512
if self.autobtn.isChecked():
self.saturation = [[0, 255] for n in range(self.NZ)]
self.currentZ = 0
self.flows = [[], [], [], [], [[]]]
self.stack = np.zeros((1, self.Ly, self.Lx, 3))
# masks matrix
self.layers = 0 * np.ones((1, self.Ly, self.Lx, 4), np.uint8)
# image matrix with a scale disk
self.radii = 0 * np.ones((self.Ly, self.Lx, 4), np.uint8)
self.cellpix = np.zeros((1, self.Ly, self.Lx), np.uint16)
self.outpix = np.zeros((1, self.Ly, self.Lx), np.uint16)
self.ismanual = np.zeros(0, bool)
self.update_plot()
self.filename = []
self.loaded = False
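# rebuild the annotation list with default 'instance_i' names after a fresh segmentation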
def first_load_listView(self):
self.listmodel = Qt.QStandardItemModel(self.ncells,1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def initialize_listView(self):
if self.filename != []:
if os.path.isfile(os.path.splitext(self.filename)[0] + '_instance_list.txt'):
self.list_file_name = str(os.path.splitext(self.filename)[0] + '_instance_list.txt')
self.myCellList_array = np.loadtxt(self.list_file_name, dtype=str)
self.myCellList = self.myCellList_array.tolist()
if len(self.myCellList) == self.ncells:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
else:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
else:
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def initinal_p0(self):
# self.p0.removeItem(self.img)
self.p0.removeItem(self.layer)
self.p0.removeItem(self.scale)
# self.img.deleteLater()
self.layer.deleteLater()
self.scale.deleteLater()
# self.img = pg.ImageItem(viewbox=self.p0, parent=self, axisOrder='row-major')
# self.img.autoDownsample = False
self.layer = guiparts.ImageDraw(viewbox=self.p0, parent=self)
self.layer.setLevels([0, 255])
self.scale = pg.ImageItem(viewbox=self.p0, parent=self)
self.scale.setLevels([0, 255])
self.p0.scene().contextMenuItem = self.p0
# self.p0.addItem(self.img)
self.p0.addItem(self.layer)
self.p0.addItem(self.scale)
def add_list_item(self):
# print(self.ncells)
# self.myCellList = self.listmodel.data()
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
temp_nums = []
for celli in self.myCellList:
if 'instance_' in celli:
temp_nums.append(int(celli.split('instance_')[-1]))
if len(temp_nums) == 0:
now_cellIdx = 0
else:
now_cellIdx = np.max(np.array(temp_nums))
self.myCellList.append('instance_' + str(now_cellIdx+1))
# self.myCellList.append('instance_' + str(self.ncells))
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def delete_list_item(self, index):
# self.myCellList = self.listmodel.data()
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.last_remove_index = index
self.last_remove_item = self.myCellList.pop(index - 1)
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def check_gpu(self, torch=True):
# also decide whether or not to use torch
self.useGPU.setChecked(False)
self.useGPU.setEnabled(False)
if models.use_gpu():
self.useGPU.setEnabled(True)
self.useGPU.setChecked(True)
def check_ftgpu(self, torch=True):
# also decide whether or not to use torch
self.ftuseGPU.setChecked(False)
self.ftuseGPU.setEnabled(False)
if models.use_gpu():
self.ftuseGPU.setEnabled(True)
self.ftuseGPU.setChecked(True)
def clear_all(self):
self.prev_selected = 0
self.selected = 0
# self.layers_undo, self.cellpix_undo, self.outpix_undo = [],[],[]
self.layers = 0 * np.ones((self.NZ, self.Ly, self.Lx, 4), np.uint8)
self.cellpix = np.zeros((self.NZ, self.Ly, self.Lx), np.uint16)
self.outpix = np.zeros((self.NZ, self.Ly, self.Lx), np.uint16)
self.cellcolors = [np.array([255, 255, 255])]
self.ncells = 0
self.initialize_listView()
print('removed all cells')
self.toggle_removals()
self.update_plot()
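# highlight the cell chosen from the annotation list and restore the colour of the previously selected one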
def list_select_cell(self, idx):
self.prev_selected = self.selected
self.selected = idx
# print(idx)
# print(self.prev_selected)
if self.selected > 0:
self.layers[self.cellpix == idx] = np.array([255, 255, 255, 255])
if idx < self.ncells + 1 and self.prev_selected > 0 and self.prev_selected != idx:
self.layers[self.cellpix == self.prev_selected] = np.append(self.cellcolors[self.prev_selected],
self.opacity)
# if self.outlinesOn:
# self.layers[self.outpix == idx] = np.array(self.outcolor).astype(np.uint8)
self.update_plot()
def select_cell(self, idx):
self.prev_selected = self.selected
self.selected = idx
self.listView.selectRow(idx - 1)
# print('the prev-selected is ', self.prev_selected)
if self.selected > 0:
self.layers[self.cellpix == idx] = np.array([255, 255, 255, self.opacity])
print('idx', self.prev_selected, idx)
if idx < self.ncells + 1 and self.prev_selected > 0 and self.prev_selected != idx:
self.layers[self.cellpix == self.prev_selected] = np.append(self.cellcolors[self.prev_selected],
self.opacity)
# if self.outlinesOn:
# self.layers[self.outpix==idx] = np.array(self.outcolor)
self.update_plot()
def unselect_cell(self):
if self.selected > 0:
idx = self.selected
if idx < self.ncells + 1:
self.layers[self.cellpix == idx] = np.append(self.cellcolors[idx], self.opacity)
if self.outlinesOn:
self.layers[self.outpix == idx] = np.array(self.outcolor).astype(np.uint8)
# [0,0,0,self.opacity])
self.update_plot()
self.selected = 0
def remove_cell(self, idx):
# remove from manual array
# self.selected = 0
for z in range(self.NZ):
cp = self.cellpix[z] == idx
op = self.outpix[z] == idx
# remove from mask layer
self.layers[z, cp] = np.array([0, 0, 0, 0])
# remove from self.cellpix and self.outpix
self.cellpix[z, cp] = 0
self.outpix[z, op] = 0
# reduce other pixels by -1
self.cellpix[z, self.cellpix[z] > idx] -= 1
self.outpix[z, self.outpix[z] > idx] -= 1
self.update_plot()
if self.NZ == 1:
self.removed_cell = [self.ismanual[idx - 1], self.cellcolors[idx], np.nonzero(cp), np.nonzero(op)]
self.redo.setEnabled(True)
# remove cell from lists
self.ismanual = np.delete(self.ismanual, idx - 1)
del self.cellcolors[idx]
del self.zdraw[idx - 1]
self.ncells -= 1
print('removed cell %d' % (idx - 1))
self.delete_list_item(index=idx)
if self.ncells == 0:
self.ClearButton.setEnabled(False)
if self.NZ == 1:
iopart._save_sets(self)
# self.select_cell(0)
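# merge two masks: the newly selected cell is drawn into the previously selected one and then removed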
def merge_cells(self, idx):
self.prev_selected = self.selected
self.selected = idx
if self.selected != self.prev_selected:
for z in range(self.NZ):
ar0, ac0 = np.nonzero(self.cellpix[z] == self.prev_selected)
ar1, ac1 = np.nonzero(self.cellpix[z] == self.selected)
touching = np.logical_and((ar0[:, np.newaxis] - ar1) == 1,
(ac0[:, np.newaxis] - ac1) == 1).sum()
print(touching)
ar = np.hstack((ar0, ar1))
ac = np.hstack((ac0, ac1))
if touching:
mask = np.zeros((np.ptp(ar) + 4, np.ptp(ac) + 4), np.uint8)
mask[ar - ar.min() + 2, ac - ac.min() + 2] = 1
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + ar.min() - 2, pvc + ac.min() - 2
else:
vr0, vc0 = np.nonzero(self.outpix[z] == self.prev_selected)
vr1, vc1 = np.nonzero(self.outpix[z] == self.selected)
vr = np.hstack((vr0, vr1))
vc = np.hstack((vc0, vc1))
color = self.cellcolors[self.prev_selected]
self.draw_mask(z, ar, ac, vr, vc, color, idx=self.prev_selected)
self.remove_cell(self.selected)
print('merged two cells')
self.update_plot()
iopart._save_sets(self)
self.undo.setEnabled(False)
self.redo.setEnabled(False)
def undo_remove_cell(self):
if len(self.removed_cell) > 0:
z = 0
ar, ac = self.removed_cell[2]
vr, vc = self.removed_cell[3]
color = self.removed_cell[1]
self.draw_mask(z, ar, ac, vr, vc, color)
self.toggle_mask_ops()
self.cellcolors.append(color)
self.ncells += 1
self.add_list_item()
self.ismanual = np.append(self.ismanual, self.removed_cell[0])
self.zdraw.append([])
print('added back removed cell')
self.update_plot()
iopart._save_sets(self)
self.removed_cell = []
self.redo.setEnabled(False)
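# fine-tune the chosen architecture on the user-provided shot set, optionally with the contrastive strategy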
def fine_tune(self):
tic = time.time()
dataset_dir = self.fine_tune_dir
self.state_label.setText("%s"%(dataset_dir), color='#969696')
if not isinstance(dataset_dir, str):  # TODO: change this into a warning
print('dataset_dir is not provided')
train_epoch = 100 if self.epoch_line.text() == '' else int(self.epoch_line.text())
ft_bz = 8 if self.ftbz_line.text() == '' else int(self.ftbz_line.text())
contrast_on = 1 if self.stmodelchooseBnt.currentText() == 'contrastive' else 0
model_type = self.ftmodelchooseBnt.currentText()
task_mode, postproc_mode, attn_on, dense_on, style_scale_on = utils.process_different_model(model_type)  # task_mode selects the instance representation used by each model
pretrained_model = os.path.join(self.model_dir, model_type)
channels = [self.chan1chooseBnt.currentIndex(), self.chan2chooseBnt.currentIndex()]
print(dataset_dir, train_epoch, channels)
utils.set_manual_seed(5)
try:
print('ft_bz', ft_bz)
shotset = DatasetShot(eval_dir=dataset_dir, class_name=None, image_filter='_img', mask_filter='_masks',
channels=channels,
train_num=train_epoch * ft_bz, task_mode=task_mode, rescale=True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'), resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except ValueError:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.ftbnt.setEnabled(False)
return
shot_gen = DataLoader(dataset=shotset, batch_size=ft_bz, num_workers=0, pin_memory=True)
diameter = shotset.md
print('>>>> mean diameter of this style,', round(diameter, 3))
lr = {'downsample': 0.001, 'upsample': 0.001, 'tasker': 0.001, 'alpha': 0.1}
lr_schedule_gamma = {'downsample': 0.5, 'upsample': 0.5, 'tasker': 0.5, 'alpha': 0.5}
step_size = int(train_epoch * 0.25)
print('step_size', step_size)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.ftuseGPU.isChecked(), model_type=model_type)
self.model.net.pretrained_model = pretrained_model
save_name = model_type + '_' + os.path.basename(dataset_dir)
self.model.net.contrast_on = contrast_on
if contrast_on:
self.model.net.pair_gen = DatasetPairEval(positive_dir=dataset_dir, use_negative_masks=False, gpu=self.ftuseGPU.isChecked(),
rescale=True)
self.model.net.save_name = save_name + '-cft'
else:
self.model.net.save_name = save_name + '-ft'
try:
print('Fine-tuning now... please wait')
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
self.model.finetune(shot_gen=shot_gen, lr=lr, lr_schedule_gamma=lr_schedule_gamma, step_size=step_size, savepath=dataset_dir)
except RuntimeError:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Batch size is too big, please set smaller",
color='#FF6A56')
print("Batch size is too big, please set smaller")
return
print('Finished fine-tuning')
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading3.png'), autoLevels=False, lut=None)
self.state_label.setText("Finished in %0.3fs, model saved at %s/fine-tune/%s" %(time.time()-tic, dataset_dir, self.model.net.save_name), color='#39B54A')
self.ftbnt.setEnabled(False)
self.fine_tune_dir = ''
def get_single_cell(self):
tic = time.time()
try:
data_path = self.single_cell_dir
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
try:
image_names = io.get_image_files(data_path, '_masks', imf='_img')
mask_names, _ = io.get_label_files(image_names, '_img_cp_masks', imf='_img')
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'), resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.single_cell_btn.setEnabled(False)
return
sta = 256
save_dir = os.path.join(os.path.dirname(data_path), 'single')
utils.make_folder(save_dir)
imgs = [io.imread(os.path.join(data_path, image_name)) for image_name in image_names]
masks = [io.imread(os.path.join(data_path, mask_name)) for mask_name in mask_names]
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
for n in trange(len(masks)):
maskn = masks[n]
props = regionprops(maskn)
i_max = maskn.max() + 1
for i in range(1, i_max):
maskn_ = np.zeros_like(maskn)
maskn_[maskn == i] = 1
bbox = props[i - 1]['bbox']
if imgs[n].ndim == 3:
imgn_single = imgs[n][bbox[0]:bbox[2], bbox[1]:bbox[3]] * maskn_[bbox[0]:bbox[2], bbox[1]:bbox[3],
np.newaxis]
else:
imgn_single = imgs[n][bbox[0]:bbox[2], bbox[1]:bbox[3]] * maskn_[bbox[0]:bbox[2], bbox[1]:bbox[3]]
shape = imgn_single.shape
shape_x = shape[0]
shape_y = shape[1]
add_x = sta - shape_x
add_y = sta - shape_y
add_x_l = int(floor(add_x / 2))
add_x_r = int(ceil(add_x / 2))
add_y_l = int(floor(add_y / 2))
add_y_r = int(ceil(add_y / 2))
if add_x > 0 and add_y > 0:
if imgn_single.ndim == 3:
imgn_single = np.pad(imgn_single, ((add_x_l, add_x_r), (add_y_l, add_y_r), (0, 0)), 'constant',
constant_values=(0, 0))
else:
imgn_single = np.pad(imgn_single, ((add_x_l, add_x_r), (add_y_l, add_y_r)), 'constant',
constant_values=(0, 0))
save_name = os.path.join(save_dir, image_names[n].split('query')[-1].split('.')[0][1:] + '_' + str(
i) + '.tif')
cv2.imwrite(save_name, imgn_single)
print('Finish getting single instance')
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading3.png'), autoLevels=False, lut=None)
self.state_label.setText("Finished in %0.3fs, saved at %s"%(time.time()-tic, os.path.dirname(data_path)+'/single') , color='#39B54A')
self.single_cell_btn.setEnabled(False)
def fine_tune_dir_choose(self):
self.fine_tune_dir = QtWidgets.QFileDialog.getExistingDirectory(None,"choose fine-tune data",self.DefaultImFolder)
if self.fine_tune_dir =='':
self.state_label.setText("Choose nothing", color='#FF6A56')
else:
self.state_label.setText("Choose data at %s"%str(self.fine_tune_dir), color='#969696')
self.ftbnt.setEnabled(True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/black.png'), resize=self.resize, X2=0)
def batch_inference_dir_choose(self):
self.batch_inference_dir = QtWidgets.QFileDialog.getExistingDirectory(None, "choose batch segmentation data", self.DefaultImFolder)
if self.batch_inference_dir =='':
self.state_label.setText("Choose nothing", color='#FF6A56')
else:
self.state_label.setText("Choose data at %s"%str(self.batch_inference_dir), color='#969696')
self.batch_inference_bnt.setEnabled(True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/black.png'), resize=self.resize, X2=0)
def single_dir_choose(self):
self.single_cell_dir = QtWidgets.QFileDialog.getExistingDirectory(None, "choose get single instance data", self.DefaultImFolder)
if self.single_cell_dir =='':
self.state_label.setText("Choose nothing", color='#FF6A56')
else:
self.state_label.setText("Choose data at %s"%str(self.single_cell_dir), color='#969696')
self.single_cell_btn.setEnabled(True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/black.png'), resize=self.resize, X2=0)
def model_file_dir_choose(self):
""" model after fine-tuning"""
self.model_file_path = QtWidgets.QFileDialog.getOpenFileName(None, "choose model file", self.DefaultImFolder)
if self.model_file_path[0] =='':
self.state_label.setText("Choose nothing", color='#FF6A56')
else:
self.state_label.setText("Choose model at %s"%str(self.model_file_path[0]), color='#969696')
def reset_pretrain_model(self):
self.model_file_path = None
self.state_label.setText("Reset to pre-trained model", color='#969696')
def remove_stroke(self, delete_points=True):
# self.current_stroke = get_unique_points(self.current_stroke)
stroke = np.array(self.strokes[-1])
cZ = stroke[0, 0]
outpix = self.outpix[cZ][stroke[:, 1], stroke[:, 2]] > 0
self.layers[cZ][stroke[~outpix, 1], stroke[~outpix, 2]] = np.array([0, 0, 0, 0])
if self.masksOn:
cellpix = self.cellpix[cZ][stroke[:, 1], stroke[:, 2]]
ccol = np.array(self.cellcolors.copy())
if self.selected > 0:
ccol[self.selected] = np.array([255, 255, 255])
col2mask = ccol[cellpix]
col2mask = np.concatenate((col2mask, self.opacity * (cellpix[:, np.newaxis] > 0)), axis=-1)
self.layers[cZ][stroke[:, 1], stroke[:, 2], :] = col2mask
if self.outlinesOn:
self.layers[cZ][stroke[outpix, 1], stroke[outpix, 2]] = np.array(self.outcolor)
if delete_points:
self.current_point_set = self.current_point_set[:-1 * (stroke[:, -1] == 1).sum()]
del self.strokes[-1]
self.update_plot()
def brush_choose(self):
self.brush_size = self.BrushChoose.currentIndex() * 2 + 1
if self.loaded:
self.layer.setDrawKernel(kernel_size=self.brush_size)
self.update_plot()
# if self.eraser_button.isChecked():
# print("will change")
# self.cur_size = self.brush_size * 6
# cursor = Qt.QPixmap("./assets/eraser.png")
# cursor_scaled = cursor.scaled(self.cur_size, self.cur_size)
# cursor_set = Qt.QCursor(cursor_scaled, self.cur_size/2, self.cur_size/2)
# QtWidgets.QApplication.setOverrideCursor(cursor_set)
# self.update_plot()
#
# def toggle_server(self, off=False):
# if SERVER_UPLOAD:
# if self.ncells>0 and not off:
# self.saveServer.setEnabled(True)
# self.ServerButton.setEnabled(True)
#
# else:
# self.saveServer.setEnabled(False)
# self.ServerButton.setEnabled(False)
def toggle_mask_ops(self):
self.toggle_removals()
# self.toggle_server()
def load_cell_list(self):
self.list_file_name = QtWidgets.QFileDialog.getOpenFileName(None, "Load instance list", self.DefaultImFolder)
self.myCellList_array = np.loadtxt(self.list_file_name[0], dtype=str)
self.myCellList = self.myCellList_array.tolist()
if len(self.myCellList) == self.ncells:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def toggle_scale(self):
if self.scale_on:
self.p0.removeItem(self.scale)
self.scale_on = False
else:
self.p0.addItem(self.scale)
self.scale_on = True
def toggle_removals(self):
if self.ncells > 0:
# self.ClearButton.setEnabled(True)
self.remcell.setEnabled(True)
self.undo.setEnabled(True)
else:
# self.ClearButton.setEnabled(False)
self.remcell.setEnabled(False)
self.undo.setEnabled(False)
def remove_action(self):
if self.selected > 0:
self.remove_cell(self.selected)
def undo_action(self):
if (len(self.strokes) > 0 and
self.strokes[-1][0][0] == self.currentZ):
self.remove_stroke()
else:
# remove previous cell
if self.ncells > 0:
self.remove_cell(self.ncells)
def undo_remove_action(self):
self.undo_remove_cell()
def get_files(self):
images = []
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.png'))
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.jpg'))
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.jpeg'))
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.tif'))
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.tiff'))
images.extend(glob.glob(os.path.dirname(self.filename) + '/*.jfif'))
images = natsorted(images)
fnames = [os.path.split(images[k])[-1] for k in range(len(images))]
f0 = os.path.split(self.filename)[-1]
idx = np.nonzero(np.array(fnames) == f0)[0][0]
return images, idx
def get_prev_image(self):
images, idx = self.get_files()
idx = (idx - 1) % len(images)
iopart._load_image(self, filename=images[idx])
def get_next_image(self):
images, idx = self.get_files()
idx = (idx + 1) % len(images)
iopart._load_image(self, filename=images[idx])
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
files = [u.toLocalFile() for u in event.mimeData().urls()]
# print(files)
if os.path.splitext(files[0])[-1] == '.npy':
iopart._load_seg(self, filename=files[0])
self.initialize_listView()
if os.path.isdir(files[0]):
print("loading a folder")
self.ImFolder = files[0]
try:
self.OpenDirDropped()
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/black.png'), resize=self.resize, X2=0)
self.state_label.setText("No image found, please choose right data path",
color='#FF6A56')
else:
# print(len(files))
# print(files[0])
self.ImFolder = os.path.dirname(files[0])
try:
self.OpenDirDropped(os.path.basename(files[0]))
print(files[0], self.ImNameSet[self.CurImId])
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
fname = os.path.basename(files[0])
fsuffix = os.path.splitext(fname)[-1]
if fsuffix in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif']:
if '_mask' in fname:
self.state_label.setText("This is a mask file, autoload corresponding image", color='#FDC460')
else:
self.state_label.setText("This format is not supported", color='#FF6A56')
except ValueError:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/black.png'), resize=self.resize, X2=0)
self.state_label.setText("No corresponding iamge for this mask file", color='#FF6A56')
def toggle_masks(self):
if self.MCheckBox.isChecked():
self.masksOn = True
else:
self.masksOn = False
if self.OCheckBox.isChecked():
self.outlinesOn = True
else:
self.outlinesOn = False
if not self.masksOn and not self.outlinesOn:
self.p0.removeItem(self.layer)
self.layer_off = True
else:
if self.layer_off:
self.p0.addItem(self.layer)
self.redraw_masks(masks=self.masksOn, outlines=self.outlinesOn)
if self.loaded:
self.update_plot()
def draw_mask(self, z, ar, ac, vr, vc, color, idx=None):
''' draw single mask using outlines and area '''
if idx is None:
idx = self.ncells + 1
self.cellpix[z][vr, vc] = idx
self.cellpix[z][ar, ac] = idx
self.outpix[z][vr, vc] = idx
if self.masksOn:
self.layers[z][ar, ac, :3] = color
# print(self.layers.shape)
# print(self.layers[z][ar, ac, :3].shape)
# print(self.layers[z][ar,ac,:3])
self.layers[z][ar, ac, -1] = self.opacity
# print(z)
if self.outlinesOn:
self.layers[z][vr, vc] = np.array(self.outcolor)
def draw_masks(self):
self.cellcolors = np.array(self.cellcolors)
self.layers[..., :3] = self.cellcolors[self.cellpix, :]
self.layers[..., 3] = self.opacity * (self.cellpix > 0).astype(np.uint8)
self.cellcolors = list(self.cellcolors)
self.layers[self.outpix > 0] = np.array(self.outcolor)
if self.selected > 0:
self.layers[self.outpix == self.selected] = np.array([0, 0, 0, self.opacity])
def redraw_masks(self, masks=True, outlines=True):
if not outlines and masks:
self.draw_masks()
self.cellcolors = np.array(self.cellcolors)
self.layers[..., :3] = self.cellcolors[self.cellpix, :]
self.layers[..., 3] = self.opacity * (self.cellpix > 0).astype(np.uint8)
self.cellcolors = list(self.cellcolors)
if self.selected > 0:
self.layers[self.cellpix == self.selected] = np.array([255, 255, 255, self.opacity])
else:
if masks:
self.layers[..., 3] = self.opacity * (self.cellpix > 0).astype(np.uint8)
else:
self.layers[..., 3] = 0
self.layers[self.outpix > 0] = np.array(self.outcolor).astype(np.uint8)
def update_plot_without_mask(self):
self.Ly, self.Lx, _ = self.stack[self.currentZ].shape
if self.view == 0:
image = self.stack[self.currentZ]
if self.color == 0:
if self.onechan:
# show single channel
image = self.stack[self.currentZ][:, :, 0]
self.img.setImage(image, autoLevels=False, lut=None)
elif self.color == 1:
image = image.astype(np.float32).mean(axis=-1).astype(np.uint8)
self.img.setImage(image, autoLevels=False, lut=None)
elif self.color == 2:
image = image.astype(np.float32).mean(axis=-1).astype(np.uint8)
self.img.setImage(image, autoLevels=False, lut=self.cmap[0])
elif self.color > 2:
image = image[:, :, self.color - 3]
self.img.setImage(image, autoLevels=False, lut=self.cmap[self.color - 2])
self.img.setLevels(self.saturation[self.currentZ])
else:
image = np.zeros((self.Ly, self.Lx), np.uint8)
if len(self.flows) >= self.view - 1 and len(self.flows[self.view - 1]) > 0:
image = self.flows[self.view - 1][self.currentZ]
if self.view > 1:
self.img.setImage(image, autoLevels=False, lut=self.bwr)
else:
self.img.setImage(image, autoLevels=False, lut=None)
self.img.setLevels([0.0, 255.0])
self.scale.setImage(self.radii, autoLevels=False)
self.scale.setLevels([0.0, 255.0])
if self.masksOn or self.outlinesOn:
self.layer.setImage(self.layers[self.currentZ], autoLevels=False)
self.win.show()
self.show()
def update_plot(self):
self.Ly, self.Lx, _ = self.stack[self.currentZ].shape
if self.view == 0:
image = self.stack[self.currentZ]
if self.color == 0:
if self.onechan:
# show single channel
image = self.stack[self.currentZ][:, :, 0]
self.img.setImage(image, autoLevels=False, lut=None)
elif self.color == 1:
image = image.astype(np.float32).mean(axis=-1).astype(np.uint8)
self.img.setImage(image, autoLevels=False, lut=None)
elif self.color == 2:
image = image.astype(np.float32).mean(axis=-1).astype(np.uint8)
self.img.setImage(image, autoLevels=False, lut=self.cmap[0])
elif self.color > 2:
image = image[:, :, self.color - 3]
self.img.setImage(image, autoLevels=False, lut=self.cmap[self.color - 2])
self.img.setLevels(self.saturation[self.currentZ])
else:
image = np.zeros((self.Ly, self.Lx), np.uint8)
if len(self.flows) >= self.view - 1 and len(self.flows[self.view - 1]) > 0:
image = self.flows[self.view - 1][self.currentZ]
if self.view > 1:
self.img.setImage(image, autoLevels=False, lut=self.bwr)
else:
self.img.setImage(image, autoLevels=False, lut=None)
self.img.setLevels([0.0, 255.0])
self.scale.setImage(self.radii, autoLevels=False)
self.scale.setLevels([0.0, 255.0])
if self.masksOn or self.outlinesOn:
self.layer.setImage(self.layers[self.currentZ], autoLevels=False)
self.win.show()
self.show()
def compute_scale(self):
self.diameter = float(self.Diameter.text())
self.pr = int(float(self.Diameter.text()))
self.radii = np.zeros((self.Ly + self.pr, self.Lx, 4), np.uint8)
yy, xx = plot.disk([self.pr / 2 - 1, self.Ly - self.pr / 2 + 1],
self.pr / 2, self.Ly + self.pr, self.Lx)
self.radii[yy, xx, 0] = 255
self.radii[yy, xx, -1] = 255
self.update_plot()
self.p0.setYRange(0, self.Ly + self.pr)
self.p0.setXRange(0, self.Lx)
def auto_save(self):
if self.autosaveOn:
print('Autosaved')
self.save_cell_list()
iopart._save_sets(self)
iopart._save_png(self)
def save_all(self):
self.save_cell_list()
iopart._save_sets(self)
iopart._save_png(self)
self.state_label.setText('Save masks/npy/list successfully', color='#39B54A')
def make_cmap(cm=0):
# make a single channel colormap
r = np.arange(0, 256)
color = np.zeros((256, 3))
color[:, cm] = r
color = color.astype(np.uint8)
cmap = pg.ColorMap(pos=np.linspace(0.0, 255, 256), color=color)
return cmap
def interpZ(mask, zdraw):
""" find nearby planes and average their values using grid of points
zfill is in ascending order
"""
ifill = np.ones(mask.shape[0], bool)
zall = np.arange(0, mask.shape[0], 1, int)
ifill[zdraw] = False
zfill = zall[ifill]
zlower = zdraw[np.searchsorted(zdraw, zfill, side='left') - 1]
zupper = zdraw[np.searchsorted(zdraw, zfill, side='right')]
for k, z in enumerate(zfill):
Z = zupper[k] - zlower[k]
zl = (z - zlower[k]) / Z
plower = avg3d(mask[zlower[k]]) * (1 - zl)
pupper = avg3d(mask[zupper[k]]) * zl
mask[z] = (plower + pupper) > 0.33
# Ml, norml = avg3d(mask[zlower[k]], zl)
# Mu, normu = avg3d(mask[zupper[k]], 1-zl)
# mask[z] = (Ml + Mu) / (norml + normu) > 0.5
return mask, zfill
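# Illustrative usage sketch (not part of the original file): interpolating the
# un-drawn z-planes of a small volumetric mask from two hand-drawn planes.
# The mask shape and the drawn plane indices below are hypothetical.
def _example_interpZ():
    mask = np.zeros((5, 16, 16), bool)
    mask[0, 4:12, 4:12] = True   # plane drawn by the user
    mask[4, 6:14, 6:14] = True   # plane drawn by the user
    zdraw = np.array([0, 4])
    filled, zfilled = interpZ(mask.copy(), zdraw)
    print("interpolated planes:", zfilled)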
def avg3d(C):
""" smooth value of c across nearby points
(c is center of grid directly below point)
b -- a -- b
a -- c -- a
b -- a -- b
"""
Ly, Lx = C.shape
# pad T by 2
T = np.zeros((Ly + 2, Lx + 2), np.float32)
M = np.zeros((Ly, Lx), np.float32)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
"""
The tqsdk.ta module contains a collection of commonly used technical-indicator functions.
"""
import numpy as np
import pandas as pd
import numba
from tqsdk import ta_func
def ATR(df, n):
"""平均真实波幅"""
new_df = pd.DataFrame()
pre_close = df["close"].shift(1)
new_df["tr"] = np.where(df["high"] - df["low"] > np.absolute(pre_close - df["high"]),
np.where(df["high"] - df["low"] > np.absolute(pre_close - df["low"]),
df["high"] - df["low"], np.absolute(pre_close - df["low"])),
np.where(np.absolute(pre_close - df["high"]) > np.absolute(pre_close - df["low"]),
np.absolute(pre_close - df["high"]), np.absolute(pre_close - df["low"])))
new_df["atr"] = ta_func.ma(new_df["tr"], n)
return new_df
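# Illustrative usage sketch (not part of the original module): computing a 3-period
# ATR on a small synthetic OHLC frame. The column names match those expected above;
# the price values themselves are made up.
def _example_atr():
    df = pd.DataFrame({
        "high": [10.6, 10.9, 11.1, 11.0, 11.4, 11.6],
        "low": [10.1, 10.4, 10.7, 10.5, 10.9, 11.1],
        "close": [10.3, 10.8, 10.9, 10.9, 11.2, 11.5],
    })
    print(ATR(df, 3)["atr"])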
def BIAS(df, n):
"""乖离率"""
ma1 = ta_func.ma(df["close"], n)
new_df = pd.DataFrame(data=list((df["close"] - ma1) / ma1 * 100), columns=["bias"])
return new_df
def BOLL(df, n, p):
"""布林线"""
new_df = pd.DataFrame()
mid = ta_func.ma(df["close"], n)
std = df["close"].rolling(n).std()
new_df["mid"] = mid
new_df["top"] = mid + p * std
new_df["bottom"] = mid - p * std
return new_df
def DMI(df, n, m):
"""动向指标"""
new_df = pd.DataFrame()
new_df["atr"] = ATR(df, n)["atr"]
pre_high = df["high"].shift(1)
pre_low = df["low"].shift(1)
hd = df["high"] - pre_high
ld = pre_low - df["low"]
admp = ta_func.ma(pd.Series(np.where((hd > 0) & (hd > ld), hd, 0)), n)
admm = ta_func.ma(pd.Series(np.where((ld > 0) & (ld > hd), ld, 0)), n)
new_df["pdi"] = pd.Series(np.where(new_df["atr"] > 0, admp / new_df["atr"] * 100, np.NaN)).ffill()
new_df["mdi"] = pd.Series(np.where(new_df["atr"] > 0, admm / new_df["atr"] * 100, np.NaN)).ffill()
ad = pd.Series(np.absolute(new_df["mdi"] - new_df["pdi"]) / (new_df["mdi"] + new_df["pdi"]) * 100)
new_df["adx"] = ta_func.ma(ad, m)
new_df["adxr"] = (new_df["adx"] + new_df["adx"].shift(m)) / 2
return new_df
def KDJ(df, n, m1, m2):
"""随机指标"""
new_df = pd.DataFrame()
hv = df["high"].rolling(n).max()
lv = df["low"].rolling(n).min()
rsv = pd.Series(np.where(hv == lv, 0, (df["close"] - lv) / (hv - lv) * 100))
new_df["k"] = ta_func.sma(rsv, m1, 1)
new_df["d"] = ta_func.sma(new_df["k"], m2, 1)
new_df["j"] = 3 * new_df["k"] - 2 * new_df["d"]
return new_df
def MACD(df, short, long, m):
"""异同移动平均线"""
new_df = pd.DataFrame()
eshort = ta_func.ema(df["close"], short)
elong = ta_func.ema(df["close"], long)
new_df["diff"] = eshort - elong
new_df["dea"] = ta_func.ema(new_df["diff"], m)
new_df["bar"] = 2 * (new_df["diff"] - new_df["dea"])
return new_df
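# Illustrative usage sketch (not part of the original module): MACD only needs a
# "close" column; the synthetic random-walk series below is hypothetical.
def _example_macd():
    close = 10 + np.cumsum(np.random.default_rng(0).normal(0, 0.1, 60))
    out = MACD(pd.DataFrame({"close": close}), short=12, long=26, m=9)
    print(out[["diff", "dea", "bar"]].tail())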
@numba.njit
def _sar(open, high, low, close, range_high, range_low, n, step, maximum):
sar = np.empty_like(close)
sar[:n] = np.NAN
af = 0
ep = 0
trend = 1 if (close[n] - open[n]) > 0 else -1
if trend == 1:
sar[n] = min(range_low[n - 2], low[n - 1])
else:
sar[n] = max(range_high[n - 2], high[n - 1])
for i in range(n, len(sar)):
if i != n:
if abs(trend) > 1:
sar[i] = sar[i - 1] + af * (ep - sar[i - 1])
elif trend == 1:
sar[i] = min(range_low[i - 2], low[i - 1])
elif trend == -1:
sar[i] = max(range_high[i - 2], high[i - 1])
if trend > 0:
if sar[i - 1] > low[i]:
ep = low[i]
af = step
trend = -1
else:
ep = high[i]
af = min(af + step, maximum) if ep > range_high[i - 1] else af
trend += 1
else:
if sar[i - 1] < high[i]:
ep = high[i]
af = step
trend = 1
else:
ep = low[i]
af = min(af + step, maximum) if ep < range_low[i - 1] else af
trend -= 1
return sar
def SAR(df, n, step, max):
"""抛物转向"""
range_high = df["high"].rolling(n - 1).max()
range_low = df["low"].rolling(n - 1).min()
sar = _sar(df["open"].values, df["high"].values, df["low"].values, df["close"].values, range_high.values,
range_low.values, n, step, max)
new_df = pd.DataFrame(data=sar, columns=["sar"])
return new_df
def WR(df, n):
"""威廉指标"""
hn = df["high"].rolling(n).max()
ln = df["low"].rolling(n).min()
new_df = pd.DataFrame(data=list((hn - df["close"]) / (hn - ln) * (-100)), columns=["wr"])
return new_df
def RSI(df, n):
"""相对强弱指标"""
lc = df["close"].shift(1)
rsi = ta_func.sma(pd.Series(np.where(df["close"] - lc > 0, df["close"] - lc, 0)), n, 1) / \
ta_func.sma(np.absolute(df["close"] - lc), n, 1) * 100
new_df = pd.DataFrame(data=rsi, columns=["rsi"])
return new_df
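# Illustrative usage sketch (not part of the original module): a 5-period RSI on a
# short synthetic closing-price series (values are hypothetical).
def _example_rsi():
    df = pd.DataFrame({"close": [10.0, 10.2, 10.1, 10.4, 10.6, 10.5, 10.8, 11.0]})
    print(RSI(df, 5)["rsi"])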
def ASI(df):
"""振动升降指标"""
lc = df["close"].shift(1) # 上一交易日的收盘价
aa = np.absolute(df["high"] - lc)
bb = np.absolute(df["low"] - lc)
cc = np.absolute(df["high"] - df["low"].shift(1))
dd = np.absolute(lc - df["open"].shift(1))
r = np.where((aa > bb) & (aa > cc), aa + bb / 2 + dd / 4,
np.where((bb > cc) & (bb > aa), bb + aa / 2 + dd / 4, cc + dd / 4))
x = df["close"] - lc + (df["close"] - df["open"]) / 2 + lc - df["open"].shift(1)
si = np.where(r == 0, 0, 16 * x / r * np.where(aa > bb, aa, bb))
new_df = pd.DataFrame(data=list(pd.Series(si).cumsum()), columns=["asi"])
return new_df
def VR(df, n):
"""VR 容量比率"""
lc = df["close"].shift(1)
vr = pd.Series(np.where(df["close"] > lc, df["volume"], 0)).rolling(n).sum() / pd.Series(
np.where(df["close"] <= lc, df["volume"], 0)).rolling(n).sum() * 100
new_df = pd.DataFrame(data=list(vr), columns=["vr"])
return new_df
def ARBR(df, n):
"""人气意愿指标"""
new_df = pd.DataFrame()
new_df["ar"] = (df["high"] - df["open"]).rolling(n).sum() / (df["open"] - df["low"]).rolling(n).sum() * 100
new_df["br"] = pd.Series(
np.where(df["high"] - df["close"].shift(1) > 0, df["high"] - df["close"].shift(1), 0)).rolling(
n).sum() / pd.Series(
np.where(df["close"].shift(1) - df["low"] > 0, df["close"].shift(1) - df["low"], 0)).rolling(n).sum() * 100
return new_df
def DMA(df, short, long, m):
"""平均线差"""
new_df = pd.DataFrame()
new_df["ddd"] = ta_func.ma(df["close"], short) - ta_func.ma(df["close"], long)
new_df["ama"] = ta_func.ma(new_df["ddd"], m)
return new_df
def EXPMA(df, p1, p2):
"""指数加权移动平均线组合"""
new_df = pd.DataFrame()
new_df["ma1"] = ta_func.ema(df["close"], p1)
new_df["ma2"] = ta_func.ema(df["close"], p2)
return new_df
def CR(df, n, m):
"""CR能量"""
new_df = pd.DataFrame()
mid = (df["high"] + df["low"] + df["close"]) / 3
new_df["cr"] = pd.Series(np.where(0 > df["high"] - mid.shift(1), 0, df["high"] - mid.shift(1))).rolling(
n).sum() / pd.Series(np.where(0 > mid.shift(1) - df["low"], 0, mid.shift(1) - df["low"])).rolling(n).sum() * 100
new_df["crma"] = ta_func.ma(new_df["cr"], m).shift(int(m / 2.5 + 1))
return new_df
def CCI(df, n):
"""顺势指标"""
typ = (df["high"] + df["low"] + df["close"]) / 3
ma = ta_func.ma(typ, n)
def mad(x):
return np.fabs(x - x.mean()).mean()
md = typ.rolling(window=n).apply(mad, raw=True) # mean absolute deviation
new_df = pd.DataFrame(data=list((typ - ma) / (md * 0.015)), columns=["cci"])
return new_df
def OBV(df):
"""能量潮"""
lc = df["close"].shift(1)
obv = (np.where(df["close"] > lc, df["volume"], np.where(df["close"] < lc, -df["volume"], 0))).cumsum()
new_df = pd.DataFrame(data=obv, columns=["obv"])
return new_df
def CDP(df, n):
"""逆势操作"""
new_df = pd.DataFrame()
pt = df["high"].shift(1) - df["low"].shift(1)
cdp = (df["high"].shift(1) + df["low"].shift(1) + df["close"].shift(1)) / 3
new_df["ah"] = ta_func.ma(cdp + pt, n)
new_df["al"] = ta_func.ma(cdp - pt, n)
new_df["nh"] = ta_func.ma(2 * cdp - df["low"], n)
new_df["nl"] = ta_func.ma(2 * cdp - df["high"], n)
return new_df
def HCL(df, n):
"""均线通道"""
new_df = pd.DataFrame()
new_df["mah"] = ta_func.ma(df["high"], n)
new_df["mal"] = ta_func.ma(df["low"], n)
new_df["mac"] = ta_func.ma(df["close"], n)
return new_df
def ENV(df, n, k):
"""包略线 (Envelopes)"""
new_df = pd.DataFrame()
new_df["upper"] = ta_func.ma(df["close"], n) * (1 + k / 100)
new_df["lower"] = ta_func.ma(df["close"], n) * (1 - k / 100)
return new_df
def MIKE(df, n):
"""麦克指标"""
new_df = pd.DataFrame()
typ = (df["high"] + df["low"] + df["close"]) / 3
ll = df["low"].rolling(n).min()
hh = df["high"].rolling(n).max()
new_df["wr"] = typ + (typ - ll)
new_df["mr"] = typ + (hh - ll)
new_df["sr"] = 2 * hh - ll
new_df["ws"] = typ - (hh - typ)
new_df["ms"] = typ - (hh - ll)
new_df["ss"] = 2 * ll - hh
return new_df
def PUBU(df, m):
"""瀑布线"""
pb = (ta_func.ema(df["close"], m) + ta_func.ma(df["close"], m * 2) + ta_func.ma(df["close"], m * 4)) / 3
new_df = pd.DataFrame(data=list(pb), columns=["pb"])
return new_df
def BBI(df, n1, n2, n3, n4):
"""多空指数"""
bbi = (ta_func.ma(df["close"], n1) + ta_func.ma(df["close"], n2) + ta_func.ma(df["close"], n3) + ta_func.ma(
df["close"], n4)) / 4
new_df = pd.DataFrame(data=list(bbi), columns=["bbi"])
return new_df
def DKX(df, m):
"""多空线"""
new_df = pd.DataFrame()
a = (3 * df["close"] + df["high"] + df["low"] + df["open"]) / 6
new_df["b"] = (20 * a + 19 * a.shift(1) + 18 * a.shift(2) + 17 * a.shift(3) + 16 * a.shift(4) + 15 * a.shift(
5) + 14 * a.shift(6)
+ 13 * a.shift(7) + 12 * a.shift(8) + 11 * a.shift(9) + 10 * a.shift(10) + 9 * a.shift(
11) + 8 * a.shift(
12) + 7 * a.shift(13) + 6 * a.shift(14) + 5 * a.shift(15) + 4 * a.shift(16) + 3 * a.shift(
17) + 2 * a.shift(18) + a.shift(19)
) / 210
new_df["d"] = ta_func.ma(new_df["b"], m)
return new_df
def BBIBOLL(df, n, m):
"""多空布林线"""
new_df = pd.DataFrame()
new_df["bbiboll"] = (ta_func.ma(df["close"], 3) + ta_func.ma(df["close"], 6) + ta_func.ma(df["close"],
12) + ta_func.ma(
df["close"], 24)) / 4
new_df["upr"] = new_df["bbiboll"] + m * new_df["bbiboll"].rolling(n).std()
new_df["dwn"] = new_df["bbiboll"] - m * new_df["bbiboll"].rolling(n).std()
return new_df
def ADTM(df, n, m):
"""动态买卖气指标"""
new_df = pd.DataFrame()
dtm = np.where(df["open"] < df["open"].shift(1), 0,
np.where(df["high"] - df["open"] > df["open"] - df["open"].shift(1), df["high"] - df["open"],
df["open"] - df["open"].shift(1)))
dbm = np.where(df["open"] >= df["open"].shift(1), 0,
np.where(df["open"] - df["low"] > df["open"] - df["open"].shift(1), df["open"] - df["low"],
df["open"] - df["open"].shift(1)))
stm = pd.Series(dtm).rolling(n).sum()
sbm = pd.Series(dbm).rolling(n).sum()
new_df["adtm"] = np.where(stm > sbm, (stm - sbm) / stm, np.where(stm == sbm, 0, (stm - sbm) / sbm))
new_df["adtmma"] = ta_func.ma(new_df["adtm"], m)
return new_df
def B3612(df):
"""三减六日乖离率"""
new_df = pd.DataFrame()
new_df["b36"] = ta_func.ma(df["close"], 3) - ta_func.ma(df["close"], 6)
new_df["b612"] = ta_func.ma(df["close"], 6) - ta_func.ma(df["close"], 12)
return new_df
def DBCD(df, n, m, t):
"""异同离差乖离率"""
new_df = pd.DataFrame()
bias = (df["close"] - ta_func.ma(df["close"], n)) / ta_func.ma(df["close"], n)
dif = bias - bias.shift(m)
new_df["dbcd"] = ta_func.sma(dif, t, 1)
new_df["mm"] = ta_func.ma(new_df["dbcd"], 5)
return new_df
def DDI(df, n, n1, m, m1):
"""方向标准离差指数"""
new_df = pd.DataFrame()
tr = np.where(np.absolute(df["high"] - df["high"].shift(1)) > np.absolute(df["low"] - df["low"].shift(1)),
np.absolute(df["high"] - df["high"].shift(1)), np.absolute(df["low"] - df["low"].shift(1)))
dmz = np.where((df["high"] + df["low"]) <= (df["high"].shift(1) + df["low"].shift(1)), 0, tr)
dmf = np.where((df["high"] + df["low"]) >= (df["high"].shift(1) + df["low"].shift(1)), 0, tr)
diz = pd.Series(dmz).rolling(n).sum() / (pd.Series(dmz).rolling(n).sum() + pd.Series(dmf).rolling(n).sum())
dif = pd.Series(dmf).rolling(n).sum() / (pd.Series(dmf).rolling(n).sum() + pd.Series(dmz).rolling(n).sum())
new_df["ddi"] = diz - dif
new_df["addi"] = ta_func.sma(new_df["ddi"], n1, m)
new_df["ad"] = ta_func.ma(new_df["addi"], m1)
return new_df
def KD(df, n, m1, m2):
"""随机指标"""
new_df = pd.DataFrame()
hv = df["high"].rolling(n).max()
lv = df["low"].rolling(n).min()
rsv = pd.Series(np.where(hv == lv, 0, (df["close"] - lv) / (hv - lv) * 100))
new_df["k"] = ta_func.sma(rsv, m1, 1)
new_df["d"] = ta_func.sma(new_df["k"], m2, 1)
return new_df
def LWR(df, n, m):
"""威廉指标"""
hv = df["high"].rolling(n).max()
lv = df["low"].rolling(n).min()
rsv = pd.Series(np.where(hv == lv, 0, (df["close"] - hv) / (hv - lv) * 100))
new_df = pd.DataFrame(data=list(ta_func.sma(rsv, m, 1)), columns=["lwr"])
return new_df
def MASS(df, n1, n2):
"""梅斯线"""
ema1 = ta_func.ema(df["high"] - df["low"], n1)
ema2 = ta_func.ema(ema1, n1)
new_df = pd.DataFrame(data=list((ema1 / ema2).rolling(n2).sum()), columns=["mass"])
return new_df
def MFI(df, n):
"""资金流量指标"""
typ = (df["high"] + df["low"] + df["close"]) / 3
mr = pd.Series(np.where(typ > typ.shift(1), typ * df["volume"], 0)).rolling(n).sum() / pd.Series(
np.where(typ < typ.shift(1), typ * df["volume"], 0)).rolling(n).sum()
new_df = pd.DataFrame(data=list(100 - (100 / (1 + mr))), columns=["mfi"])
return new_df
def MI(df, n):
"""动量指标"""
new_df = pd.DataFrame()
new_df["a"] = df["close"] - df["close"].shift(n)
new_df["mi"] = ta_func.sma(new_df["a"], n, 1)
return new_df
def MICD(df, n, n1, n2):
"""异同离差动力指数"""
new_df = pd.DataFrame()
mi = df["close"] - df["close"].shift(1)
ami = ta_func.sma(mi, n, 1)
new_df["dif"] = ta_func.ma(ami.shift(1), n1) - ta_func.ma(ami.shift(1), n2)
new_df["micd"] = ta_func.sma(new_df["dif"], 10, 1)
return new_df
def MTM(df, n, n1):
"""MTM动力指标"""
new_df = pd.DataFrame()
new_df["mtm"] = df["close"] - df["close"].shift(n)
new_df["mtmma"] = ta_func.ma(new_df["mtm"], n1)
return new_df
def PRICEOSC(df, long, short):
"""价格震荡指数 Price Oscillator"""
ma_s = ta_func.ma(df["close"], short)
ma_l = ta_func.ma(df["close"], long)
new_df = pd.DataFrame(data=list((ma_s - ma_l) / ma_s * 100), columns=["priceosc"])
return new_df
def PSY(df, n, m):
"""心理线"""
new_df = pd.DataFrame()
new_df["psy"] = ta_func.count(df["close"] > df["close"].shift(1), n) / n * 100
new_df["psyma"] = ta_func.ma(new_df["psy"], m)
return new_df
def QHLSR(df):
"""阻力指标"""
new_df = pd.DataFrame()
qhl = (df["close"] - df["close"].shift(1)) - (df["volume"] - df["volume"].shift(1)) * (
df["high"].shift(1) - df["low"].shift(1)) / df["volume"].shift(1)
a = pd.Series(np.where(qhl > 0, qhl, 0)).rolling(5).sum()
e = pd.Series(np.where(qhl > 0, qhl, 0))
"""Mactices functions.
Functions
---------
- calc_chi_sq
- transform_string_to_r_b
- transform_string_to_digits
- transform_fraction_with_label_to_string
- transform_digits_to_string
- transform_r_b_to_string
- calc_mRmCmRT
- calc_rotation_matrix_ij_by_euler_angles
- calc_euler_angles_by_rotation_matrix_ij
- calc_determinant_matrix_ij
- calc_inverse_matrix_ij
- calc_rotation_matrix_ij_around_axis
- calc_product_matrices
- calc_product_matrix_vector
- calc_vector_angle
- calc_vector_product
- scalar_product
- calc_rotation_matrix_by_two_vectors
- tri_linear_interpolation
- ortogonalize_matrix
- calc_moment_2d_by_susceptibility
- calc_phase_3d
"""
import numpy
from fractions import Fraction
from typing import Tuple
def calc_chi_sq(y_exp, y_error, y_model, der_y=None):
"""Calc chi_sq."""
y_diff = (y_exp - y_model)/y_error
chi_sq = (numpy.square(numpy.abs(y_diff))).sum()
if der_y is not None:
der_chi_sq = (-2.*(y_diff/y_error)[:, numpy.newaxis]*der_y).sum(axis=0)
else:
der_chi_sq = None
return chi_sq, der_chi_sq
def transform_string_to_r_b(name: str, labels=("x", "y", "z")) -> Tuple:
"""Transform string to rotation part and offset.
Example
-------
x,y,-z -> 0.0 1 0 0 0.0 0 1 0 0.0 0 0 -1
"""
l_name = "".join(name.strip().split()).lstrip("(").rstrip(")").split(",")
rij, bi = [], []
for _name in l_name:
coefficients, offset = transform_string_to_digits(_name, labels)
rij.append(coefficients)
bi.append(offset)
res_rij = numpy.array(rij, dtype=object)
res_bi = numpy.array(bi, dtype=object)
return res_rij, res_bi
def transform_string_to_digits(name: str, labels: Tuple[str]):
"""Transform string to digits.
Multiplication has to be implicit, division must be explicit.
White space within the string is optional.
>>> transform_string_to_digits('-a+2x/7-0.7t+4', ('a', 'x', 't'))
"""
coefficients = []
offset = Fraction(0, 1).limit_denominator(10)
l_name = name.strip().replace(" ", "").replace("+", " +").replace(
"-", " -").split()
for _label in labels:
res = Fraction(0, 1).limit_denominator(10)
for _name in l_name:
if _label in _name:
s_1 = _name.replace(_label, "").replace("+/", "+1/").replace(
"-/", "-1/")
if s_1 == "":
s_1 = "1"
if s_1.startswith("/"):
s_1 = "1" + s_1
if s_1.endswith("+"):
s_1 = s_1 + "1"
if s_1.endswith("-"):
s_1 = s_1 + "1"
res += Fraction(s_1).limit_denominator(10)
coefficients.append(res)
res = Fraction(0, 1).limit_denominator(10)
for _name in l_name:
flag = all([not (_label in _name) for _label in labels])
if flag:
res += Fraction(_name).limit_denominator(10)
offset = res
return coefficients, offset
def transform_fraction_with_label_to_string(number: Fraction, label: str) \
-> str:
"""Transform fraction with label to string.
Parameters
----------
number : Fraction or float
    Coefficient to render; floats are converted to a Fraction.
label : str
    Variable label appended to the coefficient (e.g. "x").
Returns
-------
str
    Rendered term such as "x", "-x", "2x" or "x/2"; empty for a zero coefficient.
"""
if isinstance(number, Fraction):
val = number
else:
val = Fraction(number).limit_denominator(10)
if val == Fraction(0, 1):
res = ""
elif val == Fraction(1, 1):
res = f"{label:}"
elif val == Fraction(-1, 1):
res = f"-{label:}"
elif val.denominator == 1:
res = f"{val.numerator}{label:}"
elif val.numerator == 1:
res = f"{label:}/{val.denominator}"
else:
res = f"{val.numerator}{label:}/{val.denominator}"
return res
def transform_digits_to_string(labels: Tuple[str], coefficients,
offset: Fraction) -> str:
"""Form a string from digits.
Arguments
---------
labels: the tuple of lablels (ex.: ('x', 'y', 'z') or ('a', 'b', 'c')))
coefficients: the parameters in front of label (ex.: (1.0, 0.5, 0.0))
offset: the number (ex.: 2/3)
Output
------
string
Example
-------
>>> transform_digits_to_string(('x', 'y', 'z'), (1.0, 0.5, 0.0), 0.6666667)
x+y/2+2/3
"""
l_res = []
for _coefficient, _label in zip(coefficients, labels):
_name = transform_fraction_with_label_to_string(_coefficient, _label)
if _name == "":
pass
elif _name.startswith("-"):
l_res.append(_name)
elif l_res == []:
l_res.append(_name)
else:
l_res.append(f"+{_name:}")
_name = str(Fraction(offset).limit_denominator(10))
if _name == "0":
if l_res == []:
l_res.append(_name)
elif ((l_res == []) | (_name.startswith("-"))):
l_res.append(_name)
else:
l_res.append(f"+{_name:}")
return "".join(l_res)
def transform_r_b_to_string(r, b, labels=("x", "y", "z")) -> str:
"""Transform r (matrix) b (vector) to string.
Parameters
----------
r : array-like
    Rotation part, one row of coefficients per output component.
b : array-like
    Translation (offset) part, one entry per output component.
labels : tuple of str, optional
    Variable labels. The default is ("x", "y", "z").
Returns
-------
str
    Comma-separated symmetry-operation string, e.g. "x,-y+1/2,z".
"""
l_res = [transform_digits_to_string(labels, _ri, _bi) for _ri, _bi in
zip(r, b)]
return ",".join(l_res)
def calc_mRmCmRT(r_ij, c_ij):
"""Calculate matrix multiplication R*C*RT.
Matrices are expressed through its component and can be expressed as
nD-array.
r_ij: r11, r12, r13, r21, r22, r23, r31, r32, r33
c_ij: c11, c12, c13, c21, c22, c23, c31, c32, c33
"""
r11, r12, r13, r21, r22, r23, r31, r32, r33 = r_ij
rt_ij = r11, r21, r31, r12, r22, r32, r13, r23, r33
rcrt_ij = calc_product_matrices(r_ij, c_ij, rt_ij)
rcrt_11, rcrt_12, rcrt_13, rcrt_21, rcrt_22, rcrt_23, rcrt_31, rcrt_32, \
rcrt_33 = rcrt_ij
return rcrt_11, rcrt_12, rcrt_13, rcrt_21, rcrt_22, rcrt_23, rcrt_31, \
rcrt_32, rcrt_33
def calc_rotation_matrix_ij_by_euler_angles(alpha, beta, gamma):
"""Calculate rotational matrix from Euler angle.
Tait-Bryan convection for angles is used.
psi - x-axis
theta - y axis
phi - z axis
0 <= psi <= 2 pi ???
-pi/2 <= theta <= pi/2
0 <= phi <= 2 pi
"""
psi, theta, phi = alpha, beta, gamma
rm_11 = numpy.cos(theta)*numpy.cos(phi)
rm_21 = numpy.cos(theta)*numpy.sin(phi)
rm_31 = -numpy.sin(theta)
rm_12 = numpy.sin(psi)*numpy.sin(theta)*numpy.cos(phi) - \
numpy.cos(psi)*numpy.sin(phi)
rm_22 = numpy.sin(psi)*numpy.sin(theta)*numpy.sin(phi) + \
numpy.cos(psi)*numpy.cos(phi)
rm_32 = numpy.sin(psi)*numpy.cos(theta)
rm_13 = numpy.cos(psi)*numpy.sin(theta)*numpy.cos(phi) + \
numpy.sin(psi)*numpy.sin(phi)
rm_23 = numpy.cos(psi)*numpy.sin(theta)*numpy.sin(phi) - \
numpy.sin(psi)*numpy.cos(phi)
rm_33 = numpy.cos(psi)*numpy.cos(theta)
return rm_11, rm_12, rm_13, rm_21, rm_22, rm_23, rm_31, rm_32, rm_33
def calc_euler_angles_by_rotation_matrix_ij(rm_ij):
"""Calculate Euler Angles from rotational matrix.
Tait-Bryan convection for angles used.
psi - x-axis
theta - y axis
phi - z axis.
"""
rm_11, rm_12, rm_13, rm_21, rm_22, rm_23, rm_31, rm_32, rm_33 = rm_ij
if (abs(rm_31) != 1.):
theta1 = -1*numpy.arcsin(rm_31)
theta2 = numpy.pi-theta1
psi1 = numpy.arctan2((rm_32)/numpy.cos(theta1),
(rm_33)/numpy.cos(theta1))
psi2 = numpy.arctan2((rm_32)/numpy.cos(theta2),
(rm_33)/numpy.cos(theta2))
phi1 = numpy.arctan2((rm_21)/numpy.cos(theta1),
(rm_11)/numpy.cos(theta1))
phi2 = numpy.arctan2((rm_21)/numpy.cos(theta2),
(rm_11)/numpy.cos(theta2))
res = [[psi1, theta1, phi1], [psi2, theta2, phi2]]
else:
phi = 0
if (rm_31 == -1):
theta = numpy.pi/2
psi = phi+numpy.arctan2(rm_12, rm_13)
res = [psi, theta, phi]
else:
theta = -1*numpy.pi/2
psi = -1*phi+numpy.arctan2(-1*rm_12, -1*rm_13)
res = [psi, theta, phi]
return res
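# Minimal consistency sketch (not part of the original module): build a rotation
# matrix from Tait-Bryan angles and recover the angles again. The angle values are
# arbitrary test inputs.
def _example_euler_round_trip():
    psi, theta, phi = 0.3, -0.2, 1.1
    rm_ij = calc_rotation_matrix_ij_by_euler_angles(psi, theta, phi)
    solutions = calc_euler_angles_by_rotation_matrix_ij(rm_ij)
    print(solutions)  # one of the two returned triplets should be (0.3, -0.2, 1.1)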
def calc_determinant_matrix_ij(m_ij):
"""Calculate determinant of the matrix / matrices.
Parameters
----------
m_ij : TYPE
m_ij = m_11, m_12, m_13, m_21, m_22, m_23, m_31, m_32, m_33
Returns
-------
det : float or ndarray
    The determinant value(s).
"""
m_11, m_12, m_13, m_21, m_22, m_23, m_31, m_32, m_33 = m_ij
det = m_11*m_22*m_33 - m_11*m_23*m_32 \
- m_12*m_21*m_33 + m_12*m_23*m_31 \
+ m_13*m_21*m_32 - m_13*m_22*m_31
return det
def calc_inverse_matrix_ij(m_ij):
"""Calculate inverse matrix / matrices.
Parameters
----------
m_ij : TYPE
m_ij = m_11, m_12, m_13, m_21, m_22, m_23, m_31, m_32, m_33
Returns
-------
tuple
    The nine components m_i_11, m_i_12, m_i_13, m_i_21, m_i_22, m_i_23,
    m_i_31, m_i_32, m_i_33 of the inverse matrix (zeros where the
    determinant is numerically singular).
"""
eps = 1.0e-12
det = calc_determinant_matrix_ij(m_ij)
m_11, m_12, m_13, m_21, m_22, m_23, m_31, m_32, m_33 = m_ij
inv_det = numpy.where(numpy.abs(det) < eps, 0., 1./det)
m_i_11 = +(m_22*m_33-m_23*m_32) * inv_det
m_i_21 = -(m_21*m_33-m_23*m_31) * inv_det
m_i_31 = +(m_21*m_32-m_22*m_31) * inv_det
m_i_12 = -(m_12*m_33-m_13*m_32) * inv_det
m_i_22 = +(m_11*m_33-m_13*m_31) * inv_det
m_i_32 = -(m_11*m_32-m_12*m_31) * inv_det
m_i_13 = +(m_12*m_23-m_13*m_22) * inv_det
m_i_23 = -(m_11*m_23-m_13*m_21) * inv_det
m_i_33 = +(m_11*m_22-m_12*m_21) * inv_det
return m_i_11, m_i_12, m_i_13, m_i_21, m_i_22, m_i_23, m_i_31, m_i_32, \
m_i_33
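# Minimal check sketch (not part of the original module): determinant and inverse of
# a simple diagonal 3x3 matrix given as the flat 9-component tuple used throughout.
def _example_determinant_and_inverse():
    m_ij = (2., 0., 0., 0., 3., 0., 0., 0., 4.)
    print(calc_determinant_matrix_ij(m_ij))  # 24.0
    print(calc_inverse_matrix_ij(m_ij))      # diagonal 0.5, 1/3, 0.25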
def calc_rotation_matrix_ij_around_axis(angle, axis="x"):
"""Calculate rotation matrix around given axis.
Parameters
----------
angle : float or ndarray
    Rotation angle in radians.
axis : str, optional
    Rotation axis, one of "x", "y" or "z". The default is "x".
Returns
-------
tuple
    The nine components r_m_11, r_m_12, r_m_13, r_m_21, r_m_22, r_m_23,
    r_m_31, r_m_32, r_m_33 of the rotation matrix.
"""
np_zero = 0.*angle
np_one = 1. + np_zero
if axis.lower() == "y":
r_m_11 = numpy.cos(angle)
r_m_12 = np_zero
r_m_13 = numpy.sin(angle)
r_m_21 = np_zero
r_m_22 = np_one
r_m_23 = np_zero
r_m_31 = -numpy.sin(angle)
r_m_32 = np_zero
r_m_33 = numpy.cos(angle)
elif axis.lower() == "z":
r_m_11 = numpy.cos(angle)
r_m_12 = numpy.sin(angle)
r_m_13 = np_zero
r_m_21 = -numpy.sin(angle)
r_m_22 = numpy.cos(angle)
r_m_23 = np_zero
r_m_31 = np_zero
r_m_32 = np_zero
r_m_33 = np_one
else:
# by default "x"
r_m_11 = np_one
r_m_12 = np_zero
r_m_13 = np_zero
r_m_21 = np_zero
r_m_22 = numpy.cos(angle)
r_m_23 = numpy.sin(angle)
r_m_31 = np_zero
r_m_32 = -numpy.sin(angle)
r_m_33 = numpy.cos(angle)
#!/usr/bin/env python
"""ISOCHRONE.PY - Isochrone and isochrone grid classes
"""
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20210920' # yyyymmdd
import os
import numpy as np
from glob import glob
from astropy.table import Table
from astropy.io import fits
from scipy.interpolate import interp1d
from dlnpyutils import utils as dln
import copy
from . import extinction,utils
def load():
""" Load all the default isochrone files."""
ddir = utils.datadir()
files = glob(ddir+'parsec_*fits.gz')
nfiles = len(files)
if nfiles==0:
raise Exception("No default isochrone files found in "+ddir)
iso = []
for f in files:
iso.append(Table.read(f))
if len(iso)==1: iso=iso[0]
# Change metallicity and age names for parsec
iso['AGE'] = 10**iso['LOGAGE'].copy()
iso['METAL'] = iso['MH']
# Index
grid = IsoGrid(iso)
return grid
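# Illustrative usage sketch (not part of the original file): loading the default
# PARSEC grid shipped with the package. This assumes the parsec_*.fits.gz files are
# present in the data directory; only the documented load() call is used here.
def _example_load_default_grid():
    grid = load()
    print(type(grid))  # IsoGrid wrapping the stacked isochrone table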
def isointerp2(iso1,iso2,frac,photnames=None,minlabel=1,maxlabel=7,verbose=False):
""" Interpolate between two isochrones."""
# frac: fractional distance for output isochrone
# 0 is close to iso1 and 1 is close to iso2
niso1 = len(iso1)
niso2 = len(iso2)
label1 = np.unique(iso1['LABEL'])
label2 = np.unique(iso2['LABEL'])
age1 = iso1['AGE'][0]
metal1 = iso1['METAL'][0]
age2 = iso2['AGE'][0]
metal2 = iso2['METAL'][0]
isominimax1 = np.max(iso1['MINI'])
isominimax2 = np.max(iso2['MINI'])
intimfmax1 = np.max(iso1['INT_IMF'])
intimfmax2 = np.max(iso2['INT_IMF'])
# max MINI for the output isochrone
isominimax = isominimax1*(1-frac)+isominimax2*frac
# Use MINI, original mass
# get unique MINI values between the two isochrones
#mini = np.concatenate((iso1['MINI'].data,iso2['MINI']))
#mini = np.unique(mini)
# Maybe interpolate different star type "labels" separately
# 1-9
# 1: main sequence
# 2: subgiant branch (Hertzsprung gap)
# 3: red giant branch
# 4: horizontal branch
# 5: sometimes "missing"
# 6: sometimes "missing"
# 7: AGB
# 8: TAGB
# 9: WD (often only one point)
# Descriptions from CMD website
# http://stev.oapd.inaf.it/cmd_3.5/faq.html
# 0 = PMS, pre main sequence
# 1 = MS, main sequence
# 2 = SGB, subgiant branch, or Hertzsprung gap for more intermediate+massive stars
# 3 = RGB, red giant branch, or the quick stage of red giant for intermediate+massive stars
# 4 = CHEB, core He-burning for low mass stars, or the very initial stage of CHeB for intermediate+massive stars
# 5 = still CHEB, the blueward part of the Cepheid loop of intermediate+massive stars
# 6 = still CHEB, the redward part of the Cepheid loop of intermediate+massive stars
# 7 = EAGB, the early asymptotic giant branch, or a quick stage of red giant for massive stars
# 8 = TPAGB, the thermally pulsing asymptotic giant branch
# 9 = post-AGB (in preparation!)
#
# can do 1-3 together
# sometimes 5+6 are "missing"
#
# if a phase is missing in ONE of the isochrones then drop it from the output one as well
# for isochrones of the SAME age, you should be able to use "mass" as the independent variable
# to interpolate things
# the points line up REALLY well on the MINI vs. INT_IMF plot
# MINI values for isochrones of the same age are quite similar, sometimes one will have a couple extra points
if photnames is None:
colnames = np.char.array(iso1.colnames)
photind, = np.where((colnames.find('MAG')>-1) & (colnames.find('_')>-1))
photnames = list(colnames[photind])
interpnames = ['INT_IMF','MASS']+photnames
# Initialize the output catalog
nout = int(1.5*np.max([niso1,niso2]))
out = Table()
out['AGE'] = np.zeros(nout,float)+(age1*(1-frac)+age2*frac)
out['METAL'] = metal1*(1-frac)+metal2*frac
out['MINI'] = 0.0
out['INT_IMF'] = 0.0
out['MASS'] = 0.0
out['LABEL'] = 0
for n in photnames:
out[n] = 0.0
# Label loop
count = 0
for l in np.arange(minlabel, maxlabel+1):
"""
Copyright (c) 2016-2020 The scikit-optimize developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Inspired by https://github.com/jonathf/chaospy/blob/master/chaospy/
distributions/sampler/sequences/grid.py
"""
import numpy as np
from entmoot.sampler.base import InitialPointGenerator
from entmoot.space.space import Space
from sklearn.utils import check_random_state
def _quadrature_combine(args):
args = [np.asarray(arg).reshape(len(arg), -1) for arg in args]
shapes = [arg.shape for arg in args]
size = np.prod(shapes, 0)[0] * np.sum(shapes, 0)[1]
if size > 10 ** 9:
raise MemoryError("Too large sets")
out = args[0]
for arg in args[1:]:
out = np.hstack([
np.tile(out, len(arg)).reshape(-1, out.shape[1]),
np.tile(arg.T, len(out)).reshape(arg.shape[1], -1).T,
])
return out
def _create_uniform_grid_exclude_border(n_dim, order):
assert order > 0
assert n_dim > 0
x_data = np.arange(1, order + 1) / (order + 1.)
x_data = _quadrature_combine([x_data] * n_dim)
return x_data
def _create_uniform_grid_include_border(n_dim, order):
assert order > 1
assert n_dim > 0
x_data = np.arange(0, order) / (order - 1.)
x_data = _quadrature_combine([x_data] * n_dim)
return x_data
def _create_uniform_grid_only_border(n_dim, order):
assert n_dim > 0
assert order > 1
x = [[0., 1.]] * (n_dim - 1)
x.append(list(np.arange(0, order) / (order - 1.)))
x_data = _quadrature_combine(x)
return x_data
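# Illustrative sketch (not part of the original module): the three helpers build
# full-factorial layouts on the unit hypercube; shapes shown for two dimensions and
# order 3.
def _example_grid_helpers():
    print(_create_uniform_grid_exclude_border(2, 3).shape)  # (9, 2), strictly inside (0, 1)
    print(_create_uniform_grid_include_border(2, 3).shape)  # (9, 2), includes 0.0 and 1.0
    print(_create_uniform_grid_only_border(2, 3).shape)     # (6, 2), border points only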
class Grid(InitialPointGenerator):
"""Generate samples from a regular grid.
Parameters
----------
border : str, default='exclude'
defines how the samples are generated:
- 'include' : Includes the border into the grid layout
- 'exclude' : Excludes the border from the grid layout
- 'only' : Selects only points at the border of the dimension
use_full_layout : boolean, default=True
When True, a full factorial design is generated and
missing points are taken from the next larger full factorial
design, depending on `append_border`
When False, the next larger full factorial design is
generated and points are randomly selected from it.
append_border : str, default="only"
When use_full_layout is True, this parameter defines how the missing
points will be generated from the next larger grid layout:
- 'include' : Includes the border into the grid layout
- 'exclude' : Excludes the border from the grid layout
- 'only' : Selects only points at the border of the dimension
"""
def __init__(self, border="exclude", use_full_layout=True,
append_border="only"):
self.border = border
self.use_full_layout = use_full_layout
self.append_border = append_border
def generate(self, dimensions, n_samples, random_state=None):
"""Creates samples from a regular grid.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
n_samples : int
The number of grid samples to generate.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
np.array, shape=(n_samples, n_dim)
grid set
"""
rng = check_random_state(random_state)
space = Space(dimensions)
n_dim = space.n_dims
transformer = space.get_transformer()
space.set_transformer("normalize")
if self.border == "include":
if self.use_full_layout:
order = int(np.floor(np.sqrt(n_samples)))
else:
order = int(np.ceil(np.sqrt(n_samples)))
if order < 2:
order = 2
h = _create_uniform_grid_include_border(n_dim, order)
elif self.border == "exclude":
if self.use_full_layout:
order = int(np.floor(np.sqrt(n_samples)))
else:
order = int(np.ceil(np.sqrt(n_samples)))
if order < 1:
order = 1
h = _create_uniform_grid_exclude_border(n_dim, order)
elif self.border == "only":
if self.use_full_layout:
order = int(np.floor(n_samples / 2.))
else:
order = int(np.ceil(n_samples / 2.))
if order < 2:
order = 2
h = _create_uniform_grid_only_border(n_dim, order)
else:
raise ValueError("Wrong value for border")
if np.size(h, 0) > n_samples:
rng.shuffle(h)
h = h[:n_samples, :]
elif np.size(h, 0) < n_samples:
# -*- coding: utf-8 -*-
from io import StringIO
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy.interpolate import interp1d
def gumbel_r_(mean: float, sd: float, **_):
# parameters Gumbel W&S
alpha = 1.282 / sd
u = mean - 0.5772 / alpha
# parameters Gumbel scipy
scale = 1 / alpha
loc = u
return dict(loc=loc, scale=scale)
def lognorm_(mean: float, sd: float, **_):
cov = sd / mean
sigma_ln = np.sqrt(np.log(1 + cov ** 2))
miu_ln = np.log(mean) - 1 / 2 * sigma_ln ** 2
s = sigma_ln
loc = 0
scale = np.exp(miu_ln)
return dict(s=s, loc=loc, scale=scale)
def norm_(mean: float, sd: float, **_):
loc = mean
scale = sd
return dict(loc=loc, scale=scale)
def uniform_(ubound: float, lbound: float, **_):
if lbound > ubound:
lbound += ubound
ubound = lbound - ubound
lbound -= ubound
loc = lbound
scale = ubound - lbound
return dict(loc=loc, scale=scale)
def random_variable_generator(dict_in: dict, num_samples: int):
"""Generates samples of defined distribution. This is build upon scipy.stats library.
:param dict_in: distribution inputs, required keys are distribution dependent, should be align with inputs
required in the scipy.stats. Additional compulsory keys are:
`dist`: str, distribution type;
`ubound`: float, upper bound of the sampled values; and
`lbound`: float, lower bound of the sampled values.
:param num_samples: number of samples to be generated.
:return samples: sampled values based upon `dist` in the range [`lbound`, `ubound`] with `num_samples` number of
values.
"""
# assign distribution type
dist_0 = dict_in["dist"]
dist = dict_in["dist"]
# assign distribution boundary (for samples)
ubound = dict_in["ubound"]
lbound = dict_in["lbound"]
# sample CDF points (y-axis value)
def generate_cfd_q(dist_, dist_kw_, lbound_, ubound_):
cfd_q_ = np.linspace(
getattr(stats, dist_).cdf(x=lbound_, **dist_kw_),
getattr(stats, dist_).cdf(x=ubound_, **dist_kw_),
num_samples,
)
samples_ = getattr(stats, dist_).ppf(q=cfd_q_, **dist_kw_)
return samples_
# convert human distribution parameters to scipy distribution parameters
if dist_0 == "gumbel_r_":
dist_kw = gumbel_r_(**dict_in)
dist = "gumbel_r"
samples = generate_cfd_q(
dist_=dist, dist_kw_=dist_kw, lbound_=lbound, ubound_=ubound
)
elif dist_0 == "uniform_":
dist_kw = uniform_(**dict_in)
dist = "uniform"
samples = generate_cfd_q(
dist_=dist, dist_kw_=dist_kw, lbound_=lbound, ubound_=ubound
)
elif dist_0 == "norm_":
dist_kw = norm_(**dict_in)
dist = "norm"
samples = generate_cfd_q(
dist_=dist, dist_kw_=dist_kw, lbound_=lbound, ubound_=ubound
)
elif dist_0 == "lognorm_":
dist_kw = lognorm_(**dict_in)
dist = "lognorm"
samples = generate_cfd_q(
dist_=dist, dist_kw_=dist_kw, lbound_=lbound, ubound_=ubound
)
elif dist_0 == "lognorm_mod_":
dist_kw = lognorm_(**dict_in)
dist = "lognorm"
samples = generate_cfd_q(
dist_=dist, dist_kw_=dist_kw, lbound_=lbound, ubound_=ubound
)
samples = 1 - samples
elif dist_0 == "constant_":
# print(num_samples, lbound, ubound, np.average(lbound))
samples = np.full((num_samples,), np.average([lbound, ubound]))
else:
try:
dict_in.pop("dist")
dict_in.pop("ubound")
dict_in.pop("lbound")
samples = generate_cfd_q(
dist_=dist, dist_kw_=dict_in, lbound_=lbound, ubound_=ubound
)
except AttributeError:
raise ValueError("Unknown distribution type {}.".format(dist))
samples[samples == np.inf] = ubound
samples[samples == -np.inf] = lbound
if "permanent" in dict_in:
samples += dict_in["permanent"]
np.random.shuffle(samples)
return samples
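# Illustrative usage sketch (not part of the original module): drawing bounded samples
# for a Gumbel-distributed quantity. The distribution parameters below are made up.
def _example_random_variable_generator():
    dict_in = dict(dist="gumbel_r_", mean=420.0, sd=126.0, lbound=10.0, ubound=1500.0)
    samples = random_variable_generator(dict_in, num_samples=1000)
    print(samples.mean(), samples.min(), samples.max())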
def dict_unflatten(dict_in: dict) -> dict:
dict_out = dict()
for k in list(dict_in.keys()):
if ":" in k:
k1, k2 = k.split(":")
if k1 in dict_out:
dict_out[k1][k2] = dict_in[k]
else:
dict_out[k1] = {k2: dict_in[k]}
return dict_out
def dict_flatten(dict_in: dict) -> dict:
"""Converts two levels dict to single level dict. Example input and output see _test_dict_flatten.
:param dict_in: Any two levels (or less) dict.
:return dict_out: Single level dict.
"""
dict_out = dict()
for k in list(dict_in.keys()):
if isinstance(dict_in[k], dict):
for kk, vv in dict_in[k].items():
dict_out[f"{k}:{kk}"] = vv
else:
dict_out[k] = dict_in[k]
return dict_out
def _test_dict_flatten():
x = dict(A=dict(a=0, b=1), B=dict(c=2, d=3))
y_expected = {"A:a": 0, "A:b": 1, "B:c": 2, "B:d": 3}
y = dict_flatten(x)
assert y == y_expected
def main(x: dict, num_samples: int) -> pd.DataFrame:
"""Generates samples based upon prescribed distribution types.
:param x: description of distribution function.
:param num_samples: number of samples to be produced.
:return df_out:
"""
dict_out = dict()
for k, v in x.items():
if isinstance(v, float) or isinstance(v, int) or isinstance(v, np.floating):
dict_out[k] = np.full((num_samples,), v, dtype=float)
elif isinstance(v, str):
dict_out[k] = np.full(
(num_samples,), v, dtype=np.dtype("U{:d}".format(len(v)))
)
elif isinstance(v, np.ndarray) or isinstance(v, list):
dict_out[k] = list(np.full((num_samples, len(v)), v, dtype=float))
elif isinstance(v, dict):
if "dist" in v:
try:
dict_out[k] = random_variable_generator(v, num_samples)
except KeyError:
raise ("Missing parameters in input variable {}.".format(k))
elif "ramp" in v:
s_ = StringIO(v["ramp"])
d_ = pd.read_csv(
s_,
names=["x", "y"],
dtype=float,
skip_blank_lines=True,
skipinitialspace=True,
)
t_ = d_.iloc[:, 0]
v_ = d_.iloc[:, 1]
if all(v_ == v_[0]):
f_interp = v_[0]
else:
f_interp = interp1d(t_, v_, bounds_error=False, fill_value=0)
dict_out[k] = np.full((num_samples,), f_interp)
else:
raise ValueError("Unknown input data type for {}.".format(k))
else:
raise TypeError("Unknown input data type for {}.".format(k))
dict_out["index"] = np.arange(0, num_samples, 1)
df_out = pd.DataFrame.from_dict(dict_out, orient="columns")
return df_out
def _test_random_variable_generator():
x = dict(v=np.pi)
y = main(x, 1000)
assert len(y["v"].values) == 1000
assert all([v == np.pi for v in y["v"].values])
x = dict(v="hello world.")
y = main(x, 1000)
assert len(y["v"].values) == 1000
assert all([v == "hello world." for v in y["v"].values])
x = dict(v=[0.0, 1.0, 2.0])
y = main(x, 1000)
assert len(y["v"].values) == 1000
assert all([all(v == np.array([0.0, 1.0, 2.0])) for v in y["v"].values])
x = dict(v=dict(dist="uniform_", ubound=10, lbound=-1))
y = main(x, 1000)
assert len(y["v"].values) == 1000
    assert np.max(y["v"].values) <= 10
import copy
import logging
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import Callback
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("DNN")
class Dnn:
def __init__(self, input_size, class_num):
self.class_num = class_num
self.input_size = input_size
self.model = Sequential()
self.model.add(BatchNormalization())
self.model.add(
Dense(
input_size * 2, input_dim=input_size,
activation='relu',
kernel_initializer='uniform'
)
)
self.model.add(BatchNormalization())
self.model.add(
Dense(
class_num, input_dim=input_size * 2,
activation='softmax',
kernel_initializer='uniform'
)
)
self.model.compile(
optimizer=SGD(lr=0.005),
loss="categorical_crossentropy",
metrics=["accuracy"]
)
def train(
self, samples, labels, test_samples, test_labels,
epoch=1, batch_size=1, cm=False
):
matrices = []
history = TrainHistory(self.model, epoch)
self.model.fit(
samples, labels, batch_size=batch_size, epochs=epoch,
verbose=0, callbacks=[history]
)
if cm:
cm_model = copy.copy(self.model)
cm_labels = np.argmax(test_labels, axis=1)
for weight in history.weights_list:
confusion_matrix = np.zeros([self.class_num, self.class_num]).astype("int32")
cm_model.set_weights(weight)
predicts = np.argmax(
cm_model.predict(test_samples), axis=1
).astype("int32")
for p in range(predicts.shape[0]):
confusion_matrix[predicts[p], cm_labels[p]] += 1
matrices.append(confusion_matrix)
return history.loss, history.acc, matrices
def evaluate(self, xt, yt, batch_size=1, verbose=0):
history = EvaluationHistory()
self.model.evaluate(xt, yt, verbose=0, batch_size=batch_size, callbacks=[history])
if verbose == 1:
logger.info(f"\rTest Accuracy={np.mean(np.array(history.acc))}")
logger.info(f"\rTest Loss={np.mean( | np.array(history.loss) | numpy.array |
"""
dataset.py
This module implements the class Dataset.
The Dataset class holds the data on which the experiments are performed.
It distinguishes between pool, test and training data.
The actual data is only stored *once* and access is granted via reference.
Additional copies with unique access indices can be obtained via .make_copy().
"""
import numpy as np
from sklearn.utils.random import sample_without_replacement
class Dataset:
    # X_pool, y_pool, X_test, y_test are references to the real data storage
    # due to python's mutable variable paradigm.
    # Different instances of Dataset hold different sets of train/pool idxs.
def __init__(self, X_pool, y_pool, X_test, y_test, name=None):
self.train_idxs = np.empty(0)
self.pool_idxs = np.arange(len(X_pool))
self._X_pool = X_pool
self._y_pool = y_pool
self._X_test = X_test
self._y_test = y_test
self.name = name
def apply_scaler(self, scaler):
self.scaler = scaler
self.scaler.fit(self._X_pool)
self._X_pool = self.scaler.transform(self._X_pool)
self._X_test = self.scaler.transform(self._X_test)
def apply_scaler_y(self, scaler):
self.scaler_y = scaler
self.scaler_y.fit(self._y_pool)
self._y_pool = self.scaler_y.transform(self._y_pool)
self._y_test = self.scaler_y.transform(self._y_test)
def reset_pool(self):
self.train_idxs = np.empty(0)
self.pool_idxs = np.arange(len(self._X_pool))
    # different copies hold a reference to the same data, but different indices
def make_copy(self, approximate_pool=False):
ds = Dataset(self._X_pool, self._y_pool, self._X_test, self._y_test, self.name)
# if training data is not empty -> copy the idxs
        if len(self.train_idxs) > 0:
ds.train_idxs = np.array(self.train_idxs)
ds.pool_idxs = np.array(self.pool_idxs)
return ds
# subsample data to requested size
def reduce_size(self, size_pool, size_test):
assert size_pool <= self._X_pool.shape[0]
assert size_test <= self._X_test.shape[0]
pool_sample = sample_without_replacement(
self._X_pool.shape[0], n_samples=size_pool
)
test_sample = sample_without_replacement(
self._X_test.shape[0], n_samples=size_test
)
self._X_pool = self._X_pool[pool_sample]
self._y_pool = self._y_pool[pool_sample]
self._X_test = self._X_test[test_sample]
self._y_test = self._y_test[test_sample]
self.train_idxs = np.empty(0)
self.pool_idxs = np.arange(len(self._X_pool))
def add_to_training(self, idxs, return_data=False):
if not self.train_idxs.size > 0:
self.train_idxs = np.array(idxs)
else:
assert np.max(idxs) < len(self.pool_idxs)
self.train_idxs = np.append(self.train_idxs, self.pool_idxs[idxs])
if return_data:
added_data = self._X_pool[self.pool_idxs[idxs]]
        self.pool_idxs = np.delete(self.pool_idxs, idxs)
        if return_data:
            return added_data
import librosa
import numpy as np
import os
import tensorflow as tf
from keras.models import load_model
from model.QueryByVoiceModel import QueryByVoiceModel
class SiameseStyle(QueryByVoiceModel):
'''
A siamese-style neural network for query-by-voice applications.
citation: <NAME>, <NAME>, and <NAME>, "Siamese Style Convolutional
Neural Networks for Sound Search by Vocal Imitation," in IEEE/ACM
Transactions on Audio, Speech, and Language Processing, pp. 99-112,
2018.
'''
def __init__(
self,
model_filepath,
parametric_representation=False,
uses_windowing=True,
window_length=4.0,
hop_length=2.0):
'''
SiameseStyle model constructor.
Arguments:
model_filepath: A string. The path to the model weight file on
disk.
            parametric_representation: A boolean. True if the audio
                representations depend on the model weights.
            uses_windowing: A boolean. Indicates whether the model slices the
                representation into fixed-length windows before processing.
window_length: A float. The window length in seconds. Unused if
uses_windowing is False.
hop_length: A float. The hop length between windows in seconds.
Unused if uses_windowing is False.
'''
super().__init__(
model_filepath,
parametric_representation,
uses_windowing,
window_length,
hop_length)
def construct_representation(self, audio_list, sampling_rates, is_query):
'''
Constructs the audio representation used during inference. Audio
files from the dataset are constructed only once and cached for
later reuse.
Arguments:
audio_list: A python list of 1D numpy arrays. Each array represents
one variable-length mono audio file.
            sampling_rates: A python list of ints. The corresponding sampling
rate of each element of audio_list.
is_query: A boolean. True only if audio is a user query.
Returns:
A python list of audio representations. The list order should be
the same as in audio_list.
'''
# Siamese-style network requires different representation of query
# and dataset audio
if is_query:
representation = self._construct_representation_query(
audio_list[0], sampling_rates[0])
else:
representation = self._construct_representation_dataset(
audio_list, sampling_rates)
return representation
def measure_similarity(self, query, items):
'''
Runs model inference on the query.
Arguments:
query: A numpy array. An audio representation as defined by
construct_representation. The user's vocal query.
items: A numpy array. The audio representations as defined by
construct_representation. The dataset of potential matches for
the user's query.
Returns:
A python list of floats. The similarity score of the query and each
element in the dataset. The list order should be the same as
in dataset.
'''
if not self.model:
            raise RuntimeError(
                'No model loaded during call to measure_similarity.')
# run model inference
with self.graph.as_default():
self.logger.debug('Running inference')
return np.array(self.model.predict(
[query, items], batch_size=len(query), verbose=1),
dtype='float64')
def _load_model(self):
'''
Loads the model weights from disk. Prepares the model to be able to
make predictions.
'''
self.logger.info(
'Loading model weights from {}'.format(self.model_filepath))
self.model = load_model(self.model_filepath)
self.graph = tf.get_default_graph()
def _construct_representation_query(self, query, sampling_rate):
self.logger.debug('Constructing query representation')
# resample query at 16k
new_sampling_rate = 16000
query = librosa.resample(query, sampling_rate, new_sampling_rate)
sampling_rate = new_sampling_rate
if self.uses_windowing:
windows = self._window(query, sampling_rate)
else:
windows = [
librosa.util.fix_length(
query, self.window_length * sampling_rate)]
# construct the logmelspectrogram of the signal
representation = []
for window in windows:
melspec = librosa.feature.melspectrogram(
window, sr=sampling_rate, n_fft=133,
hop_length=133, power=2, n_mels=39,
fmin=0.0, fmax=5000)
melspec = melspec[:, :482]
logmelspec = librosa.power_to_db(melspec, ref=np.max)
representation.append(logmelspec)
# normalize to zero mean and unit variance
representation = np.array(representation)
representation = self._normalize(representation).astype('float32')
return [representation]
def _construct_representation_dataset(self, dataset, sampling_rates):
new_sampling_rate = 44100
representations = []
for audio, sampling_rate in zip(dataset, sampling_rates):
# resample audio at 44.1k
audio = librosa.resample(audio, sampling_rate, new_sampling_rate)
sampling_rate = new_sampling_rate
if self.uses_windowing:
windows = self._window(audio, sampling_rate)
else:
windows = [
librosa.util.fix_length(
audio, self.window_length * sampling_rate)]
representation = []
for window in windows:
# construct the logmelspectrogram of the signal
melspec = librosa.feature.melspectrogram(
window,
sr=sampling_rate,
n_fft=1024,
hop_length=1024,
power=2)
melspec = melspec[:, 0:128]
logmelspec = librosa.power_to_db(melspec, ref=np.max)
representation.append(logmelspec)
# normalize to zero mean and unit variance
representation = np.array(representation)
representation = self._normalize(representation).astype('float32')
            representation = np.expand_dims(representation, axis=1)
            representations.append(representation)
        return representations
import glob
import os
import librosa
import numpy as np
import shutil
import pretty_midi
startpath='Data'
destpath = 'TempData'
subfolder='MUS'
RangeMIDInotes=[21,108]
n_fft = 512
sr=44100.
bins_per_octave=36
n_octave=7
val_rate=1./7
pretty_midi.pretty_midi.MAX_TICK = 1e10
n_bins= n_octave * bins_per_octave
hop_length = 256
win_width = 32
kernel_size=7
overlap=True
def midi2mat(midi_path_train, length, spect_len, sr, RangeMIDInotes=RangeMIDInotes):
midi_data = pretty_midi.PrettyMIDI(midi_path_train)
pianoRoll = midi_data.instruments[0].get_piano_roll(fs=spect_len * sr/length)
Ground_truth_mat = (pianoRoll[RangeMIDInotes[0]:RangeMIDInotes[1] + 1, :spect_len] > 0)
return Ground_truth_mat
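# Illustrative usage sketch (the file names below are placeholders, not files shipped with
# this script):
# x, _ = librosa.load('example.wav', sr=sr)
# spect_len = librosa.stft(x, n_fft=n_fft, hop_length=hop_length).shape[1]
# labels = midi2mat('example.mid', len(x), spect_len, sr)
# # labels has shape (RangeMIDInotes[1] - RangeMIDInotes[0] + 1, spect_len): one boolean row per MIDI pitch.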
files = [f for f in os.listdir('Data')]
#fpath is path of different MUS folders
for f in files:
j=0
k=0
print(f)
fpath=os.path.join(startpath,f)
print(fpath)
subfiles = [f1 for f1 in os.listdir(fpath)]
for f1 in subfiles:
mainfile=os.path.join(fpath,f1)
file_name,file_extensions=os.path.splitext(f1)
if file_extensions == ".txt":
continue
if file_extensions == ".mid":
mainfile=os.path.join(fpath,file_name+'.wav')
# if file_extensions == ".mid":
# continue
# mainfile=os.path.join(fpath,f1)
# if file_extensions == ".wav":
# mainfile = f1
x ,sr = librosa.load(mainfile,sr=sr)
# -----------------------------------------------------------------------------------------------------------
fft = np.fft.fft(x)
spectrum = np.abs(fft)
# perform stft
stft = librosa.stft(x, n_fft=n_fft, hop_length=hop_length)[:-1]
# calculate abs values on complex numbers to get magnitude
spect = np.transpose(np.abs(stft))
log_spectrogram = librosa.amplitude_to_db(np.transpose(spect))
# ------------------------------------------------------------------------------------------------------------
midi_file = os.path.join(fpath,f1)
if file_extensions==".wav":
midi_file = os.path.join(fpath,file_name+'.mid')
Ground_truth_mat=midi2mat(midi_file, len(x), spect.shape[0], sr, RangeMIDInotes=RangeMIDInotes)
midi_train = np.transpose(Ground_truth_mat)
#midi length<stft length, cut stft
if midi_train.shape[0]<spect.shape[0]:
spect=spect[:midi_train.shape[0],:]
if file_extensions == ".wav" :
ofolder = 'wav'
subname = 'STFT'
elif file_extensions == ".mid" :
ofolder = 'mid'
subname = 'label'
opath = os.path.join(destpath,f,file_name)+subname+'.npy'
if file_extensions == ".wav":
np.save(opath,spect)
elif file_extensions == ".mid":
np.save(opath,midi_train)
print('Preprocessed',f1)
        matrix = np.array(np.load(opath))
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import ismo.iterative_surrogate_model_optimization
import ismo.train.trainer_factory
import ismo.train.multivariate_trainer
import ismo.samples.sample_generator_factory
import ismo.optimizers
import matplotlib.pyplot as plt
import collections
class LossWriter:
def __init__(self, basename):
self.basename = basename
self.iteration = 0
def __call__(self, loss):
np.save(f'{self.basename}_iteration_{self.iteration}.npy', loss.history['loss'])
self.iteration += 1
def convergence_study(
*,
generator_name,
training_parameter_filename,
optimizer_name,
retries,
save_result,
prefix,
with_competitor,
dimension,
number_of_variables,
number_of_samples_per_iteration,
simulator_creator,
objective,
variable_names,
save_plot=lambda name: plt.savefig(f'{name}.png')
):
all_values_min = collections.defaultdict(list)
samples_as_str = "_".join(map(str, number_of_samples_per_iteration))
for try_number in range(retries):
print(f"try_number: {try_number}")
generator = ismo.samples.create_sample_generator(generator_name)
optimizer = ismo.optimizers.create_optimizer(optimizer_name)
trainers = [ismo.train.create_trainer_from_simple_file(training_parameter_filename) for _ in
range(number_of_variables)]
for var_index, trainer in enumerate(trainers):
trainer.add_loss_history_writer(LossWriter(f'{prefix}loss_var_{var_index}_try_{try_number}'))
trainer = ismo.train.MultiVariateTrainer(trainers)
starting_sample = try_number * sum(number_of_samples_per_iteration)
parameters, values = ismo.iterative_surrogate_model_optimization(
number_of_samples_per_iteration=number_of_samples_per_iteration,
sample_generator=generator,
trainer=trainer,
optimizer=optimizer,
simulator=simulator_creator(starting_sample),
objective_function=objective,
dimension=dimension,
starting_sample=starting_sample
)
values = np.array(values)
objective_values = [objective(values[i, :]) for i in range(values.shape[0])]
per_iteration = collections.defaultdict(list)
total_number_of_samples = 0
for number_of_samples in number_of_samples_per_iteration:
total_number_of_samples += number_of_samples
arg_min = np.argmin(objective_values[:total_number_of_samples])
for n, name in enumerate(variable_names):
per_iteration[name].append(values[arg_min, n])
per_iteration['objective'].append(objective_values[arg_min])
for func_name, func_values in per_iteration.items():
all_values_min[func_name].append(per_iteration[func_name])
if save_result:
np.savetxt(f'{prefix}parameters_{try_number}_samples_{samples_as_str}.txt', parameters)
            np.savetxt(f'{prefix}values_{try_number}_samples_{samples_as_str}.txt', values)
import pytest
import tensorflow as tf
import numpy as np
from scipy.ndimage.measurements import mean as label_mean
from skimage.segmentation import relabel_sequential as sk_relabel_sequential
from rdcnet.losses.embedding_loss import InstanceEmbeddingLossBase, SpatialInstanceEmbeddingLossBase, InstanceMeanIoUEmbeddingLoss, MarginInstanceEmbeddingLoss, relabel_sequential
class DummySpatialInstanceEmbeddingLoss(SpatialInstanceEmbeddingLossBase):
def _center_dist_to_probs(self, one_hot, center_dist):
pass
def test__unbatched_soft_jaccard():
'''Verifies that the soft Jaccard loss behaves as keras MeanIoU when
probabilities are either 0 or 1 and that background masking works
'''
_unbatched_soft_jaccard = DummySpatialInstanceEmbeddingLoss(
)._unbatched_soft_jaccard
# check with/without background on simple example
yt = np.array([0, 0, 1, 1, 2, 2])[..., None]
yp = np.array([0, 1, 0, 1, 2, 2])[..., None]
one_hot = tf.cast(tf.one_hot(tf.squeeze(yt, -1), 3), tf.float32)
probs = tf.cast(tf.one_hot(tf.squeeze(yp, -1), 3), tf.float32)
loss = _unbatched_soft_jaccard(one_hot[..., 1:], probs[...,
1:]).numpy().mean()
np.testing.assert_almost_equal(loss, (1 - 1 / 2) / 2, decimal=3)
def test__unbatched_label_to_hot():
_unbatched_label_to_hot = DummySpatialInstanceEmbeddingLoss(
)._unbatched_label_to_hot
np.random.seed(25)
labels = np.random.choice(range(5), size=(10, 10, 1)).astype(np.int32)
hot_labels = _unbatched_label_to_hot(labels)
# #channels == #unique labels - bg
assert hot_labels.shape == (10, 10, 4)
for idx, l in enumerate([1, 2, 3, 4]):
hot_slice = hot_labels[..., idx].numpy().astype(bool)
l_mask = labels.squeeze() == l
np.testing.assert_array_equal(hot_slice, l_mask)
def test_relabel_sequential():
np.random.seed(25)
labels = np.random.choice([-1, 0, 2, 3, 4, 5],
size=(10, 10, 1)).astype(np.int32)
# already sequential labels
sk_sequential_labels = sk_relabel_sequential(labels + 1)[0] - 1
tf_sequential_labels = relabel_sequential(labels)
assert set(np.unique(sk_sequential_labels)) == set(
np.unique(tf_sequential_labels))
# non sequential labels
labels[labels == 2] = 0
labels[labels == 4] = -1
sk_sequential_labels = sk_relabel_sequential(labels + 1)[0] - 1
tf_sequential_labels = relabel_sequential(labels)
assert set(np.unique(sk_sequential_labels)) == set(
np.unique(tf_sequential_labels))
def test__unbatched_embedding_center():
_unbatched_label_to_hot = DummySpatialInstanceEmbeddingLoss(
)._unbatched_label_to_hot
_unbatched_embedding_center = DummySpatialInstanceEmbeddingLoss(
)._unbatched_embedding_center
np.random.seed(25)
labels = np.random.choice(range(5), size=(10, 10, 1)).astype(np.int32)
hot_labels = _unbatched_label_to_hot(labels)
yp = np.random.rand(10, 10, 3).astype(np.float32)
centers = _unbatched_embedding_center(hot_labels, yp)
assert centers.shape == (1, 1, 4, 3)
expected_centers = np.stack([
label_mean(p, labels.squeeze(), [1, 2, 3, 4])
        for p in np.moveaxis(yp, -1, 0)
    ])
import numpy as np
import pytest
from autolens.data.array import mask
from autolens.data.array import interpolation
from autolens.model.galaxy import galaxy
from autolens.model.profiles import mass_profiles
@pytest.fixture(name='scheme')
def make_scheme():
return interpolation.InterpolationScheme(shape=(3, 3), image_coords=np.array([[1.0, 1.0]]), image_pixel_scale=1.0)
@pytest.fixture(name='geometry')
def make_geometry():
return interpolation.InterpolationGeometry(y_min=-1.0, y_max=1.0, x_min=-1.0, x_max=1.0,
y_pixel_scale=1.0, x_pixel_scale=1.0)
@pytest.fixture(name='galaxy_no_profiles', scope='function')
def make_galaxy_no_profiles():
return galaxy.Galaxy()
@pytest.fixture(name="galaxy_mass_sis")
def make_galaxy_mass_sis():
sis = mass_profiles.SphericalIsothermal(einstein_radius=1.0)
return galaxy.Galaxy(mass_profile=sis)
class TestInterpolationScheme(object):
class TestConstructor:
def test__sets_up_attributes_correctly(self):
image_coords = np.array([[-1.0, -6.0], [-1.0, 0.0], [-4.0, 2.0],
[-0.0, -1.0], [0.0, 0.0], [0.0, 1.0],
[3.0, -1.0], [1.0, 0.0], [1.0, 1.0]])
interp = interpolation.InterpolationScheme(shape=(3, 3), image_coords=image_coords, image_pixel_scale=1.0)
assert interp.shape == (3, 3)
assert interp.pixels == 9
assert (interp.image_coords == image_coords).all()
assert interp.geometry.y_min == -6.0
assert interp.geometry.y_max == 2.0
assert interp.geometry.x_min == -4.0
assert interp.geometry.x_max == 3.0
assert interp.geometry.y_pixel_scale == 1.0
assert interp.geometry.x_pixel_scale == 1.0
assert interp.geometry.x_size == 7.0
assert interp.geometry.y_size == 8.0
assert interp.geometry.x_start == -4.5
assert interp.geometry.y_start == -6.5
class TestNeighbors:
def test___3x3_grid_neighbors_all_correct(self):
# |0|1|2|
# |3|4|5|
# |6|7|8|
interp = interpolation.InterpolationScheme(shape=(3, 3), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 3, 4])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 4, 5])).all()
assert (interp.bottom_right_neighbors[2] == np.array([-1, 5, -1])).all()
assert (interp.bottom_right_neighbors[3] == np.array([4, 6, 7])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 7, 8])).all()
assert (interp.bottom_right_neighbors[5] == np.array([-1, 8, -1])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, -1, -1])).all()
assert (interp.bottom_right_neighbors[7] == np.array([8, -1, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 3])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 3, 4])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 4, 5])).all()
assert (interp.bottom_left_neighbors[3] == np.array([-1, -1, 6])).all()
assert (interp.bottom_left_neighbors[4] == np.array([3, 6, 7])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 7, 8])).all()
assert (interp.bottom_left_neighbors[6] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, -1, -1])).all()
assert (interp.bottom_left_neighbors[8] == np.array([7, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[3] == np.array([0, 1, 4])).all()
assert (interp.top_right_neighbors[4] == np.array([1, 2, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([2, -1, -1])).all()
assert (interp.top_right_neighbors[6] == np.array([3, 4, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([4, 5, 8])).all()
assert (interp.top_right_neighbors[8] == np.array([5, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[4] == np.array([0, 1, 3])).all()
assert (interp.top_left_neighbors[5] == np.array([1, 2, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([-1, 3, -1])).all()
assert (interp.top_left_neighbors[7] == np.array([3, 4, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([4, 5, 7])).all()
def test___3x4_grid_neighbors_all_correct(self):
# |0|1| 2| 3|
# |4|5| 6| 7|
# |8|9|10|11|
interp = interpolation.InterpolationScheme(shape=(3, 4), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 4, 5])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 5, 6])).all()
assert (interp.bottom_right_neighbors[2] == np.array([3, 6, 7])).all()
assert (interp.bottom_right_neighbors[3] == np.array([-1, 7, -1])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 8, 9])).all()
assert (interp.bottom_right_neighbors[5] == np.array([6, 9, 10])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 10, 11])).all()
assert (interp.bottom_right_neighbors[7] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([9, -1, -1])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, -1, -1])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, -1, -1])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 4])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 4, 5])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 5, 6])).all()
assert (interp.bottom_left_neighbors[3] == np.array([2, 6, 7])).all()
assert (interp.bottom_left_neighbors[4] == np.array([-1, -1, 8])).all()
        assert (interp.bottom_left_neighbors[5] == np.array([4, 8, 9])).all()
from keras import Input
from keras.models import Model
from keras.layers import Conv2D, MaxPool2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
import numpy as np
import imageio
import glob
import os
import argparse
import dataloader
import random
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import cv2
def get_data(X_train, y_train, num_train):
"""
This function generates training samples.
Parameters
----------
X_train : List
List of training samples features.
y_train: Numpy array
Target masks
num_train: Int
The number of training samples.
Yields
----------
sample_feature : numpy array
The features of a particular sample.
sample_label : numpy array
The label (mask) of a particular sample.
"""
while True:
for i in range(num_train):
sample_feature = np.array([X_train[i]])
            sample_label = np.array([y_train[i]])
            yield sample_feature, sample_label
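# Illustrative usage sketch (unet_model, X_train, y_train and num_train are assumed to be
# defined elsewhere in the original project; they are not created in this excerpt):
# unet_model.fit_generator(get_data(X_train, y_train, num_train),
#                          steps_per_epoch=num_train, epochs=10)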
import torch
import numpy as np
import torch.nn as nn
from sklearn import metrics
from transformers import BertModel
from load_data import traindataloader, valdataloader
BERT_PATH = '../bert-base-chinese'
device = "cuda" if torch.cuda.is_available() else 'cpu'
bert = BertModel.from_pretrained(BERT_PATH).to(device)
bert.eval()
def get_vecs():
device = "cuda" if torch.cuda.is_available() else 'cpu'
bert = BertModel.from_pretrained('/content/gdrive/My Drive/bert-base-chinese').to(device)
bert.eval()
array = np.empty((0, 768))
with torch.no_grad():
for batch in traindataloader:
input_ids, attention_mask = batch['input_ids_1'].to(device), batch['attention_mask_1'].to(device)
outputs = bert(input_ids, attention_mask=attention_mask, output_hidden_states=True)
last_hidden_state = outputs[0]# [batch_size, seq_len, 768]
out = last_hidden_state.permute(0, 2, 1)
out = nn.AvgPool1d(out.size(2))(out).squeeze(2)# [batch_size, 768]
out = out.cpu().data.numpy()
            array = np.append(array, out, axis=0)
    return array
try:
from numba import jit
using_numba = True
except ModuleNotFoundError:
print('Module numba not found. Proceeding without, probably slower.')
using_numba = False
def jit(stuff):
"""Blank placeholder decorator for numba JIT, used in the absence of numba."""
return stuff
import numpy as np
from PIL import Image
# from pprint import pprint
def pil_analysis(pilimg):
"""Take image as a PIL Image object, return its palette in a dictionary of the form
tuple(color):list(color) for further editing of the palette."""
return palette_analysis(np.array(pilimg))
def img_dimensions(img):
"""Return dimensions of an image in numpy array form. Most of the times, equivalent to np.shape(img)."""
try:
width, height, channels = np.shape(img)
except ValueError:
width, height = np.shape(img)
channels = 1
return (width, height, channels)
def flat_img(img, dims=None):
"""Return the image flattened, i.e. a 2-dimensional array, where the second dimension maps only colors."""
if dims is None:
dims = img_dimensions(img)
return np.reshape(img, (dims[0]*dims[1], dims[2]))
@jit
def make_palette(flatimg):
    """Return all the colors in a flattened image."""
    return np.unique(flatimg, axis=0)
def dict_palette(palette):
"""Take the palette as in the output of make_palette, return them in a dictionary of the form
tuple(color):list(color) for further editing of the palette."""
return {tuple(col) : list(col) for col in palette}
def palette_analysis(img):
"""Take image, return its palette in a dictionary of the form
tuple(color):list(color) for further editing of the palette."""
return dict_palette(make_palette(flat_img(img)))
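# Illustrative sketch on dummy data (not part of the original module): the palette of a
# 2x2 RGB image maps each color tuple to an editable copy of itself.
# _img = np.array([[[255, 0, 0], [0, 255, 0]],
#                  [[255, 0, 0], [0, 0, 255]]], dtype=np.uint8)
# palette_analysis(_img)
# # -> {(0, 0, 255): [0, 0, 255], (0, 255, 0): [0, 255, 0], (255, 0, 0): [255, 0, 0]}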
def crude_remappers(flatimg, dictpalette):
"""Not to be used alone, responsible for internal transformation. Dict comprehension just extracted to allow JITting
of the rest with numba."""
    return {tuple(dictpalette[col]): flatimg == np.array(col) for col in dictpalette}
# -*- coding: utf-8 -*-
# test_imagecodecs.py
# Copyright (c) 2018-2019, <NAME>
# Copyright (c) 2018-2019, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unittests for the imagecodecs package.
:Author:
`<NAME> <https://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics. University of California, Irvine
:License: 3-clause BSD
:Version: 2019.12.3
"""
from __future__ import division, print_function
import sys
import os
import io
import re
import glob
import tempfile
import os.path as osp
import pytest
import numpy
from numpy.testing import assert_array_equal, assert_allclose
try:
import tifffile
except ImportError:
tifffile = None
try:
import czifile
except ImportError:
czifile = None
if (
'imagecodecs_lite' in os.getcwd() or
osp.exists(osp.join(osp.dirname(__file__), '..', 'imagecodecs_lite'))
):
try:
import imagecodecs_lite as imagecodecs
from imagecodecs_lite import _imagecodecs_lite # noqa
from imagecodecs_lite import imagecodecs as imagecodecs_py
except ImportError:
pytest.exit('the imagecodec-lite package is not installed')
lzma = zlib = bz2 = zstd = lz4 = lzf = blosc = bitshuffle = None
_jpeg12 = _jpegls = _zfp = None
else:
try:
import imagecodecs
import imagecodecs.imagecodecs as imagecodecs_py
from imagecodecs.imagecodecs import (lzma, zlib, bz2, zstd, lz4, lzf,
blosc, bitshuffle)
from imagecodecs import _imagecodecs # noqa
except ImportError:
pytest.exit('the imagecodec package is not installed')
try:
from imagecodecs import _jpeg12
except ImportError:
_jpeg12 = None
try:
from imagecodecs import _jpegls
except ImportError:
_jpegls = None
try:
from imagecodecs import _zfp
except ImportError:
_zfp = None
IS_PY2 = sys.version_info[0] == 2
IS_32BIT = sys.maxsize < 2**32
TEST_DIR = osp.dirname(__file__)
class TempFileName():
"""Temporary file name context manager."""
def __init__(self, name=None, suffix='', remove=True):
self.remove = bool(remove)
if not name:
self.name = tempfile.NamedTemporaryFile(prefix='test_',
suffix=suffix).name
else:
self.name = osp.join(tempfile.gettempdir(),
'test_%s%s' % (name, suffix))
def __enter__(self):
return self.name
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
try:
os.remove(self.name)
except Exception:
pass
def test_version():
"""Assert imagecodecs versions match docstrings."""
ver = ':Version: ' + imagecodecs.__version__
assert ver in __doc__
assert ver in imagecodecs.__doc__
assert imagecodecs.version().startswith('imagecodecs')
assert ver in imagecodecs_py.__doc__
if zlib:
assert imagecodecs.version(dict)['zlib'].startswith('1.')
@pytest.mark.skipif(not hasattr(imagecodecs, 'imread'),
reason='imread function missing')
@pytest.mark.filterwarnings('ignore:Possible precision loss')
def test_imread_imwrite():
"""Test imread and imwrite functions."""
imread = imagecodecs.imread
imwrite = imagecodecs.imwrite
data = image_data('rgba', 'uint8')
# codec from file extension
with TempFileName(suffix='.npy') as filename:
imwrite(filename, data, level=99)
im, codec = imread(filename, return_codec=True)
assert codec == imagecodecs.numpy_decode
assert_array_equal(data, im)
with TempFileName() as filename:
# codec from name
imwrite(filename, data, codec='numpy')
im = imread(filename, codec='npy')
assert_array_equal(data, im)
# codec from function
imwrite(filename, data, codec=imagecodecs.numpy_encode)
im = imread(filename, codec=imagecodecs.numpy_decode)
assert_array_equal(data, im)
# codec from name list
im = imread(filename, codec=['npz'])
assert_array_equal(data, im)
# autodetect
im = imread(filename)
assert_array_equal(data, im)
# fail
with pytest.raises(ValueError):
imwrite(filename, data)
with pytest.raises(ValueError):
im = imread(filename, codec='unknown')
def test_none():
"""Test NOP codec."""
encode = imagecodecs.none_encode
decode = imagecodecs.none_decode
data = b'None'
assert encode(data) is data
assert decode(data) is data
def test_bitorder():
"""Test BitOrder codec with bytes."""
decode = imagecodecs.bitorder_decode
data = b'\x01\x00\x9a\x02'
reverse = b'\x80\x00Y@'
# return new string
assert decode(data) == reverse
assert data == b'\x01\x00\x9a\x02'
# provide output
out = bytearray(len(data))
decode(data, out=out)
assert out == reverse
assert data == b'\x01\x00\x9a\x02'
# inplace
decode(data, out=data)
assert data == reverse
# bytes range
assert BYTES == decode(readfile('bytes.bitorder.bin'))
def test_bitorder_ndarray():
"""Test BitOrder codec with ndarray."""
decode = imagecodecs.bitorder_decode
data = numpy.array([1, 666], dtype='uint16')
reverse = numpy.array([128, 16473], dtype='uint16')
# return new array
assert_array_equal(decode(data), reverse)
# inplace
decode(data, out=data)
assert_array_equal(data, numpy.array([128, 16473], dtype='uint16'))
# array view
data = numpy.array([[1, 666, 1431655765, 62],
[2, 667, 2863311530, 32],
[3, 668, 1431655765, 30]], dtype='uint32')
reverse = numpy.array([[1, 666, 1431655765, 62],
[2, 16601, 1431655765, 32],
[3, 16441, 2863311530, 30]], dtype='uint32')
assert_array_equal(decode(data[1:, 1:3]), reverse[1:, 1:3])
# array view inplace
decode(data[1:, 1:3], out=data[1:, 1:3])
assert_array_equal(data, reverse)
def test_packints_decode():
"""Test PackInts decoder."""
decode = imagecodecs.packints_decode
decoded = decode(b'', 'B', 1)
assert len(decoded) == 0
decoded = decode(b'a', 'B', 1)
assert tuple(decoded) == (0, 1, 1, 0, 0, 0, 0, 1)
decoded = decode(b'ab', 'B', 2)
assert tuple(decoded) == (1, 2, 0, 1, 1, 2, 0, 2)
decoded = decode(b'abcd', 'B', 3)
assert tuple(decoded) == (3, 0, 2, 6, 1, 1, 4, 3, 3, 1)
decoded = decode(numpy.frombuffer(b'abcd', dtype='uint8'), 'B', 3)
assert tuple(decoded) == (3, 0, 2, 6, 1, 1, 4, 3, 3, 1)
PACKBITS_DATA = [
(b'', b''),
(b'X', b'\x00X'),
(b'123', b'\x02123'),
(b'112112', b'\xff1\x002\xff1\x002'),
(b'1122', b'\xff1\xff2'),
(b'1' * 126, b'\x831'),
(b'1' * 127, b'\x821'),
(b'1' * 128, b'\x811'),
(b'1' * 127 + b'foo', b'\x821\x00f\xffo'),
(b'12345678' * 16, # literal 128
b'\x7f1234567812345678123456781234567812345678123456781234567812345678'
b'1234567812345678123456781234567812345678123456781234567812345678'),
(b'12345678' * 17,
b'~1234567812345678123456781234567812345678123456781234567812345678'
b'123456781234567812345678123456781234567812345678123456781234567\x08'
b'812345678'),
(b'1' * 128 + b'12345678' * 17,
b'\x821\xff1~2345678123456781234567812345678123456781234567812345678'
b'1234567812345678123456781234567812345678123456781234567812345678'
b'12345678\x0712345678'),
(b'\xaa\xaa\xaa\x80\x00\x2a\xaa\xaa\xaa\xaa\x80\x00'
b'\x2a\x22\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa',
b'\xfe\xaa\x02\x80\x00\x2a\xfd\xaa\x03\x80\x00\x2a\x22\xf7\xaa')]
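# Note added for clarity: PackBits uses a signed header byte n. 0 <= n <= 127 means "copy
# the next n + 1 bytes literally", -127 <= n <= -1 (0x81..0xff) means "repeat the next
# byte 1 - n times", and -128 (0x80) is a no-op. For example, b'\x02123' is the literal
# b'123' and b'\xff1\xff2' expands to b'1122', matching the pairs above.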
@pytest.mark.parametrize('data', range(len(PACKBITS_DATA)))
@pytest.mark.parametrize('codec', ['encode', 'decode'])
def test_packbits(codec, data):
"""Test PackBits codec."""
encode = imagecodecs.packbits_encode
decode = imagecodecs.packbits_decode
uncompressed, compressed = PACKBITS_DATA[data]
if codec == 'decode':
assert decode(compressed) == uncompressed
elif codec == 'encode':
try:
assert encode(uncompressed) == compressed
except AssertionError:
# roundtrip
assert decode(encode(uncompressed)) == uncompressed
def test_packbits_nop():
"""Test PackBits decoding empty data."""
decode = imagecodecs.packbits_decode
assert decode(b'\x80') == b''
assert decode(b'\x80\x80') == b''
@pytest.mark.parametrize('output', [None, 'array'])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
def test_packbits_array(codec, output):
"""Test PackBits codec with arrays."""
encode = imagecodecs.packbits_encode
decode = imagecodecs.packbits_decode
uncompressed, compressed = PACKBITS_DATA[-1]
shape = (2, 7, len(uncompressed))
data = numpy.empty(shape, dtype='uint8')
data[..., :] = numpy.frombuffer(uncompressed, dtype='uint8')
compressed = compressed * (shape[0] * shape[1])
if codec == 'encode':
if output == 'array':
out = numpy.empty(data.size, data.dtype)
assert_array_equal(encode(data, out=out),
numpy.frombuffer(compressed, dtype='uint8'))
else:
assert encode(data) == compressed
else:
if output == 'array':
out = numpy.empty(data.size, data.dtype)
assert_array_equal(decode(compressed, out=out), data.flat)
else:
assert decode(compressed) == data.tobytes()
@pytest.mark.parametrize('output', ['new', 'out', 'inplace'])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
@pytest.mark.parametrize(
'kind', ['u1', 'u2', 'u4', 'u8', 'i1', 'i2', 'i4', 'i8', 'f4', 'f8', 'B',
pytest.param('b', marks=pytest.mark.skipif(
sys.version_info[0] == 2, reason='Python 2'))])
@pytest.mark.parametrize('func', ['delta', 'xor'])
def test_delta(output, kind, codec, func):
"""Test Delta codec."""
if func == 'delta':
encode = imagecodecs.delta_encode
decode = imagecodecs.delta_decode
encode_py = imagecodecs_py.delta_encode
# decode_py = imagecodecs_py.imagecodecs.delta_decode
elif func == 'xor':
encode = imagecodecs.xor_encode
decode = imagecodecs.xor_decode
encode_py = imagecodecs_py.xor_encode
# decode_py = imagecodecs_py.imagecodecs.xor_decode
bytetype = bytearray
if kind == 'b':
bytetype = bytes
kind = 'B'
axis = -2 # do not change
dtype = numpy.dtype(kind)
if kind[0] in 'iuB':
low = numpy.iinfo(dtype).min
high = numpy.iinfo(dtype).max
data = numpy.random.randint(low, high, size=33 * 31 * 3,
dtype=dtype).reshape(33, 31, 3)
else:
low, high = -1e5, 1e5
data = numpy.random.randint(low, high, size=33 * 31 * 3,
dtype='i4').reshape(33, 31, 3)
data = data.astype(dtype)
data[16, 14] = [0, 0, 0]
data[16, 15] = [low, high, low]
data[16, 16] = [high, low, high]
data[16, 17] = [low, high, low]
data[16, 18] = [high, low, high]
data[16, 19] = [0, 0, 0]
if kind == 'B':
# data = data.reshape(-1)
data = data.tobytes()
diff = encode_py(data, axis=0)
if output == 'new':
if codec == 'encode':
encoded = encode(data, out=bytetype)
assert encoded == diff
elif codec == 'decode':
decoded = decode(diff, out=bytetype)
assert decoded == data
elif output == 'out':
if codec == 'encode':
encoded = bytetype(len(data))
encode(data, out=encoded)
assert encoded == diff
elif codec == 'decode':
decoded = bytetype(len(data))
decode(diff, out=decoded)
assert decoded == data
elif output == 'inplace':
if codec == 'encode':
encoded = bytetype(data)
encode(encoded, out=encoded)
assert encoded == diff
elif codec == 'decode':
decoded = bytetype(diff)
decode(decoded, out=decoded)
assert decoded == data
else:
# if func == 'xor' and kind in ('f4', 'f8'):
# with pytest.raises(ValueError):
# encode(data, axis=axis)
# pytest.skip("XOR codec not implemented for float data")
diff = encode_py(data, axis=-2)
if output == 'new':
if codec == 'encode':
encoded = encode(data, axis=axis)
assert_array_equal(encoded, diff)
elif codec == 'decode':
decoded = decode(diff, axis=axis)
assert_array_equal(decoded, data)
elif output == 'out':
if codec == 'encode':
encoded = numpy.zeros_like(data)
encode(data, axis=axis, out=encoded)
assert_array_equal(encoded, diff)
elif codec == 'decode':
decoded = numpy.zeros_like(data)
decode(diff, axis=axis, out=decoded)
assert_array_equal(decoded, data)
elif output == 'inplace':
if codec == 'encode':
encoded = data.copy()
encode(encoded, axis=axis, out=encoded)
assert_array_equal(encoded, diff)
elif codec == 'decode':
decoded = diff.copy()
decode(decoded, axis=axis, out=decoded)
assert_array_equal(decoded, data)
@pytest.mark.parametrize('output', ['new', 'out'])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
@pytest.mark.parametrize('endian', ['le', 'be'])
@pytest.mark.parametrize('planar', ['rgb', 'rrggbb'])
def test_floatpred(planar, endian, output, codec):
"""Test FloatPred codec."""
encode = imagecodecs.floatpred_encode
decode = imagecodecs.floatpred_decode
data = numpy.fromfile(
datafiles('rgb.bin'), dtype='<f4').reshape(33, 31, 3)
if planar == 'rgb':
axis = -2
if endian == 'le':
encoded = numpy.fromfile(
datafiles('rgb.floatpred_le.bin'), dtype='<f4')
encoded = encoded.reshape(33, 31, 3)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
elif endian == 'be':
data = data.astype('>f4')
encoded = numpy.fromfile(
datafiles('rgb.floatpred_be.bin'), dtype='>f4')
encoded = encoded.reshape(33, 31, 3)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
elif planar == 'rrggbb':
axis = -1
data = numpy.ascontiguousarray(numpy.moveaxis(data, 2, 0))
if endian == 'le':
encoded = numpy.fromfile(
datafiles('rrggbb.floatpred_le.bin'), dtype='<f4')
encoded = encoded.reshape(3, 33, 31)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
elif endian == 'be':
data = data.astype('>f4')
encoded = numpy.fromfile(
datafiles('rrggbb.floatpred_be.bin'), dtype='>f4')
encoded = encoded.reshape(3, 33, 31)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
def test_lzw_msb():
"""Test LZW decoder with MSB."""
# TODO: add test_lzw_lsb
decode = imagecodecs.lzw_decode
for data, decoded in [
(b'\x80\x1c\xcc\'\x91\x01\xa0\xc2m6\x99NB\x03\xc9\xbe\x0b'
b'\x07\x84\xc2\xcd\xa68|"\x14 3\xc3\xa0\xd1c\x94\x02\x02\x80',
b'say hammer yo hammer mc hammer go hammer'),
(b'\x80\x18M\xc6A\x01\xd0\xd0e\x10\x1c\x8c\xa73\xa0\x80\xc7\x02'
b'\x10\x19\xcd\xe2\x08\x14\x10\xe0l0\x9e`\x10\x10\x80',
b'and the rest can go and play'),
(b'\x80\x18\xcc&\xe19\xd0@t7\x9dLf\x889\xa0\xd2s',
b"can't touch this"),
(b'\x80@@', b'')]:
assert decode(data) == decoded
@pytest.mark.parametrize('output', ['new', 'size', 'ndarray', 'bytearray'])
def test_lzw_decode(output):
"""Test LZW decoder of input with horizontal differencing."""
decode = imagecodecs.lzw_decode
delta_decode = imagecodecs.delta_decode
data = readfile('bytes.lzw_horizontal.bin')
decoded_size = len(BYTES)
if output == 'new':
decoded = decode(data)
decoded = numpy.frombuffer(decoded, 'uint8').reshape(16, 16)
delta_decode(decoded, out=decoded, axis=-1)
assert_array_equal(BYTESIMG, decoded)
elif output == 'size':
decoded = decode(data, out=decoded_size)
decoded = numpy.frombuffer(decoded, 'uint8').reshape(16, 16)
delta_decode(decoded, out=decoded, axis=-1)
        assert_array_equal(BYTESIMG, decoded)
import environments.ControlledRangeVariance
from opebet import wealth_lb_1d, wealth_lb_2d, wealth_2d, wealth_lb_2d_individual_qps
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
def getenv(wsq, tv=None):
wsupport = [0, 0.5, 2, 100]
env = environments.ControlledRangeVariance.ControlledRangeVariance(seed=90210, wsupport=wsupport, expwsq=wsq, tv=tv)
return env, env.getpw(), env.range(), env.expectedwsq()
def compress(data):
# could be improved but it's used only for debugging.
sd = sorted(tuple(datum) for datum in data)
from itertools import groupby
return [(len(list(g)),) + tuple(map(float, k)) for k, g in groupby(sd)]
def produce_results(env, method, alpha, ndata=100, reps=10):
wmin, wmax = env.range()
ubd = np.zeros(ndata)
lbd = np.zeros(ndata)
cov = np.zeros((reps, ndata))
width = np.zeros((reps, ndata))
bounds = []
for i in range(reps):
(truevalue, data) = env.sample(ndata)
try:
cs = method(data=data, wmin=wmin, wmax=wmax, alpha=alpha)
assert np.isfinite(cs[0]).all() and np.isfinite(cs[1]).all()
assert np.all(cs[1] >= cs[0] - 1e-4)
assert cs[1][-1] <= 1 + 1e-4
assert cs[0][-1] >= -1e-4
except:
import json
with open('bad_case.json', 'w') as out:
perm_state = list(env.perm_state)
perm_state[1] = list(map(int, perm_state[1]))
out.write(json.dumps((float(truevalue), compress(data), perm_state, float(wmin), float(wmax), alpha)))
print('truevalue was {}'.format(truevalue))
print('data was {}'.format(compress(data)))
print('wmin, wmax was {} {}'.format(wmin, wmax))
print('ci was {} {}'.format(cs[0][-1], cs[1][-1]))
raise
np.greater_equal(cs[1], truevalue, out=ubd)
np.less_equal(cs[0], truevalue, out=lbd)
cov[i, :] = ubd * lbd
width[i, :] += np.subtract(cs[1], cs[0])
bounds.append((truevalue, cs[0], cs[1]))
upper_ends = [d[2][-1] for d in bounds]
lower_ends = [d[1][-1] for d in bounds]
upperbounded = [1 if d[0] <= d[2][-1] else 0 for d in bounds]
lowerbounded = [1 if d[1][-1] <= d[0] else 0 for d in bounds]
covered = [1 if u * l > 0 else 0 for (u, l) in zip(upperbounded, lowerbounded)]
final_width = [d[2][-1] - d[1][-1] for d in bounds]
def std_mean(x):
return np.std(x, ddof=1) / np.sqrt(len(x) - 1)
dbg = {
'cov': np.mean(covered),
'covstd': std_mean(covered),
'ubcov': np.mean(upperbounded),
'lbcov': np.mean(lowerbounded),
'final_width': np.mean(final_width),
'widthstd': std_mean(final_width),
'widthlo': np.quantile(final_width, q=[0.05])[0],
'widthhi': np.quantile(final_width, q=[0.95])[0],
'ub': np.mean(upper_ends),
'lb': np.mean(lower_ends),
}
verbose = True
if verbose:
print('{}'.format((ndata, {k: np.round(v, 4) for k, v in dbg.items()})), flush=True)
return (ndata,
{
'cov': np.mean(cov, axis=0),
'covstd': np.std(cov, axis=0, ddof=1) / np.sqrt(cov.shape[0] - 1),
'width': np.mean(width, axis=0),
'widtstd': np.std(width, axis=0, ddof=1) / np.sqrt(width.shape[0] - 1),
},
)
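# Illustrative usage sketch (the keyword signature expected from the imported bound
# constructors, e.g. wealth_lb_2d, is assumed here rather than verified):
# env, pw, wrange, ewsq = getenv(wsq=10.0)
# ndata, curves = produce_results(env, wealth_lb_2d, alpha=0.05, ndata=200, reps=20)
# # curves['cov'] and curves['width'] are per-sample-size averages over the repetitions.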
def produce_results_ci(env, method, alpha, ndata=100, reps=10):
wmin, wmax = env.range()
ubd = np.zeros(1)
lbd = np.zeros(1)
cov = np.zeros(reps)
    width = np.zeros(reps)
import os.path as osp
import torch
import pandas as pd
import numpy as np
class CateAttrEvaluator(object):
""" Evaluate DeepFashion topk recall.
Args:
category_topk (list(int)) topk category recall
attr_topk (list(int)): topk attribute recall
data_root (str): root directory of dataset
Usage:
```python
evaluator = CateAttrEvaluator()
for batch in loader:
out = model(batch)
evaluator.add(out, batch)
res = evaluator.evaluate()
for topk, accuracy in res['category_accuracy_topk'].items():
print('metrics/category_top{}'.format(topk), accuracy)
for topk, accuracy in res['attr_group_recall'].items():
for attr_type in range(1, 6):
print('metrics/attr_top{}_type_{}_{}_recall'.format(
topk, attr_type, const.attrtype2name[attr_type]), accuracy[attr_type - 1]
)
print('metrics/attr_top{}_all_recall'.format(topk), res['attr_recall'][topk])
```
"""
def __init__(self,
category_topk=(1, 3, 5),
attr_topk=(3, 5),
data_root="data/df"):
self.category_topk = category_topk
self.attr_topk = attr_topk
self.reset()
with open(osp.join(data_root, 'annotations/list_attr_cloth.txt')) as f:
ret = []
f.readline()
f.readline()
for line in f:
line = line.split(' ')
while line[-1].strip().isdigit() is False:
line = line[:-1]
ret.append([
' '.join(line[0:-1]).strip(),
int(line[-1])
])
attr_type = pd.DataFrame(ret, columns=['attr_name', 'type'])
attr_type['attr_index'] = ['attr_' + str(i) for i in range(1000)]
attr_type.set_index('attr_index', inplace=True)
self.attr_type = attr_type
self.attrtype2name = {1: 'texture', 2: 'fabric', 3: 'shape', 4: 'part', 5: 'style'}
def reset(self):
self.category_accuracy = []
self.attr_group_gt = np.array([0.] * 5)
self.attr_group_tp = np.zeros((5, len(self.attr_topk)))
self.attr_all_gt = 0
self.attr_all_tp = np.zeros((len(self.attr_topk),))
def category_topk_accuracy(self, output, target):
""" Compute topk category recall
Notes:
N: number of samples
K: number of categorys
Args:
output (torch.Tensor(N, K)): prediction cateogory score
target (torch.Tensor(N, ))
"""
if isinstance(output, np.ndarray):
output = torch.from_numpy(output)
if isinstance(target, np.ndarray):
target = torch.from_numpy(target)
with torch.no_grad():
maxk = max(self.category_topk)
batch_size = target.shape[0]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in self.category_topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100 / batch_size))
for i in range(len(res)):
res[i] = res[i].cpu().numpy()[0] / 100
self.category_accuracy.append(res)
def attr_count(self, output, sample):
attr_group_gt = np.array([0.] * 5)
attr_group_tp = np.zeros((5, len(self.attr_topk)))
attr_all_tp = np.zeros((len(self.attr_topk),))
if isinstance(sample['attr'], torch.Tensor):
target = sample['attr'].cpu().numpy()
elif isinstance(sample['attr'], np.ndarray):
target = sample['attr']
else:
raise TypeError("Error type of sampe['attr']")
target = np.split(target, target.shape[0])
target = [x[0, :] for x in target]
if isinstance(output['attr'], torch.Tensor):
pred = output['attr'].cpu().detach().numpy()
elif isinstance(output['attr'], np.ndarray):
pred = output['attr']
else:
raise TypeError("Error type of output['attr']")
pred = np.split(pred, pred.shape[0])
pred = [x[0, 1, :] for x in pred]
for batch_idx in range(len(target)):
result_df = pd.DataFrame([target[batch_idx], pred[batch_idx]],
index=['target', 'pred'], columns=['attr_' + str(i) for i in range(1000)])
result_df = result_df.transpose()
result_df = result_df.join(self.attr_type[['type']])
ret = []
for i in range(1, 6):
ret.append(result_df[result_df['type'] == i]['target'].sum())
attr_group_gt += np.array(ret)
ret = []
result_df = result_df.sort_values('pred', ascending=False)
attr_all_tp += np.array([
result_df.head(k)['target'].sum() for k in self.attr_topk
])
for i in range(1, 6):
sort_df = result_df[result_df['type'] == i]
ret.append([
sort_df.head(k)['target'].sum() for k in self.attr_topk
])
attr_group_tp += np.array(ret)
self.attr_group_gt += attr_group_gt
self.attr_group_tp += attr_group_tp
self.attr_all_gt += attr_group_gt.sum()
self.attr_all_tp += attr_all_tp
def add(self, output, sample):
self.category_topk_accuracy(output['cate'], sample['category_id'])
self.attr_count(output, sample)
def evaluate(self):
category_accuracy = np.array(self.category_accuracy).mean(axis=0)
category_accuracy_topk = {}
for i, top_n in enumerate(self.category_topk):
category_accuracy_topk[top_n] = category_accuracy[i]
attr_group_recall = {}
attr_recall = {}
for i, top_n in enumerate(self.attr_topk):
attr_group_recall[top_n] = self.attr_group_tp[..., i] / self.attr_group_gt
attr_recall[top_n] = self.attr_all_tp[i] / self.attr_all_gt
return {
'category_accuracy_topk': category_accuracy_topk,
'attr_group_recall': attr_group_recall,
'attr_recall': attr_recall,
}
class CateCalculator(object):
"""Calculate Category prediction top-k recall rate
Usage:
```python
cate_calculator = CateCalculator()
for data in loader:
result = model(data)
cate_calculator.collect_result(result, data)
cate_calculator.show_result()
```
"""
def __init__(self, category_num, topns=[1, 3], show_cate_name=False, cate_name_file=None):
self.collector = dict()
self.num_cate = category_num
self.topns = topns
# true positive
for topi in topns:
self.collector['top%s' % str(topi)] = dict()
tp = np.zeros(self.num_cate)
fn = np.zeros(self.num_cate)
self.collector['top%s' % str(topi)]['tp'] = tp
self.collector['top%s' % str(topi)]['fn'] = fn
""" topn recall rate """
self.recall = dict()
# collect target per category
self.target_per_cate = np.zeros(self.num_cate)
# num of total predictions
self.total = 0
# topn category prediction
self.topns = topns
self.show_cate_name = show_cate_name
if self.show_cate_name:
assert cate_name_file is not None
self.cate_dict = {}
cate_names = open(cate_name_file).readlines()
for i, cate_name in enumerate(cate_names[2:]):
self.cate_dict[i] = cate_name.split()[0]
def collect(self, indexes, target, topk):
"""calculate and collect recall rate
Args:
indexes(list): predicted top-k indexes
target(list): ground-truth
topk(str): top-k, e.g., top1, top3, top5
"""
for i, cnt in enumerate(self.collector[topk]['tp']): # true-positive
if i in indexes and i in target:
self.collector[topk]['tp'][i] += 1
for i, cnt in enumerate(self.collector[topk]['fn']): # false negative
if i not in indexes and i in target:
self.collector[topk]['fn'][i] += 1
def collect_result(self, pred, target):
"""collect_result.
Args:
pred: Tensor of shape [N, category_num]
target: Tensor of shape [N, 1]
"""
if isinstance(pred, torch.Tensor):
data = pred.data.cpu().numpy()
elif isinstance(pred, np.ndarray):
data = pred
else:
raise TypeError('type {} cannot be calculated.'.format(type(pred)))
for i in range(pred.shape[0]):
self.total += 1
indexes = np.argsort(data[i])[::-1]
for k in self.topns:
idx = indexes[:k]
self.collect(idx, target[i], 'top%d'%k)
def compute_one_recall(self, tp, fn):
""" Mean recall for each category. """
empty = 0
recall = np.zeros(tp.shape)
for i, num in enumerate(tp):
# ground truth number = true_positive(tp) + false_negative(fn)
if tp[i] + fn[i] == 0:
empty += 1
continue
else:
recall[i] = float(tp[i]) / float(tp[i] + fn[i])
sorted_recall = sorted(recall)[::-1]
return 100 * sum(sorted_recall) / (len(sorted_recall) - empty)
def compute_recall(self):
for key, top in self.collector.items():
self.recall[key] = self.compute_one_recall(top['tp'], top['fn'])
def show_result(self, batch_idx=None):
print('----------- Category Prediction ----------')
if batch_idx is not None:
print('Batch[%d]' % batch_idx)
else:
print('Total')
self.compute_recall()
print('[ Recall Rate ]')
for k in self.topns:
print('top%d = %.2f' % (k, self.recall['top%d' % k]))
def show_per_cate_result(self):
for key, top in self.collector.items():
tp = top['tp']
fn = top['fn']
            recall = np.zeros(tp.shape)
import itertools
import os
import traceback
from copy import deepcopy
from typing import Callable, Dict, List, Tuple
import GPy
import numpy as np
import paramz
import stan_utility
from GPy.core.parameterization.param import Param
from GPy.likelihoods import Gaussian
from GPy.util import choleskies
from GPy.util.linalg import dpotrs, dtrtrs, jitchol, pdinv, tdot
import emukit
from emukit.core.interfaces import IModel
from . import util
from .inferences import StanPosterior
from .inferences import ep_batch_comparison as ep
from .inferences import vi_batch_comparison as vi
class ComparisonGP(GPy.core.Model):
"""
A class for all common methods needed for the different ComparisonGP wrappers
"""
def get_current_best(self) -> float:
"""
:return: minimum of means of predictions at all input locations (needed by q-EI)
"""
return min(self.Y)
def get_y_pred(self) -> np.ndarray:
"""
:return: GP mean at inputs used to compute the posterior approximation (needed by q-EI)
"""
y_pred, _ = self.predict(self.X, include_likelihood=False)
return y_pred
def log_likelihood(self) -> float:
"""
:return: log marginal likelihood needed for optimizing hyper parameters and performing model comparison
"""
return self._log_marginal_likelihood
def predict(
self, Xnew: np.ndarray, full_cov: bool = False, include_likelihood=True
) -> Tuple[np.ndarray, np.ndarray]:
"""
Predictive mean and covariance of the GP at the input location
        :param Xnew: Locations at which the predictions are wanted
:param full_cov: If the user wants the function to return the full covariance or only the diagonal
:param include_likelihood: If the user wants the function to add the noise of the observations to the prediction covariance
:return: predictive mean and predictive covariance
"""
pred_mean, pred_var = self.posterior._raw_predict(
self.kern, Xnew, self.X, full_cov=full_cov
) # self.posterior._raw_predict(self.kern, np.hstack([Xnew,ki]), np.hstack([self.X, self.ki]), full_cov=full_cov)
if include_likelihood:
pred_var = pred_var + self.likelihood.variance
return pred_mean, pred_var
def predict_noiseless(self, Xnew, full_cov=False) -> Tuple[np.ndarray, np.ndarray]:
"""
Predictive mean and covariance of the GP latent function at the input location
:param Xnew: Locations the prections are wanted at
:param full_cov: If the user wants the function to return the full covariance or only the diagonal
:return: predictive latent mean and predictive latent covariance
"""
return self.predict(Xnew, full_cov=full_cov, include_likelihood=False)
def posterior_samples_f(self, X: np.ndarray, size: int = 10, **predict_kwargs) -> np.ndarray:
"""
Draw random samples from the posterior predictive distribution
        :param X: Locations at which the posterior samples should be drawn
:param size: Number of posterior samples
:return: Simulated posterior samples
"""
predict_kwargs["full_cov"] = True # Always use the full covariance for posterior samples.
predict_kwargs["include_likelihood"] = False
m, v = self.predict(X, **predict_kwargs)
def sim_one_dim(m, v):
# Draw posterior sample in one dimension
return np.random.multivariate_normal(m, v, size).T
if self.output_dim == 1:
return sim_one_dim(m.flatten(), v)[:, np.newaxis, :]
else:
fsim = np.empty((X.shape[0], self.output_dim, size))
for d in range(self.output_dim):
if v.ndim == 3:
fsim[:, d, :] = sim_one_dim(m[:, d], v[:, :, d])
else:
fsim[:, d, :] = sim_one_dim(m[:, d], v)
return fsim
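# Note on the posterior_samples_f output shape (derived from the code above): for X with
# 5 rows, size=10 and output_dim=1 the returned array has shape (5, 1, 10), i.e.
# (n_locations, n_outputs, n_samples).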
class EPComparisonGP(ComparisonGP):
"""
    GPy wrapper for a GP model over preferential batch observations, with the posterior approximated using Expectation Propagation
    :param X: All locations of both direct observations and batch comparisons
    :param y: Direct observations as a list of tuples giving the location index (row in X) and the observation value.
    :param yc: Batch comparisons as a list of lists of tuples. Each batch is a list and each tuple gives one comparison (winner index, loser index)
    :param kernel: A GPy kernel used
    :param likelihood: A GPy likelihood. Only Gaussian likelihoods are accepted
    :param name: Name of the model. Defaults to 'EPComparisonGP'
    :param ep_max_itt: Maximum number of iterations used when approximating the posterior in EP.
    :param eta: Parameter for fractional EP updates.
    :param delta: Damping factor for the EP updates.
    :param get_logger: Function for receiving the logger to which the prints are forwarded.
"""
def __init__(
self,
X: np.ndarray,
y: List[Tuple[int, float]],
yc: List[List[Tuple[int, int]]],
kernel: GPy.kern.Kern,
likelihood: GPy.likelihoods.Gaussian,
name: str = "EPComparisonGP",
ep_max_itt: int = 100,
delta: float = 0.5,
eta: float = 0.5,
get_logger: Callable = None,
):
super(EPComparisonGP, self).__init__(name=name)
self.N, self.D = X.shape[0], X.shape[1]
self.output_dim = 1 # hard coded, the code doesn't support multi output case
self.X = X
self.y = y
self.yc = yc
self.kern = kernel
self.likelihood = likelihood
# A helper parameter for EP. Each observation could possibly come from different kernels and likelihoods.
# The inference supports this already, but this GPy wrapper doesn't
self.sigma2s = self.likelihood.variance * np.ones((X.shape[0], 1), dtype=int)
self.ep_max_itt = ep_max_itt
self.eta = eta
self.delta = delta
self.link_parameter(self.kern)
self.link_parameter(self.likelihood)
self.posterior, self.ga_approx, self.Y = None, None, None
self.get_logger = get_logger
def parameters_changed(self):
"""
Update the posterior approximation after kernel or likelihood parameters have changed or there are new observations
"""
# Recompute the posterior approximation
self.posterior, self._log_marginal_likelihood, self.grad_dict, self.ga_approx = ep.ep_comparison(
self.X,
self.y,
self.yc,
self.kern,
self.sigma2s,
max_itt=self.ep_max_itt,
tol=1e-6,
delta=self.delta,
eta=self.eta,
ga_approx_old=self.ga_approx,
get_logger=self.get_logger,
)
# predict Y at inputs (needed by q-EI)
self.Y = self.get_y_pred()
def set_XY(self, X: np.ndarray, y: List[Tuple[int, float]], yc: List[List[Tuple[int, int]]]):
"""
Set new observations and recompute the posterior
:param X: All locations of both direct observations and batch comparisons
        :param y: Direct observations as a list of tuples giving the location index (row in X) and the observation value.
        :param yc: Batch comparisons as a list of lists of tuples. Each batch is a list and each tuple gives one comparison (winner index, loser index)
"""
self.X = X
self.y = y
self.yc = yc
self.sigma2s = self.likelihood.variance * np.ones((X.shape[0], 1), dtype=int)
self.parameters_changed()
class VIComparisonGP(ComparisonGP):
"""
    GPy wrapper for a GP model over preferential batch observations, with the posterior approximated using Variational Inference
    :param X: All locations of both direct observations and batch comparisons
    :param y: Direct observations as a list of tuples giving the location index (row in X) and the observation value.
    :param yc: Batch comparisons as a list of lists of tuples. Each batch is a list and each tuple gives one comparison (winner index, loser index)
    :param kernel: A GPy kernel used
    :param likelihood: A GPy likelihood. Only Gaussian likelihoods are accepted
    :param vi_mode: A string indicating whether to use full rank or mean field VI
    :param name: Name of the model. Defaults to 'VIComparisonGP'
    :param max_iters: Maximum number of iterations used when approximating the posterior in VI.
    :param get_logger: Function for receiving the logger to which the prints are forwarded.
"""
def __init__(
self,
X: np.ndarray,
y: List[Tuple[int, float]],
yc: List[List[Tuple[int, int]]],
kernel: GPy.kern.Kern,
likelihood: Gaussian,
vi_mode: str = "fr",
name: str = "VIComparisonGP",
max_iters: int = 50,
get_logger: Callable = None,
):
super(VIComparisonGP, self).__init__(name=name)
self.N, self.D = X.shape[0], X.shape[1]
self.output_dim = 1
self.get_logger = get_logger
self.X = X
self.y = y
self.yc = yc
self.max_iters = max_iters
self.vi_mode = vi_mode
self.kern = kernel
self.likelihood = likelihood
self.sigma2s = self.likelihood.variance * np.ones((X.shape[0], 1), dtype=int)
jitter = 1e-6
K = self.kern.K(X)
L = np.linalg.cholesky(K + np.identity(K.shape[0]) * jitter)
self.alpha = np.zeros((self.N, 1))
self.beta = np.ones((self.N, 1))
self.posterior = None
# If we are using full rank VI, we initialize it with mean field VI
if self.vi_mode == "FRVI":
self.posterior, _, _, self.alpha, self.beta = vi.vi_comparison(
self.X, self.y, self.yc, self.kern, self.sigma2s, self.alpha, self.beta, max_iters=50, method="mf"
)
self.beta = choleskies._triang_to_flat_pure(jitchol(self.posterior.covariance)[None, :])
def parameters_changed(self):
"""
Update the posterior approximation after kernel or likelihood parameters have changed or there are new observations
"""
if self.vi_mode == "fr":
method = "fr"
else:
method = "mf"
self.posterior, self._log_marginal_likelihood, self.grad_dict, alpha, beta = vi.vi_comparison(
self.X,
self.y,
self.yc,
self.kern,
self.sigma2s,
self.alpha,
self.beta,
max_iters=self.max_iters,
method=method,
get_logger=self.get_logger,
)
self.alpha = alpha
self.beta = beta
self.Y = self.get_y_pred()
def set_XY(self, X: np.ndarray, y: List[Tuple[int, float]], yc: List[List[Tuple[int, int]]]):
"""
Set new observations and recompute the posterior
:param X: All locations of both direct observations and batch comparisons
        :param y: Direct observations as a list of tuples giving the location index (row in X) and the observation value.
        :param yc: Batch comparisons as a list of lists of tuples. Each batch is a list and each tuple gives one comparison (winner index, loser index)
"""
self.N, self.D = X.shape[0], X.shape[1]
self.X = X
self.y = y
self.yc = yc
        self.sigma2s = self.likelihood.variance * np.ones((X.shape[0], 1), dtype=int)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 2020
Class to read and manipulate CryoSat-2 waveform data
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
http://www.numpy.org
http://www.scipy.org/NumPy_for_Matlab_Users
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 08/2020: flake8 compatible binary regular expression strings
Forked 02/2020 from read_cryosat_L1b.py
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import numpy as np
import pointCollection as pc
import netCDF4
import re
import os
class data(pc.data):
np.seterr(invalid='ignore')
def __default_field_dict__(self):
"""
Define the default fields that get read from the CryoSat-2 file
"""
field_dict = {}
field_dict['Location'] = ['days_J2k','Day','Second','Micsec','USO_Corr',
'Mode_ID','SSC','Inst_config','Rec_Count','Lat','Lon','Alt','Alt_rate',
'Sat_velocity','Real_beam','Baseline','ST_ID','Roll','Pitch','Yaw','MCD']
field_dict['Data'] = ['TD', 'H_0','COR2','LAI','FAI','AGC_CH1','AGC_CH2',
'TR_gain_CH1','TR_gain_CH2','TX_Power','Doppler_range','TR_inst_range',
'R_inst_range','TR_inst_gain','R_inst_gain','Internal_phase',
'External_phase','Noise_power','Phase_slope']
field_dict['Geometry'] = ['dryTrop','wetTrop','InvBar','DAC','Iono_GIM',
'Iono_model','ocTideElv','lpeTideElv','olTideElv','seTideElv','gpTideElv',
'Surf_type','Corr_status','Corr_error']
field_dict['Waveform_20Hz'] = ['Waveform','Linear_Wfm_Multiplier',
'Power2_Wfm_Multiplier','N_avg_echoes']
field_dict['METADATA'] = ['MPH','SPH']
return field_dict
def from_dbl(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from binary formats
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
# CryoSat-2 Mode record sizes
i_size_timestamp = 12
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 125
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# check baseline from file to set i_record_size and allocation function
if (BASELINE == 'C'):
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2 + 6*4 + 3*3*4 + 3*2 + 4*4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_BC_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_BC_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_BC_RW*2 + \
n_SARIN_BC_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baseline C
read_cryosat_variables = self.cryosat_baseline_C
else:
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2+ 6*4 + 3*3*4 + 4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_RW*2 + \
n_SARIN_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baselines A and B
read_cryosat_variables = self.cryosat_baseline_AB
# get dataset MODE from PRODUCT portion of file name
# set record sizes and DS_TYPE for read_DSD function
self.MODE = re.findall('(LRM|SAR|SIN)', PRODUCT).pop()
if (self.MODE == 'LRM'):
i_record_size = i_record_size_LRM_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SAR'):
i_record_size = i_record_size_SAR_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SIN'):
i_record_size = i_record_size_SARIN_L1b
DS_TYPE = 'CS_L1B'
# read the input file to get file information
fid = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fid)
os.close(fid)
# num DSRs from SPH
j_num_DSR = np.int32(file_info.st_size//i_record_size)
# print file information
if verbose:
print(full_filename)
print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size == file_info.st_size):
print('No Header on file')
print('The number of DSRs is: {0:d}'.format(j_num_DSR))
else:
print('Header on file')
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size != file_info.st_size):
# If there are MPH/SPH/DSD headers
s_MPH_fields = self.read_MPH(full_filename)
j_sph_size = np.int32(re.findall(r'[-+]?\d+',s_MPH_fields['SPH_SIZE']).pop())
s_SPH_fields = self.read_SPH(full_filename, j_sph_size)
# extract information from DSD fields
s_DSD_fields = self.read_DSD(full_filename, DS_TYPE=DS_TYPE)
# extract DS_OFFSET
j_DS_start = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DS_OFFSET']).pop())
# extract number of DSR in the file
j_num_DSR = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['NUM_DSR']).pop())
# check the record size
j_DSR_size = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DSR_SIZE']).pop())
# minimum size is start of the read plus number of records to read
j_check_size = j_DS_start + (j_DSR_size*j_num_DSR)
if verbose:
print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))
print('The number of DSRs is {0:d}'.format(j_num_DSR))
print('The size of the DSR is {0:d}'.format(j_DSR_size))
# check if invalid file size
if (j_check_size > file_info.st_size):
raise IOError('File size error')
# extract binary data from input CryoSat data file (skip headers)
fid = open(os.path.expanduser(full_filename), 'rb')
cryosat_header = fid.read(j_DS_start)
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# add headers to output dictionary as METADATA
CS_L1b_mds['METADATA'] = {}
CS_L1b_mds['METADATA']['MPH'] = s_MPH_fields
CS_L1b_mds['METADATA']['SPH'] = s_SPH_fields
CS_L1b_mds['METADATA']['DSD'] = s_DSD_fields
# close the input CryoSat binary file
fid.close()
else:
# If there are not MPH/SPH/DSD headers
# extract binary data from input CryoSat data file
fid = open(os.path.expanduser(full_filename), 'rb')
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# close the input CryoSat binary file
fid.close()
# if unpacking the units
if unpack:
CS_l1b_scale = self.cryosat_scaling_factors()
# for each dictionary key
for group in CS_l1b_scale.keys():
# for each variable
for key,val in CS_L1b_mds[group].items():
# check if val is the 20Hz waveform beam variables
if isinstance(val, dict):
# for each waveform beam variable
for k,v in val.items():
# scale variable
CS_L1b_mds[group][key][k] = CS_l1b_scale[group][key][k]*v.copy()
else:
# scale variable
CS_L1b_mds[group][key] = CS_l1b_scale[group][key]*val.copy()
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def from_nc(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from netCDF4 format data
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
print(full_filename) if verbose else None
# get dataset MODE from PRODUCT portion of file name
self.MODE = re.findall(r'(LRM|FDM|SAR|SIN)', PRODUCT).pop()
# read level-2 CryoSat-2 data from netCDF4 file
CS_L1b_mds = self.cryosat_baseline_D(full_filename, unpack=unpack)
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def calc_GPS_time(self, day, second, micsec):
"""
Calculate the GPS time (seconds since Jan 6, 1980 00:00:00)
"""
# TAI time is ahead of GPS by 19 seconds
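        # 7300 days separate the GPS epoch (1980-01-06) from the TAI reference epoch (2000-01-01)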
return (day + 7300.0)*86400.0 + second.astype('f') + micsec/1e6 - 19
def count_leap_seconds(self, GPS_Time):
"""
Count number of leap seconds that have passed for given GPS times
"""
# GPS times for leap seconds
leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
# number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
i_records,i_blocks = np.nonzero(GPS_Time >= leap)
n_leaps[i_records,i_blocks] += 1.0
return n_leaps
def read_MPH(self, full_filename):
"""
Read ASCII Main Product Header (MPH) block from an ESA PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# check that first line of header matches PRODUCT
if not bool(re.match(br'PRODUCT\=\"(.*)(?=\")',file_contents[0])):
raise IOError('File does not start with a valid PDS MPH')
# read MPH header text
s_MPH_fields = {}
for i in range(n_MPH_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_MPH_fields
def read_SPH(self, full_filename, j_sph_size):
"""
Read ASCII Specific Product Header (SPH) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# compile regular expression operator for reading headers
rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
# check first line of header matches SPH_DESCRIPTOR
if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
            raise IOError('File does not have a valid PDS SPH')
# read SPH header text (no binary control characters)
s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
and not re.search(br'[^\x20-\x7e]+',li)]
# extract SPH header text
s_SPH_fields = {}
c = 0
while (c < len(s_SPH_lines)):
# check if line is within DS_NAME portion of SPH header
if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
# add dictionary for DS_NAME
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
key = value.decode('utf-8').rstrip()
s_SPH_fields[key] = {}
for line in s_SPH_lines[c+1:c+7]:
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
# data fields within quotes
dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',line)):
# data fields without quotes
dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
# add 6 to counter to go to next entry
c += 6
# use regular expression operators to read headers
elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# add 1 to counter to go to next line
c += 1
# Return block name array to calling function
return s_SPH_fields
def read_DSD(self, full_filename, DS_TYPE=None):
"""
Read ASCII Data Set Descriptors (DSD) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# number of text lines in a DSD header
n_DSD_lines = 8
# Level-1b CryoSat DS_NAMES within files
regex_patterns = []
if (DS_TYPE == 'CS_L1B'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_LRM[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SAR[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SARIN[\s+]*"')
elif (DS_TYPE == 'SIR_L1B_FDM'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_FDM[\s+]*"')
# find the DSD starting line within the SPH header
c = 0
Flag = False
while ((Flag is False) and (c < len(regex_patterns))):
# find indice within
indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:]) if
re.search(regex_patterns[c],line)]
if indice:
Flag = True
else:
c+=1
# check that valid indice was found within header
if not indice:
raise IOError('Can not find correct DSD field')
# extract s_DSD_fields info
DSD_START = n_MPH_lines + indice[0] + 1
s_DSD_fields = {}
for i in range(DSD_START,DSD_START+n_DSD_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_DSD_fields
def cryosat_baseline_AB(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baselines A and B
"""
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100): converted from telemetry units to be
# the noise floor of FBR measurement echoes.
# Set to -9999.99 when the telemetry contains zero.
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
# CryoSat-2 mode specific waveforms
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
elif (self.MODE == 'SIN'):
# SARIN Mode
# Averaged Power Echo Waveform [512]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
# Coherence [512]: packed units (1/1000)
CS_l1b_mds['Waveform_20Hz']['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
# Phase Difference [512]: packed units (microradians)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
# for each record in the CryoSat file
for r in range(n_records):
# CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
CS_l1b_mds['Location']['Day'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
for b in range(n_blocks):
CS_l1b_mds['Data']['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Data']['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry']['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
CS_l1b_mds['Geometry']['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 Average Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
                CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
"""
Multiple Scattering code, By Dr <NAME>
For more information see:
<NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. (2015). Acta Cryst. A71, 20-25.
http://dx.doi.org/10.5281/zenodo.12866
Example:
xtl = dif.Crystal('Diamond.cif')
mslist = run_calcms(xtl, [0,0,3], [0,1,0], [1,0], [2.83, 2.85], plot=True)
Created from python package "calcms"
Version 1.0
12/12/2019
-------------------------------------------
Copyright 2014 Diamond Light Source Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Dr <NAME>, <EMAIL> Tel: +44 1235 778786
www.diamond.ac.uk
Diamond Light Source, Chilton, Didcot, Oxon, OX11 0DE, U.K.
"""
import numpy as np
import matplotlib.pyplot as plt
import itertools
__version__ = '1.0'
def run_calcms(xtl, hkl, azir=[0, 0, 1], pv=[1, 0], energy_range=[7.8, 8.2], numsteps=60,
full=False, pv1=False, pv2=False, sfonly=True, pv1xsf1=False):
"""
Run the multiple scattering code
mslist = run_calcms(xtl, [0,0,1])
:param xtl: Crystal structure from Dans_Diffraction
:param hkl: [h,k,l] principle reflection
:param azir: [h,k,l] reference of azimuthal 0 angle
:param pv: [s,p] polarisation vector
:param energy_range: [min, max] energy range in keV
:param numsteps: int: number of calculation steps from energy min to max
:param full: True/False: calculation type: full
:param pv1: True/False: calculation type: pv1
:param pv2: True/False: calculation type: pv2
:param sfonly: True/False: calculation type: sfonly *default
:param pv1xsf1: True/False: calculation type: pv1xsf1?
:return: array
"""
# ===============================================================================
# DMS Calculation
# ===============================================================================
mslist = [[np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN]]
# ================= Generate Reflist from Cif ===================================
sf, reflist, lattice, structure = loadcif(xtl, energy_range[-1])
refindex = ~np.isnan(Vfind(reflist, np.round(hkl) - reflist).vindex())
sf = sf[refindex]
reflist = reflist[refindex]
sf2 = sf[Vfind(reflist, np.round(hkl) - reflist).vindex()]
loopnum = 1
# ------------------------------------------------------------------------------
if pv1 + pv2 + sfonly + full + pv1xsf1 > 1:
print('Choose only one intensity option')
print('full=%s, pv1=%s, pv2=%s, sfonly=%s, pv1xsf1=%s' % (full, pv1, pv2, sfonly, pv1xsf1))
return None
elif pv1 + pv2 + sfonly + full + pv1xsf1 == 0:
print('Geometry Only')
mslist = [[np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN]]
for enval in np.linspace(energy_range[0], energy_range[1], numsteps):
print(str(loopnum) + ' of ' + str(numsteps))
# ===========================================================================
# SF0*Gauss*SF1*SF2*PV2
# ===========================================================================
if full:
#print('full calculation: SF1*SF2*PV2')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2) # [:,[3,4,5]]
polfull = ms.polfull(pv)
mslist = np.concatenate((mslist, ms.polfull(pv)), 0)
# ===========================================================================
# PV1 only
# ===========================================================================
elif pv1:
#print('pv1 calculation: PV1')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2)
mslist = np.concatenate((mslist, ms.pol1only(pv)), 0)
# ===========================================================================
# PV2 only
# ===========================================================================
elif pv2:
#print('pv2 calculation: PV2')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2)
mslist = np.concatenate((mslist, ms.pol2only(pv)), 0)
# ===========================================================================
# SF only
# ===========================================================================
elif sfonly:
#print('sfonly calculation: SF1*SF2')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2)
mslist = np.concatenate((mslist, ms.sfonly()), 0)
# ===========================================================================
# SF only
# ===========================================================================
elif pv1xsf1:
#print('pv1xsf1 calculation: SF1*PV1')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf)
mslist = np.concatenate((mslist, ms.pv1xsf1(pv)), 0)
# ===========================================================================
# Geometry only - no structure factors
# ===========================================================================
else:
print('Geometry Only')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir)
mslist = np.concatenate((mslist, ms.geometry()), 0)
loopnum = loopnum + 1
keepindex = np.where([~np.isnan(mslist).any(1)])[1]
mslist = np.array(mslist[keepindex, :])
return mslist
########################################################################################################################
############################################### Ancillary Functions ##################################################
########################################################################################################################
def loadcif(xtl, energy_kev):
"""
New loadcif from Dans_Diffraction
returns:
intensity: Structure factor^2. I = sf x sf*
reflist: array of [h,k,l] reflections
lattice: [a,b,c,alpha,beta,gamma]
sf: complex structure factors
"""
lattice = xtl.Cell.lp()
reflist = xtl.Cell.all_hkl(energy_kev)
reflist = xtl.Cell.sort_hkl(reflist)
reflist = reflist[1:]
old_sf = xtl.Scatter._return_structure_factor
xtl.Scatter._return_structure_factor = True
sf = xtl.Scatter.intensity(reflist) # complex structure factor
xtl.Scatter._return_structure_factor = old_sf
intensity = np.real(sf * np.conj(sf))
print('MS Reflections: %d' % len(reflist))
return intensity, reflist, lattice, sf
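# --- Hedged usage sketch (not part of the original module) -----------------------
# Given a Dans_Diffraction crystal object `xtl` created elsewhere, loadcif() returns
# the squared structure factors, the sorted reflection list, the lattice parameters
# and the complex structure factors; this helper only exercises those return values.
def _demo_loadcif(xtl, energy_kev=8.0):
    intensity, reflist, lattice, sf = loadcif(xtl, energy_kev)
    strongest_hkl = reflist[np.argmax(intensity)]   # hkl of the strongest reflection
    return strongest_hkl, lattice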
class Bmatrix(object):
""" Convert to Cartesian coordinate system. Returns the Bmatrix and the metric tensors in direct and reciprocal spaces"""
def __init__(self, lattice):
self.lattice = lattice
lattice = self.lattice
a = lattice[0]
b = lattice[1]
c = lattice[2]
alph = lattice[3]
bet = lattice[4]
gamm = lattice[5]
alpha1 = alph * np.pi / 180.0
alpha2 = bet * np.pi / 180.0
alpha3 = gamm * np.pi / 180.0
beta1 = np.arccos((np.cos(alpha2) * np.cos(alpha3) - np.cos(alpha1)) / (np.sin(alpha2) * np.sin(alpha3)))
beta2 = np.arccos((np.cos(alpha1) * np.cos(alpha3) - np.cos(alpha2)) / (np.sin(alpha1) * np.sin(alpha3)))
beta3 = np.arccos((np.cos(alpha1) * np.cos(alpha2) - np.cos(alpha3)) / (np.sin(alpha1) * np.sin(alpha2)))
b1 = 1. / (a * np.sin(alpha2) * np.sin(beta3))
b2 = 1. / (b * np.sin(alpha3) * np.sin(beta1))
b3 = 1. / (c * np.sin(alpha1) * np.sin(beta2))
c1 = b1 * b2 * np.cos(beta3)
c2 = b1 * b3 * np.cos(beta2)
c3 = b2 * b3 * np.cos(beta1)
self.bmatrix = np.matrix([[b1, b2 * np.cos(beta3), b3 * np.cos(beta2)],
[0.0, b2 * np.sin(beta3), -b3 * np.sin(beta2) * np.cos(alpha1)], [0.0, 0.0, 1. / c]])
def bm(self):
return self.bmatrix
def ibm(self):
return self.bmatrix.I
def mt(self):
return self.bmatrix.I * self.bmatrix.transpose().I
def rmt(self):
mt = self.bmatrix.I * self.bmatrix.transpose().I
return mt.I
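# --- Hedged usage sketch (not part of the original module) -----------------------
# For an orthorhombic cell the Busing-Levy B matrix built above is diagonal with
# entries 1/a, 1/b, 1/c, so the d-spacing of the (1 0 0) reflection recovered from
# it should equal the a lattice parameter.
def _demo_bmatrix():
    lattice = [4.0, 5.0, 6.0, 90.0, 90.0, 90.0]   # a, b, c, alpha, beta, gamma (degrees)
    B = Bmatrix(lattice).bm()
    q_100 = B * np.matrix([1.0, 0.0, 0.0]).T      # Cartesian reciprocal-space vector
    return 1.0 / np.linalg.norm(q_100)            # expected: 4.0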
class Rotxyz(object):
    """Rotation about an axis. Example: R = Rotxyz(rotation_axis, angle_degrees).rmat()"""
def __init__(self, u, angle):
self.u = u
self.angle = angle
u = np.matrix(self.u) / np.linalg.norm(np.matrix(self.u))
e11 = u[0, 0] ** 2 + (1 - u[0, 0] ** 2) * np.cos(angle * np.pi / 180.0)
e12 = u[0, 0] * u[0, 1] * (1 - np.cos(angle * np.pi / 180.0)) - u[0, 2] * np.sin(angle * np.pi / 180.0)
e13 = u[0, 0] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) + u[0, 1] * np.sin(angle * np.pi / 180.0)
e21 = u[0, 0] * u[0, 1] * (1 - np.cos(angle * np.pi / 180.0)) + u[0, 2] * np.sin(angle * np.pi / 180.0)
e22 = u[0, 1] ** 2 + (1 - u[0, 1] ** 2) * np.cos(angle * np.pi / 180.0)
e23 = u[0, 1] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) - u[0, 0] * np.sin(angle * np.pi / 180.0)
e31 = u[0, 0] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) - u[0, 1] * np.sin(angle * np.pi / 180.0)
e32 = u[0, 1] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) + u[0, 0] * np.sin(angle * np.pi / 180.0)
e33 = u[0, 2] ** 2 + (1 - u[0, 2] ** 2) * np.cos(angle * np.pi / 180.0)
self.rotmat = np.matrix([[e11, e12, e13], [e21, e22, e23], [e31, e32, e33]])
def rmat(self):
return self.rotmat
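# --- Hedged usage sketch (not part of the original module) -----------------------
# Rotating the x axis by 90 degrees about z should give (0, 1, 0) to within
# floating-point precision.
def _demo_rotxyz():
    R = Rotxyz([0, 0, 1], 90.0).rmat()
    v = np.matrix([1.0, 0.0, 0.0]).T
    return R * v   # approximately [[0], [1], [0]]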
class Dhkl(object):
"""calculate d-spacing for reflection from reciprocal metric tensor
d = Dhkl(lattice,HKL)
lattice = [a b c alpha beta gamma] (angles in degrees)
HKL: list of hkl. size(HKL) = n x 3 or 3 x n
!!! if size(HKL) is 3 x 3, HKL must be in the form:
HKL = [h1 k1 l1 ; h2 k2 l2 ; h3 k3 l3]
"""
def __init__(self, lattice, hkl):
self.lattice = lattice
self.hkl = np.matrix(hkl)
def d(self):
hkl = self.hkl
if | np.shape(hkl) | numpy.shape |
''' Script to check the correctness of the geometry utils functions (rotation, translation matrices)
'''
import numpy as np
import unittest
from beam_telescope_analysis.tools import geometry_utils
class TestTrackAnalysis(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def test_transformations(self): # Transforms from global to local system and back and checks for equality
        position = np.array([0, 0, 0])  # Position in the global system to transform (overwritten by the loop below)
for position in (np.array([-1, -2, -3]), np.array([0, 1, 0]), np.array([3, 2, 1])):
for x in range(-3, 4, 3): # Loop over x translation values
for y in range(-3, 4, 3): # Loop over y translation values
for z in range(-3, 4, 3): # Loop over z translation values
for alpha in [- np.pi, -np.pi / 3., 0, np.pi / 4., np.pi / 3., np.pi / 2., 3 * np.pi / 4., np.pi, 4. * np.pi / 3.]: # Loop x rotation values
for beta in [- np.pi, -np.pi / 3., 0, np.pi / 4., np.pi / 3., np.pi / 2., 3 * np.pi / 4., np.pi, 4. * np.pi / 3.]: # Loop y rotation values
for gamma in [- np.pi, -np.pi / 3., 0, np.pi / 4., np.pi / 3., np.pi / 2., 3 * np.pi / 4., np.pi, 4. * np.pi / 3.]: # Loop z rotation values
position_g = np.array([position[0], position[1], position[2], 1]) # Extend global position dimension
transformation_matrix_to_local = geometry_utils.global_to_local_transformation_matrix(x, y, z, alpha, beta, gamma)
transformation_matrix_to_global = geometry_utils.local_to_global_transformation_matrix(x, y, z, alpha, beta, gamma)
position_l = np.dot(transformation_matrix_to_local, position_g) # Transform to local coordinate system
position_g_result = np.dot(transformation_matrix_to_global, position_l) # Transform back to global coordinate system
self.assertTrue(np.allclose(position, np.array(position_g_result[:-1]))) # Finite precision needs equality check with finite precision
def test_rotation_matrices(self):
# Check that the rotation matrices in x, y, z have the features of a rotation matrix (det = 1, inverse = transposed matrix)
for alpha in [- np.pi, -np.pi / 3., 0, np.pi / 4., np.pi / 3., np.pi / 2., 3 * np.pi / 4., np.pi, 4. * np.pi / 3.]: # Loop x rotation values
rotation_matrix_x = geometry_utils.rotation_matrix_x(alpha)
self.assertAlmostEqual(np.linalg.det(rotation_matrix_x), 1)
self.assertTrue(np.allclose(rotation_matrix_x.T, np.linalg.inv(rotation_matrix_x)))
for beta in [- np.pi, -np.pi / 3., 0, np.pi / 4., np.pi / 3., np.pi / 2., 3 * np.pi / 4., np.pi, 4. * np.pi / 3.]: # Loop y rotation values
rotation_matrix_y = geometry_utils.rotation_matrix_y(beta)
self.assertAlmostEqual(np.linalg.det(rotation_matrix_y), 1)
self.assertTrue(np.allclose(rotation_matrix_y.T, | np.linalg.inv(rotation_matrix_y) | numpy.linalg.inv |
import glob
import os
import warnings
import numpy as np
from astropy import units as u
from astropy.io import ascii
from astropy.io import fits
from exorad.log import Logger
from exorad.utils import exolib
from .signal import Sed
warnings.filterwarnings('ignore', category=UserWarning, append=True)
class Star(Logger, object):
    """
    Instantiate a stellar class using Phoenix stellar models
    Attributes
    ----------
    luminosity : float
        Stellar bolometric luminosity computed from the Phoenix stellar model. Units [W]
    wl array
        wavelength [micron]
    sed array
        Spectral energy density [W m**-2 micron**-1]
    ph_wl array
        Phoenix wavelength [micron], at the native Phoenix resolution
    ph_sed array
        Phoenix spectral energy density [W m**-2 micron**-1], at the native Phoenix resolution
    ph_filename Phoenix filename
    """
# def __init__(self, star_sed_path, star_distance, star_temperature, star_logg, star_f_h, star_radius):
def __init__(self,
star_sed_path,
starDistance,
starTemperature,
starLogg,
starMetallicity,
starRadius,
use_planck_spectrum=False,
wl_min=0.4 * u.um,
wl_max=10.0 * u.um,
phoenix_model_filename=None):
"""
Parameters
__________
exocat_star : object
exodata star object
star_sed_path: : string
path to Phoenix stellar spectra
"""
self.set_log_name()
        if use_planck_spectrum:
self.debug('Planck spectrum used')
wl = np.linspace(wl_min, wl_max, 10000)
ph_wl, ph_sed, ph_L = self.__get_star_spectrum(
wl,
starDistance.to(u.m),
starTemperature.to(u.K),
starRadius.to(u.m))
ph_file = None
else:
if phoenix_model_filename:
ph_file = os.path.join(star_sed_path, phoenix_model_filename)
self.debug('phoenix file name : {}'.format(ph_file))
else:
ph_file = self.__get_phonix_model_filename(
star_sed_path,
starTemperature.to(u.K),
starLogg,
starMetallicity)
self.debug('phoenix file name : {}'.format(ph_file))
ph_wl, ph_sed, ph_L = self.__read_phenix_spectrum(
ph_file,
starDistance.to(u.m),
starRadius.to(u.m))
self.luminosity = ph_L
self.sed = Sed(wl_grid=ph_wl, data=ph_sed)
self.filename = ph_file
self.model = 'Planck' if use_planck_spectrum else os.path.basename(ph_file)
def __get_phonix_model_filename(self, path, star_temperature,
star_logg, star_f_h):
sed_name = glob.glob(os.path.join(path, "*.BT-Settl.spec.fits.gz"))
if len(sed_name) == 0:
self.error("No stellar SED files found")
raise OSError("No stellar SED files found")
        sed_T_list = np.array([float(os.path.basename(k)[3:8]) for k in sed_name])
        sed_Logg_list = np.array([float(os.path.basename(k)[9:12]) for k in sed_name])
        sed_Z_list = np.array([float(os.path.basename(k)[13:16]) for k in sed_name])
idx = np.argmin(np.abs(sed_T_list - np.round(star_temperature.value / 100.0)) +
| np.abs(sed_Logg_list - star_logg) | numpy.abs |
# normal_cf_ds_classification_by_ufl_w_t_dis.py
# 1. fix a_max a_conf S_jam for a driver; mix seq points and random points for initialization: failed
# 2. fix S_jam for a driver; mix seq points and random points for initialization: still tried
# 3. add temporal distance when calculating distance for assigning labels
import numpy as np
from scipy.optimize import leastsq
import scipy.stats
import matplotlib.pyplot as plt
import pickle, time, copy, os, warnings
import pandas as pd
import pylab
from scipy.stats import entropy as kl_div
import threading, sys
from datetime import datetime
from sklearn.preprocessing import normalize, minmax_scale
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
f = open('all_data_for_cf_model_w_t_pre_info_1101.pkl', 'rb')
all_data_for_cf_model = pickle.load(f)
f.close()
from scipy.optimize import minimize, basinhopping, brute, differential_evolution, shgo, dual_annealing
import random
from pathos.multiprocessing import ProcessingPool as Pool
def set_cons(a_max_n_boundary=[0.1, 2.5], desired_V_n_boundary=[1, 40], a_comf_n_boundary=[0.1, 5],
S_jam_boundary=[0.1, 10], desired_T_n_boundary=[0.1, 5], beta_boundary=[4, 4]):
# constraints: eq or ineq
a_max_n_boundary = a_max_n_boundary
desired_V_n_boundary = desired_V_n_boundary
a_comf_n_boundary = a_comf_n_boundary
S_jam_boundary = S_jam_boundary
desired_T_n_boundary = desired_T_n_boundary
beta_boundary = beta_boundary
cons = ({'type': 'ineq', 'fun': lambda x: x[0] - a_max_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[0] + a_max_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[1] - desired_V_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[1] + desired_V_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[2] - a_comf_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[2] + a_comf_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[3] - S_jam_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[3] + S_jam_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[4] - desired_T_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[4] + desired_T_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[5] - beta_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[5] + beta_boundary[1]})
return cons
def initialize(a_max_n_boundary=[0.1, 2.5], desired_V_n_boundary=[1, 40], a_comf_n_boundary=[0.1, 5], S_jam_boundary=[0.1, 10], \
desired_T_n_boundary=[0.1, 5], beta_boundary=[4, 4]):
# a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta
x0 = (random.uniform(a_max_n_boundary[0], a_max_n_boundary[1]),
random.uniform(desired_V_n_boundary[0], desired_V_n_boundary[1]), \
random.uniform(a_comf_n_boundary[0], a_comf_n_boundary[1]),
random.uniform(S_jam_boundary[0], S_jam_boundary[1]), \
random.uniform(desired_T_n_boundary[0], desired_T_n_boundary[1]), 4)
return x0
def IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta):
def desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n):
# if a_max_n * a_comf_n <= 0:
# print("a_max_n", a_max_n, "a_comf_n", a_comf_n)
item1 = S_jam_n
item2 = V_n_t * desired_T_n
item3 = (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# if V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n)) > 0:
# item2 = V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# else:
# item2 = 0
return item1 + max(0, item2 + item3)
a_n_t = []
for i in range(len(delta_V_n_t)):
desired_S_n = desired_space_hw(S_jam_n, V_n_t[i], desired_T_n, delta_V_n_t[i], a_max_n, a_comf_n)
a_n_t.append(a_max_n * (1 - (V_n_t[i] / desired_V_n) ** beta - (desired_S_n / S_n_t[i]) ** 2))
return np.array(a_n_t)
def tv_IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta):
def desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n):
# if a_max_n * a_comf_n <= 0:
# print("a_max_n", a_max_n, "a_comf_n", a_comf_n)
item1 = S_jam_n
item2 = V_n_t * desired_T_n
item3 = (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# if V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n)) > 0:
# item2 = V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# else:
# item2 = 0
return item1 + max(0, item2 + item3)
a_n_t = []
for i in range(len(a_max_n)):
desired_S_n = desired_space_hw(S_jam_n[i], V_n_t, desired_T_n[i], delta_V_n_t, a_max_n[i], a_comf_n[i])
a_n_t.append(a_max_n[i] * (1 - (V_n_t / desired_V_n[i]) ** beta - (desired_S_n / S_n_t) ** 2))
return np.array(a_n_t)
def IDM_cf_model_for_p(delta_V_n_t, S_n_t, V_n_t, a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta):
def desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n):
item1 = S_jam_n
item2 = V_n_t * desired_T_n
item3 = (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# if V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n)) > 0:
# item2 = V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# else:
# item2 = 0
return item1 + max(0, item2 + item3)
desired_S_n = desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n)
a_n_t = a_max_n * (1 - (V_n_t / desired_V_n) ** beta - (desired_S_n / S_n_t) ** 2)
return a_n_t
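# --- Hedged worked example (not part of the original script) ----------------------
# Single-state IDM evaluation with made-up but plausible parameters: a follower at
# 15 m/s with a 40 m gap, closing at 1 m/s. The desired gap is
# S_jam + V*T + V*dV/(2*sqrt(a_max*a_comf)) ~= 28.8 m, so the model returns a
# moderate positive acceleration (roughly 0.6 m/s^2).
def _demo_idm_single_state():
    return IDM_cf_model_for_p(delta_V_n_t=1.0, S_n_t=40.0, V_n_t=15.0,
                              a_max_n=1.5, desired_V_n=30.0, a_comf_n=2.0,
                              S_jam_n=2.0, desired_T_n=1.5, beta=4)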
def obj_func(args):
a, delta_V_n_t, S_n_t, V_n_t = args
# x[0:6]: a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta
# err = lambda x: np.sqrt( np.sum( ( (a - IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, x[0], x[1], x[2], x[3], x[4], x[5])) / a ) ** 2) / len(a) )
# err = lambda x: np.sqrt( np.sum((a - IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, x[0], x[1], x[2], x[3], x[4], x[5])) ** 2) / np.sum(a**2))
err = lambda x: np.sqrt(
np.sum((a - IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, x[0], x[1], x[2], x[3], x[4], x[5])) ** 2) / len(a))
return err
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print("--- new folder " + path + " ... ---")
print("--- OK ---")
def _timed_run(func, distribution, args=(), kwargs={}, default=None, timeout=10):
"""This function will spawn a thread and run the given function
using the args, kwargs and return the given default value if the
timeout is exceeded.
http://stackoverflow.com/questions/492519/timeout-on-a-python-function-call
"""
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
self.exc_info = (None, None, None)
def run(self):
try:
self.result = func(args, **kwargs)
except Exception as err: # pragma: no cover
self.exc_info = sys.exc_info()
def suicide(self): # pragma: no cover
raise RuntimeError('Stop has been called')
it = InterruptableThread()
it.start()
started_at = datetime.now()
    it.join(timeout)
ended_at = datetime.now()
diff = ended_at - started_at
if it.exc_info[0] is not None: # pragma: no cover ; if there were any exceptions
a, b, c = it.exc_info
raise Exception(a, b, c) # communicate that to caller
    if it.is_alive():  # pragma: no cover
it.suicide()
raise RuntimeError
else:
return it.result
def fit_posterior(data, Nbest=3, timeout=10):
param_names = ["a_max", "desired_V", "a_comf", "S_jam", "desired_T"]
common_distributions = ['cauchy', 'chi2', 'expon', 'exponpow', 'gamma', 'lognorm', 'norm', 'powerlaw', 'rayleigh',
'uniform']
distributions = {}
data = np.array(data).T
for i in range(len(data)):
fitted_param = {}
fitted_pdf = {}
sumsquare_error = {}
y, x = np.histogram(data[i], bins=100, density=True)
x = [(this + x[i + 1]) / 2. for i, this in enumerate(x[0:-1])]
for distribution in common_distributions:
try:
# need a subprocess to check time it takes. If too long, skip it
dist = eval("scipy.stats." + distribution)
param = dist.fit(data[i])
pdf_fitted = dist.pdf(x, *param)
fitted_param[distribution] = param[:]
fitted_pdf[distribution] = pdf_fitted
# calculate error
sq_error = pylab.sum((fitted_pdf[distribution] - y) ** 2)
sumsquare_error[distribution] = sq_error
# calcualte information criteria
# logLik = np.sum(dist.logpdf(x, *param))
# k = len(param[:])
# n = len(data[i])
# aic = 2 * k - 2 * logLik
# bic = n * np.log(sq_error / n) + k * np.log(n)
# calcualte kullback leibler divergence
# kullback_leibler = kl_div(fitted_pdf[distribution], self.y)
# compute some errors now
# _fitted_errors[distribution] = sq_error
# _aic[distribution] = aic
# _bic[distribution] = bic
# _kldiv[distribution] = kullback_leibler
except Exception: # pragma: no cover
print("SKIPPED {} distribution (taking more than {} seconds)".format(distribution, timeout))
# print(Exception)
# if we cannot compute the error, set it to large values
# fitted_param[distribution] = []
# fitted_pdf[distribution] = np.nan
# sumsquare_error[distribution] = np.inf
srt_sq_error = sorted(sumsquare_error.items(), key=lambda kv:(kv[1], kv[0]))
for j in range(Nbest):
dist_name = srt_sq_error[j][0]
sq_error = srt_sq_error[j][1]
param = fitted_param[dist_name]
pdf = fitted_pdf[dist_name]
if not param_names[i] in distributions:
distributions[param_names[i]] = [{"distribution": dist_name, "fitted_param": param, "sq_error": sq_error}]
else:
distributions[param_names[i]].append({"distribution": dist_name, "fitted_param": param, "sq_error": sq_error})
return distributions
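# --- Hedged usage sketch (not part of the original script) ------------------------
# fit_posterior() expects an (n_samples x 5) array whose columns are posterior draws
# of a_max, desired_V, a_comf, S_jam and desired_T; the synthetic draws below are
# made up purely to illustrate the call and the shape of the returned dictionary.
def _demo_fit_posterior(n_samples=2000):
    rng = np.random.default_rng(0)
    fake_posterior = np.column_stack([
        rng.normal(1.5, 0.2, n_samples),    # a_max
        rng.normal(25.0, 3.0, n_samples),   # desired_V
        rng.normal(1.0, 0.3, n_samples),    # a_comf
        rng.normal(2.0, 0.5, n_samples),    # S_jam
        rng.normal(1.2, 0.2, n_samples),    # desired_T
    ])
    dists = fit_posterior(fake_posterior, Nbest=1)
    for name, fits in dists.items():
        print(name, fits[0]["distribution"], fits[0]["sq_error"])
    return dists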
def get_peak_value(data):
a_max_n_boundary = [0.1, 2.5]
desired_V_n_boundary = [1, 40]
a_comf_n_boundary = [0.1, 5]
S_jam_boundary = [0.1, 10]
desired_T_n_boundary = [0.1, 5]
data = data.T
a_max_hist, a_max_bins = np.histogram(data[0], bins=100, range=(a_max_n_boundary[0], a_max_n_boundary[1]))
desired_V_hist, desired_V_bins = np.histogram(data[1], bins=100, range=(desired_V_n_boundary[0], desired_V_n_boundary[1]))
a_comf_hist, a_comf_bins = np.histogram(data[2], bins=100, range=(a_comf_n_boundary[0], a_comf_n_boundary[1]))
S_jam_hist, S_jam_bins = np.histogram(data[3], bins=100, range=(S_jam_boundary[0], S_jam_boundary[1]))
desired_T_hist, desired_T_bins = np.histogram(data[4], bins=100, range=(desired_T_n_boundary[0], desired_T_n_boundary[1]))
idx = np.argmax(a_max_hist)
a_max = (a_max_bins[idx + 1] + a_max_bins[idx]) / 2
idx = np.argmax(desired_V_hist)
desired_V = (desired_V_bins[idx + 1] + desired_V_bins[idx]) / 2
idx = np.argmax(a_comf_hist)
a_comf = (a_comf_bins[idx + 1] + a_comf_bins[idx]) / 2
idx = np.argmax(S_jam_hist)
S_jam = (S_jam_bins[idx + 1] + S_jam_bins[idx]) / 2
idx = np.argmax(desired_T_hist)
desired_T = (desired_T_bins[idx + 1] + desired_T_bins[idx]) / 2
return a_max, desired_V, a_comf, S_jam, desired_T
def get_mean_value(data):
data = data.T
return np.mean(data[0]), np.mean(data[1]), np.mean(data[2]), np.mean(data[3]), np.mean(data[4])
def get_tv_params(next_v, v_id, all_cf_data):
print("-------------------------------------------------------------------------------------------------")
print(str(next_v) + 'th vehicle with id ' + str(v_id))
# [delta_v_l, space_hw_l, ego_v_l, a_l]
delta_V_n_t = np.array(all_cf_data[0])
S_n_t = np.array(all_cf_data[1])
V_n_t = np.array(all_cf_data[2])
a = np.array(all_cf_data[3])
t = np.array(all_cf_data[4])
pre_v = np.array(all_cf_data[5])
pre_tan_acc = np.array(all_cf_data[6])
pre_lat_acc = np.array(all_cf_data[7])
print(len(a), np.mean(np.abs(a)))
print(len(pre_v), np.mean(pre_v))
print(len(pre_tan_acc), np.mean(np.abs(pre_tan_acc)))
print(len(pre_lat_acc), np.mean(np.abs(pre_lat_acc)))
args = (a, delta_V_n_t, S_n_t, V_n_t)
cons = set_cons()
if os.path.exists('0714_dist_param/'+str(int(v_id))+'/using_all_data.txt'):
res_param = np.loadtxt('0714_dist_param/'+str(int(v_id))+'/using_all_data.txt')
else:
return False, False, False
while True:
try:
x0 = np.asarray(initialize())
res = minimize(obj_func(args), x0, constraints=cons, method='trust-constr')
if res.success:
break
except ValueError:
continue
rmse_using_all_data = res.fun
# f = open('0704_dist_param/res_using_all_data.txt', 'a+')
# f.write("v id " + str(int(v_id)) + " " + str(res.success) + " | RMSE: " + str(res.fun) + " | a_max: " + str(
# res.x[0]) + " | desired_V: " + str(res.x[1]) + " | a_comf: " + str(res.x[2]) + " | S_jam: " + str(res.x[3]) +
# " | desired_T: " + str(res.x[4]) + " | beta: " + str(res.x[5]) + "\n")
# f.close()
mkdir('0714_dist_param/'+str(int(v_id))+'/')
mkdir('0714_dist_param/' + str(int(v_id)) + '/posterior_figure/')
np.savetxt('0714_dist_param/'+str(int(v_id))+'/using_all_data.txt', np.array(res.x))
res_param = res.x
fix_a_max = res_param[0]
fix_desired_V = res_param[1]
fix_a_comf = res_param[2]
fix_S_jam = res_param[3]
fix_desired_T = res_param[4]
fix_beta = res_param[5]
data_array = np.array([delta_V_n_t, S_n_t, V_n_t, a, t]).T
data_array = data_array[data_array[:, -1].argsort()]
t = np.array(data_array[:, 4])
# data_array = data_array[:, 0:-1]
data = data_array.tolist()
sample_size = 10000
i = 0
fix_sum_err = 0
tv_sum_err = 0
# tv_params = []
tv_params_mean = []
for frame in data:
if i % 100 == 0:
print(str(next_v)+"th vehicle v_id "+str(v_id)+" frame "+str(i))
if os.path.exists('0714_dist_param/' + str(int(v_id)) + '/' + str(int(i)) + '_tv_params.txt'):
with open('0714_dist_param/' + str(int(v_id)) + '/' + str(int(i)) + '_tv_params.txt') as f:
accept_tv_params = np.array([line.strip().split() for line in f], float)
# accept_tv_params = np.loadtxt('0714_dist_param/' + str(int(v_id)) + '/' + str(int(i)) + '_tv_params.txt')
# tv_params.append(accept_tv_params)
a_max, desired_V, a_comf, S_jam, desired_T = get_mean_value(accept_tv_params)
tv_params_mean.append([a_max, desired_V, a_comf, S_jam, desired_T])
else:
if i == 0:
return False, False, False
print('0714_dist_param/' + str(int(v_id)) + '/' + str(int(i)) + '_tv_params.txt does not exist!!!')
i += 1
a_max, desired_V, a_comf, S_jam, desired_T = fix_a_max, fix_desired_V, fix_a_comf, fix_S_jam, fix_desired_T
tv_params_mean.append([a_max, desired_V, a_comf, S_jam, desired_T])
delta_V_n_t = frame[0]
S_n_t = frame[1]
V_n_t = frame[2]
a_n_t = frame[3]
fix_err = (a_n_t - IDM_cf_model_for_p(delta_V_n_t, S_n_t, V_n_t, fix_a_max, fix_desired_V, fix_a_comf, fix_S_jam,
fix_desired_T, 4)) ** 2
fix_sum_err += fix_err
# a_max, desired_V, a_comf, S_jam, desired_T = get_peak_value(accept_tv_params)
a_n_t_hat = IDM_cf_model_for_p(delta_V_n_t, S_n_t, V_n_t, a_max, desired_V, a_comf, S_jam, desired_T, 4)
tv_sum_err += (a_n_t_hat - a_n_t) ** 2
# if (a_n_t_hat - a_n_t) ** 2 > fix_err:
# distributions = fit_posterior(accept_tv_params)
# print("--------------------------------------------------------------------------------------------")
# print(next_v, "v_id", v_id, "frame", i, "tv_err", (a_n_t_hat - a_n_t) ** 2, "fix_err", fix_err)
# for param_name in ["a_max", "desired_V", "a_comf", "S_jam", "desired_T"]:
# for dist in distributions[param_name]:
# print(v_id, param_name, dist["distribution"], dist["fitted_param"], dist["sq_error"])
# print("--------------------------------------------------------------------------------------------")
i += 1
print("all data %d | RMSE: %.4f | a_max: %.4f | desired_V: %.4f | a_comf: %.4f | S_jam: %.4f | desired_T: %.4f | beta: %.3f" % \
(v_id, np.sqrt(fix_sum_err / len(a)), res_param[0], res_param[1], res_param[2], res_param[3], res_param[4], res_param[5]))
print(str(int(v_id)), "RMSE:", np.sqrt(fix_sum_err / len(a)), np.sqrt(tv_sum_err / len(a)))
# print(str(int(v_id)), "mean:", np.mean(np.abs(a-a_hat)), np.std(np.abs(a-a_hat)))
# tv_params = np.array(tv_params)
# print(str(int(v_id)), "tv params:", tv_params.shape)
np.savetxt('0714_dist_param/' + str(int(v_id)) + '/tv_params_mean.txt', np.array(tv_params_mean))
return np.array(tv_params_mean), np.sqrt(tv_sum_err / len(a))
def JS_divergence(p, q):
# lb = boundary[0]
# ub = boundary[1]
# interval = (ub - lb) / 3000
# x = np.arange(lb, ub, interval)
# p = eval("scipy.stats." + dist1["name"]).pdf(x, *dist1["param"])
# q = eval("scipy.stats." + dist2["name"]).pdf(x, *dist2["param"])
p = p.reshape(1000, 5)
q = q.reshape(1000, 5)
p = p.T
q = q.T
js = 0
for i in range(5):
M = (p[i] + q[i]) / 2
js += 0.5 * scipy.stats.entropy(p[i], M) + 0.5 * scipy.stats.entropy(q[i], M)
# print(js)
return js
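# --- Hedged sanity check (not part of the original script) ------------------------
# JS_divergence() reshapes its inputs back to (1000, 5) samples and sums a
# column-wise Jensen-Shannon divergence, so identical inputs give 0 and the value
# grows as the two parameter samples separate.
def _demo_js_divergence():
    rng = np.random.default_rng(0)
    p = rng.random(5000)
    q = p.copy()
    return JS_divergence(p, q)   # 0.0 up to floating-point error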
def get_raw_features(next_v, v_id):
print("-------------------------------------------------------------------------------------------------")
print(str(next_v) + 'th vehicle with id ' + str(v_id))
if os.path.exists('0714_dist_param/' + str(int(v_id)) + '/tv_params_mean.txt'):
tv_params_mean = np.loadtxt('0714_dist_param/'+str(int(v_id))+'/tv_params_mean.txt')
else:
return
mean = np.mean(tv_params_mean, axis=0)
std = np.std(tv_params_mean, axis=0)
    per25 = np.percentile(tv_params_mean, 25, axis=0)
    per75 = np.percentile(tv_params_mean, 75, axis=0)
a_max_inc_diff_seq = []
a_max_dec_diff_seq = []
desired_V_inc_diff_seq = []
desired_V_dec_diff_seq = []
a_comf_inc_diff_seq = []
a_comf_dec_diff_seq = []
S_jam_inc_diff_seq = []
S_jam_dec_diff_seq = []
desired_T_inc_diff_seq = []
desired_T_dec_diff_seq = []
print(tv_params_mean.shape)
for i in range(len(tv_params_mean) - 1):
diff = tv_params_mean[i + 1] - tv_params_mean[i]
if diff[0] > 0:
a_max_inc_diff_seq.append(diff[0])
elif diff[0] < 0:
a_max_dec_diff_seq.append(diff[0])
if diff[1] > 0:
desired_V_inc_diff_seq.append(diff[1])
elif diff[1] < 0:
desired_V_dec_diff_seq.append(diff[1])
if diff[2] > 0:
a_comf_inc_diff_seq.append(diff[2])
elif diff[2] < 0:
a_comf_dec_diff_seq.append(diff[2])
if diff[3] > 0:
S_jam_inc_diff_seq.append(diff[3])
elif diff[3] < 0:
S_jam_dec_diff_seq.append(diff[3])
if diff[4] > 0:
desired_T_inc_diff_seq.append(diff[4])
elif diff[4] < 0:
desired_T_dec_diff_seq.append(diff[4])
diff_seq = [np.mean(a_max_inc_diff_seq), np.std(a_max_inc_diff_seq), np.mean(a_max_dec_diff_seq), np.std(a_max_dec_diff_seq),
np.mean(desired_V_inc_diff_seq), np.std(desired_V_inc_diff_seq), np.mean(desired_V_dec_diff_seq), np.std(desired_V_dec_diff_seq),
np.mean(a_comf_inc_diff_seq), np.std(a_comf_inc_diff_seq), np.mean(a_comf_dec_diff_seq), np.std(a_comf_dec_diff_seq),
np.mean(S_jam_inc_diff_seq), np.std(S_jam_inc_diff_seq), np.mean(S_jam_dec_diff_seq), np.std(S_jam_dec_diff_seq),
np.mean(desired_T_inc_diff_seq), np.std(desired_T_inc_diff_seq), np.mean(desired_T_dec_diff_seq), np.std(desired_T_dec_diff_seq)]
return mean, std, per25, per75, diff_seq
def scale_features(mean_l, std_l, per25_l, per75_l, diff_seq_l):
scaled_mean_l = minmax_scale(mean_l, axis=0)
scaled_std_l = minmax_scale(std_l, axis=0)
scaled_per25_l = minmax_scale(per25_l, axis=0)
scaled_per75_l = minmax_scale(per75_l, axis=0)
scaled_diff_seq_l = minmax_scale(diff_seq_l, axis=0)
return scaled_mean_l, scaled_std_l, scaled_per25_l, scaled_per75_l, scaled_diff_seq_l
def cal_agg_index(mean, per25, per75, diff_seq):
# a_max, desired_v, a_comf, S_jam, deisred_T
relation_op = [1, 1, 1, -1, -1]
res1 = 0
res2 = 0
res3 = 0
for i in range(5):
res1 += relation_op[i] * (mean[i] + per25[i] + per75[i])
res2 += relation_op[i] * (diff_seq[i * 4] + diff_seq[i * 4 + 1])
res3 += relation_op[i] * (diff_seq[i * 4 + 2] + diff_seq[i * 4 + 3])
return [res1, res2, res3]
def cal_agg_matrix(mean, std, per25, per75, diff_seq):
# a_max, desired_v, a_comf, S_jam, deisred_T
relation_op = [1, 1, 1, -1, -1]
agg_matrix = np.zeros((2, 5))
for i in range(5):
agg_matrix[0][i] = mean[i]
agg_matrix[1][i] = std[i]
# agg_matrix[2][i] = per75[i]
# agg_matrix[3][i] = diff_seq[i * 4]
# agg_matrix[4][i] = diff_seq[i * 4 + 1]
# agg_matrix[5][i] = diff_seq[i * 4 + 2]
# agg_matrix[6][i] = diff_seq[i * 4 + 3]
return agg_matrix
def analyze_tv_params(v_id, tv_params, tv_params_mean):
# plot tv_params_mean
mkdir("0714_dist_param/" + str(int(v_id)) + "/tv_params_figure/")
tv_params_mean = tv_params_mean.T
# plt.figure()
# plt.plot(range(len(tv_params_mean[0])), tv_params_mean[0], color="red")
# plt.savefig("0714_dist_param/" + str(int(v_id)) + "/tv_params_figure/a_max.png")
# plt.figure()
# plt.plot(range(len(tv_params_mean[0])), tv_params_mean[1]/4, color="blue")
# plt.savefig("0714_dist_param/" + str(int(v_id)) + "/tv_params_figure/desired_V.png")
# plt.figure()
# plt.plot(range(len(tv_params_mean[0])), tv_params_mean[2], color="green")
# plt.savefig("0714_dist_param/" + str(int(v_id)) + "/tv_params_figure/a_comf.png")
# plt.figure()
# plt.plot(range(len(tv_params_mean[0])), tv_params_mean[3], color="orange")
# plt.savefig("0714_dist_param/" + str(int(v_id)) + "/tv_params_figure/S_jam.png")
# plt.figure()
# plt.plot(range(len(tv_params_mean[0])), tv_params_mean[4], color="pink")
# plt.savefig("0714_dist_param/" + str(int(v_id)) + "/tv_params_figure/all.png")
# calculate cov
cov = []
corrcoef = []
for t in range(len(tv_params)):
params = tv_params[t].T
cov.append(np.cov(params))
this_corrcoef = np.corrcoef(params)
for i in range(5):
this_corrcoef[i][i] = 0
corrcoef.append(this_corrcoef)
print( | np.array(cov) | numpy.array |
"""
Test DOE Driver and Generators.
"""
import unittest
import os
import os.path
import glob
import csv
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.paraboloid_distributed import DistParab
from openmdao.test_suite.groups.parallel_groups import FanInGrouped
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.general_utils import run_driver, printoptions
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class ParaboloidArray(om.ExplicitComponent):
"""
Evaluates the equation f(x,y) = (x-3)^2 + x*y + (y+4)^2 - 3.
Where x and y are xy[0] and xy[1] respectively.
"""
def setup(self):
self.add_input('xy', val=np.array([0., 0.]))
self.add_output('f_xy', val=0.0)
def compute(self, inputs, outputs):
"""
f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = inputs['xy'][0]
y = inputs['xy'][1]
outputs['f_xy'] = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
class ParaboloidDiscrete(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=10, tags='xx')
self.add_discrete_input('y', val=0, tags='yy')
self.add_discrete_output('f_xy', val=0, tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
discrete_outputs['f_xy'] = int(f_xy)
class ParaboloidDiscreteArray(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=np.ones((2, )), tags='xx')
self.add_discrete_input('y', val=np.ones((2, )), tags='yy')
self.add_discrete_output('f_xy', val=np.ones((2, )), tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
        discrete_outputs['f_xy'] = f_xy.astype(int)
class TestErrors(unittest.TestCase):
def test_generator_check(self):
prob = om.Problem()
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.FullFactorialGenerator)
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: FullFactorialGenerator")
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.Problem())
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but an instance of Problem was found.")
def test_lhc_criterion(self):
with self.assertRaises(ValueError) as err:
om.LatinHypercubeGenerator(criterion='foo')
self.assertEqual(str(err.exception),
"Invalid criterion 'foo' specified for LatinHypercubeGenerator. "
"Must be one of ['center', 'c', 'maximin', 'm', 'centermaximin', "
"'cm', 'correlation', 'corr', None].")
@use_tempdirs
class TestDOEDriver(unittest.TestCase):
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
def test_no_generator(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 0)
def test_list(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# create DOEDriver using provided list of cases
prob.driver = om.DOEDriver(cases)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_list_errors(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# data does not contain a list
cases = {'desvar': 1.0}
with self.assertRaises(RuntimeError) as err:
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
self.assertEqual(str(err.exception), "Invalid DOE case data, "
"expected a list but got a dict.")
# data contains a list of non-list
cases = [{'desvar': 1.0}]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n{'desvar': 1.0}")
# data contains a list of list, but one has the wrong length
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.y', 1., 'foo']]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n"
"[['p1.x', 1.0], ['p2.y', 1.0, 'foo']]")
# data contains a list of list, but one case has an invalid design var
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"'p2.z' is not a valid design variable:\n"
"[['p1.x', 1.0], ['p2.z', 1.0]]")
# data contains a list of list, but one case has multiple invalid design vars
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.y', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"['p1.y', 'p2.z'] are not valid design variables:\n"
"[['p1.y', 1.0], ['p2.z', 1.0]]")
def test_csv(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for (var, val) in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_csv_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', [0., 1.]))
model.add_subsystem('p2', om.IndepVarComp('y', [0., 1.]))
model.add_subsystem('comp1', Paraboloid())
model.add_subsystem('comp2', Paraboloid())
model.connect('p1.x', 'comp1.x', src_indices=[0])
model.connect('p2.y', 'comp1.y', src_indices=[0])
model.connect('p1.x', 'comp2.x', src_indices=[1])
model.connect('p2.y', 'comp2.y', src_indices=[1])
model.add_design_var('p1.x', lower=0.0, upper=1.0)
model.add_design_var('p2.y', lower=0.0, upper=1.0)
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = [
{'p1.x': np.array([0., 0.]), 'p2.y': | np.array([0., 0.]) | numpy.array |
"""Implements the forward mode of automatic differentiation
This module implements the forward mode of automatic differentiation. It does this by
overloading the many math dunder methods that are provided in Python such as __add__,
__sub__, __mul__, etc. In addition to these primitive functions, this module also
defines the elementary functions such as the trigonometric functions, hyperbolic
functions, logarithms, exponentials, and logistic function.
To create a new function of a variable, you can instantiate a variable with an
initial value using the constructor Forward. For example, Forward('x', 1) creates a
variable named 'x' with initial value 1. If we use this value in mathematical
operations, then we will create more Forward objects that we can access.
To get the results of computation, we can access the actual value by doing .value on
the Forward object at hand. To get the gradient with respect to a certain variable,
we can call .get_gradient(variable_name) on the Forward object.
"""
import numpy as np
np.seterr(all="ignore")
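# --- Hedged usage sketch (not part of the original module) ------------------------
# A small illustration of the interface described in the module docstring: build
# f(x, y) = x * y + x, read the value, then read the partial derivatives. It relies
# on the Forward class and its operator overloads defined below.
def _demo_forward_mode():
    x = Forward("x", 3.0)
    y = Forward("y", 2.0)
    f = x * y + x
    return f.value, f.get_gradient("x"), f.get_gradient("y")   # (9.0, 3.0, 3.0)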
def _coerce(arg):
"""Private function which does the actual coercion of a type into a
Forward object.
"""
if isinstance(arg, Forward):
return arg
# we support complex numbers too!
if isinstance(arg, (float, int, complex)):
return Forward(arg)
# otherwise raise ValueError cause we don't support
raise ValueError(type(arg))
def coerce(fun):
"""Decorates a function and coerces each of the inputs of the function into a
Forward object.
Many of our functions would like to operate on Forward objects instead of raw
values. For example, __add__ might get an integer in the case of Forward('x', 5)
+ 2, but we want the 2 to be wrapped in a Forward object. This decorator
automates that process and cleans up the code.
"""
def ret_f(*args):
new_args = [_coerce(arg) for arg in args]
return fun(*new_args)
return ret_f
class Forward:
"""The primary class of the forward mode of automatic differentiation.
The Forward class can be used to instantiate the forward mode of automatic
differentiation. By overloading the many dunder methods of Python, this class
enables the user to seamlessly define the forward computation of a function while
simultaneously deriving the gradient.
The result of the computation can be accessed via the .value attribute of the
object. The gradient can be accessed by the .get_gradient(variable_name) method
which returns the gradient with respect to a particular variable.
The object can be instantiated with one or two arguments. If one argument is
provided, it must be a numeric type. This represents a constant within automatic
differentiation. If two arguments are provided, the first must be a string, which
represents the variable name, and the second must be a numeric type, which
represents the value of that variable.
"""
def __init__(self, *args):
if len(args) == 1:
value = args[0]
if not isinstance(value, (int, float, complex)):
raise ValueError
self.derivatives = {}
self.value = value
elif len(args) == 2:
var_name, value = args
if not isinstance(var_name, str):
raise ValueError
if not isinstance(value, (int, float, complex)):
raise ValueError
# initialize the variable to have derivative 1
self.derivatives = {var_name: 1}
self.value = value
else:
raise ValueError("Incorrect number of args")
def get_gradient(self, var_name):
"""Gets the gradient with respect to a particular variable.
Accesses the .derivatives dictionary of the Forward object which stores the
results of the computations that were done by the duner methods during the
computation of the result and gradient. If the variable name is not within
the dictionary, this implies that the expression was constant and the
derivative should be zero. If the stored value is nan, this means there was
some error during the computation or the gradient does not exist at that
point.
"""
grad = self.derivatives.get(var_name, 0)
        # check to see if the computation was nan, indicating that the gradient
# most likely does not exist
if | np.isnan(grad) | numpy.isnan |
import numpy as np
import sys
import math
from itertools import combinations
class Sphere:
def __init__(self, name, mass, radius, xpos, ypos, zpos, xvel, yvel, zvel):
self.name = name
self.radius = radius
self.mass = mass
self.pos = | np.array((xpos, ypos, zpos)) | numpy.array |
"""
This is a self-standing module that calculates limb darkening parameters for either 1D or 3D stellar models. It
returns the parameters for 4-parameter, 3-parameter, quadratic and linear limb darkening models.
"""
import os
import numpy as np
import pandas as pd
from scipy.io import readsav
from scipy.interpolate import interp1d, splev, splrep
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter
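# --- Hedged usage sketch (not part of the original module) ------------------------
# The directory below is a placeholder and must point at the limb-darkening grid
# downloaded from Zenodo; the wavelength array is an arbitrary WFC3/G141-like bin in
# microns. The returned coefficients are described in the limb_dark_fit docstring.
def _demo_limb_dark_fit():
    wsdata = np.linspace(1.1, 1.7, 50)   # microns
    return limb_dark_fit(mode='WFC3_G141', wsdata=wsdata, M_H=0.0, Teff=5500.0,
                         logg=4.5, dirsen='/path/to/limb_darkening_data',
                         ld_model='1D')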
def limb_dark_fit(mode, wsdata, M_H, Teff, logg, dirsen, ld_model='1D', custom_wave=None, custom_sen=None):
"""
Calculates stellar limb-darkening coefficients for a given wavelength bin.
Modes Currently Supported:
Spectroscopic:
HST STIS G750L, G750M, G430L gratings
HST WFC3 UVIS/G280+1, UVIS/G280-1, IR/G102, IR/G141 grisms
JWST NIRSpec Prism, G395H, G395M, G235H, G235M, G140H-f100, G140M-f100, G140H-f070, G140M-f070
JWST NIRISS SOSSo1, SOSSo2
JWST NIRCam F322W2, F444
JWST MIRI LRS
Photometric:
TESS
Spitzer IRAC Ch1 (3.6 microns), Ch2 (4.5 microns)
Custom throughput model
Procedure from Sing et al. (2010, A&A, 510, A21).
Uses 3D limb darkening from Magic et al. (2015, A&A, 573, 90).
    Uses photon FLUX summed over (lambda*dlambda).
:param mode: string; mode to use Spectroscopic: ('STIS_G430L','STIS_G750L', 'WFC3_G280p1', 'WFC3_G280n1', 'WFC3_G102', 'WFC3_G141', 'NIRSpec_Prism', 'NIRSpec_G395H', 'NIRSpec_G395M', 'NIRSpec_G235H', 'NIRSpec_G235M', 'NIRSpec_G140Hf100', 'NIRSpec_G140Mf100', 'NIRSpec_G140Hf070', 'NIRSpec_G140Mf070', 'NIRISS_SOSSo1', 'NIRISS_SOSSo2', 'NIRCam_F322W2', 'NIRCam_F444', 'MIRI_LRS'), Photometric: ('IRAC_Ch1', 'IRAC_Ch2', 'TESS'), Custom: ('Custom')
:param wsdata: array; data wavelength solution for range required
:param M_H: float; stellar metallicity
:param Teff: float; stellar effective temperature (K)
:param logg: float; stellar gravity
:param dirsen: string; path to main limb darkening directory downloaded from Zenodo V2.1
:param ld_model: string; '1D' or '3D', makes choice between limb darkening models; default is 1D
;optional param custom_wave: array; wavelength array for custom throughput profile
;optional param custom_sen: array; throughput for custom instrument profile (values between 0 and 1)
:return: uLD: float; linear limb darkening coefficient
aLD, bLD: float; quadratic limb darkening coefficients
cp1, cp2, cp3, cp4: float; three-parameter limb darkening coefficients
c1, c2, c3, c4: float; non-linear limb-darkening coefficients
"""
print('You are using the', str(ld_model), 'limb darkening models.')
if ld_model == '1D':
direc = os.path.join(dirsen, 'Kurucz')
print('Current Directories Entered:')
print(' ' + dirsen)
print(' ' + direc)
# Select metallicity
M_H_Grid = np.array([-0.1, -0.2, -0.3, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0, -3.5, -4.0, -4.5, -5.0, 0.0, 0.1, 0.2, 0.3, 0.5, 1.0])
M_H_Grid_load = np.array([0, 1, 2, 3, 5, 7, 8, 9, 10, 11, 12, 13, 14, 17, 20, 21, 22, 23, 24])
optM = (abs(M_H - M_H_Grid)).argmin()
MH_ind = M_H_Grid_load[optM]
# Determine which model is to be used, by using the input metallicity M_H to figure out the file name we need
file_list = 'kuruczlist.sav'
sav1 = readsav(os.path.join(direc, file_list))
model = bytes.decode(sav1['li'][MH_ind]) # Convert object of type "byte" to "string"
# Select Teff and subsequently logg
Teff_Grid = np.array([3500, 3750, 4000, 4250, 4500, 4750, 5000, 5250, 5500, 5750, 6000, 6250, 6500])
optT = (abs(Teff - Teff_Grid)).argmin()
logg_Grid = np.array([4.0, 4.5, 5.0])
optG = (abs(logg - logg_Grid)).argmin()
if logg_Grid[optG] == 4.0:
Teff_Grid_load = | np.array([8, 19, 30, 41, 52, 63, 74, 85, 96, 107, 118, 129, 138]) | numpy.array |
#!/usr/bin/env python
"""
This is an example script showing how to do parallel regridding with ESMPy. ESMPy abstracts some
of the parallel components from the user so that very few calls to mpi4py methods are necessary.
"""
import ESMF
import matplotlib.pyplot as plt
from mpi4py import MPI
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2017"
__email__ = "<EMAIL>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__version__ = "1.0.1"
############################################# CONFIG #############################################
# This is the variable that will be interpolated. A few others are possible.
IVAR = 'dpc'
# This will toggle the use of the `Gatherv` method. The `Gatherv` method can
# send unequal chunks of numpy arrays to other MPI processes. This will
# **generally** be faster than the `gather` method that sends python objects.
# From my limited experience, `Gatherv` is faster for larger grids, but not grids
# that are as small as being used in this example. As always, test for yourself
# on your system to determine the best choice for your particular use case.
GATHERV = True
# Toggle some informative print statements
VERBOSE = True
# Toggle plot, saves otherwise
PLOT = True
# Below, you will see arrays that are transposed. This is done to get the arrays into Fortran
# contiguous memory order. ESMF subroutines that are called are written in Fortran and passing
# arrays with their native memory order will help improve efficiency. Unfortunately, these
# changes can make the logic of the script less intuitive. The efficiency gain will mostly affect
# larger grid sizes. The good news is that you can remove the the transpose (`.T`) calls from the
# arrays and go back to the original, straightforward code without a problem.
FORTRAN_CONTIGUOUS = True
##################################################################################################
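# Illustrative aside (not part of the original script): transposing a C-ordered
# NumPy array returns a view of the same memory in Fortran order, which is all the
# FORTRAN_CONTIGUOUS toggle above relies on.
def _show_fortran_order():
    a = np.zeros((3, 4))
    return a.flags['C_CONTIGUOUS'], a.T.flags['F_CONTIGUOUS']   # (True, True)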
def get_processor_bounds(target, staggerloc):
"""
:param target: The grid object from which to extract local bounds.
:type target: :class:`ESMF.Grid`
:return: A tuple of integer bounds. See ``return`` statement.
:rtype: tuple
"""
# The lower_bounds and upper_bounds properties give us global indices of the processor local
# bounds. The assumed dimension order is Z, Y, X (based on the data being used in this example)
x_lower_bound = target.lower_bounds[staggerloc][1]
x_upper_bound = target.upper_bounds[staggerloc][1]
y_lower_bound = target.lower_bounds[staggerloc][0]
y_upper_bound = target.upper_bounds[staggerloc][0]
return x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound
# Turn on the debugger. An output file for each process will be produced
ESMF.Manager(debug=True)
# Set up MPI communicator and get environment information
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
if rank == 0 and VERBOSE:
print('Loading data...')
#RUC Grid (this will be the source grid)
with | np.load('ruc2_130_20120414_1200_006.npz') | numpy.load |
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
if gdat.numbener == 1:
spec = flux[None, :]
else:
if plot:
meanener = gdat.meanpara.enerplot
else:
meanener = gdat.meanpara.ener
        if spectype == 'gaus':
            spec = 1. / edis[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)
        if spectype == 'voig':
            args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
            spec = 1. / sigm[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.real(scipy.special.wofz(args))
        if spectype == 'edis':
            edis = edisintp(elin)[None, :]
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'pvoi':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'lore':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'powr':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
        if spectype == 'colr':
if plot:
spec = np.zeros((gdat.numbenerplot, flux.size))
else:
spec = np.empty((gdat.numbener, flux.size))
for i in gdat.indxener:
if i < gdat.indxenerpivt:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
elif i == gdat.indxenerpivt:
spec[i, :] = flux
else:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
        if spectype == 'curv':
spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
        if spectype == 'expc':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
return spec
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
# calculate the distance to all pixels from each point source
dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
# interpolate the PSF onto the pixels
if gdat.kernevaltype == 'ulip':
psfntemp = psfnintp(dist)
if gdat.kernevaltype == 'bspx':
pass
# scale by the PS spectrum
sbrtpnts = spec[:, None, None] * psfntemp
return sbrtpnts
def retr_psfnwdth(gdat, psfn, frac):
'''
Return the PSF width
'''
wdth = np.zeros((gdat.numbener, gdat.numbevtt))
for i in gdat.indxener:
for m in gdat.indxevtt:
psfntemp = psfn[i, :, m]
indxanglgood = np.argsort(psfntemp)
intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))
if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):
wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)
else:
wdthtemp = 0.
wdth[i, m] = wdthtemp
return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
lgal = gdat.lgalgrid[indxpixldraw] + randn(gdat.sizepixl)
bgal = gdat.bgalgrid[indxpixldraw] + randn(gdat.sizepixl)
return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)
return lprb
### probability density functions
def pdfn_self(xdat, minm, maxm):
pdfn = 1. / (maxm - minm)
return pdfn
def pdfn_expo(xdat, maxm, scal):
if (xdat > maxm).any():
pdfn = 0.
else:
pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)
return pdfn
def pdfn_dexp(xdat, maxm, scal):
pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)
return pdfn
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
if np.isscalar(xdat):
xdat = np.array([xdat])
faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
(1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
facb = faca * brek**(sloplowr - slopuppr) / (1. - sloplowr)
pdfn = np.empty_like(xdat)
indxlowr = np.where(xdat <= brek)[0]
indxuppr = np.where(xdat > brek)[0]
if indxlowr.size > 0:
pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
if indxuppr.size > 0:
pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
return pdfn
def pdfn_powr(xdat, minm, maxm, slop):
norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
pdfn = norm * xdat**(-slop)
return pdfn
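# --- Hedged sanity check (not part of the original module) ------------------------
# The power-law pdf above is normalised on [minm, maxm], so a numerical integration
# should return ~1 for any slope different from 1.
def _check_pdfn_powr(minm=1., maxm=10., slop=1.9):
    xdat = np.linspace(minm, maxm, 100001)
    return np.trapz(pdfn_powr(xdat, minm, maxm, slop), xdat)   # ~1.0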
def pdfn_logt(xdat, minm, maxm):
pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat
return pdfn
def pdfn_igam(xdat, slop, cutf):
pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)
return pdfn
def pdfn_lnor(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_gaus(xdat, mean, stdv):
    pdfn = 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
return pdfn
def pdfn_lgau(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_atan(para, minmpara, maxmpara):
pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))
return pdfn
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
gmod = getattr(gdat, strgmodl)
scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'self':
paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'logt':
paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, listmaxmparagenrscalbase)
elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
# use local copies instead of overwriting the model attributes, which would corrupt them for subsequent calls
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'eerr':
cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, listmeanparagenrscalbase, liststdvparagenrscalbase, \
cdfnlistminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scalparagenrbase == 'pois':
paragenrscalbaseunit = paragenrscalbase
if gdat.booldiagmode:
if paragenrscalbaseunit == 0:
print('Warning. CDF is zero.')
return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
gmod = getattr(gdat, strgmodl)
# tobechanged
# temp -- change zeros to empty
paragenrscalfull = np.zeros_like(paragenrunitfull)
for scaltype in gdat.listscaltype:
listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
if len(listindxparagenrbasescal) == 0:
continue
paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
if indxparagenrfullelem is not None:
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
if indxparagenrfulltemp.size == 0:
continue
paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
if gdat.booldiagmode:
if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
raise Exception('')
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
gmod = getattr(gdat, strgmodl)
if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
if scaltype == 'self':
paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'logt':
paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
elif scaltype == 'gaus' or scaltype == 'eerr':
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
if scaltype == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scaltype == 'pois':
paragenrscalbase = paragenrunitbase
if gdat.booldiagmode:
if not np.isfinite(paragenrscalbase).all():
print('scaltype')
print(scaltype)
print('paragenrscalbase')
print(paragenrscalbase)
print('type(paragenrscalbase)')
print(type(paragenrscalbase))
print('paragenrscalbase.dtype')
print(paragenrscalbase.dtype)
raise Exception('')
return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
gmod = getattr(gdat, strgmodl)
if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
minm = getattr(gmod.minmpara, nameparagenrelem)
if scalcomp != 'self':
maxm = getattr(gmod.maxmpara, nameparagenrelem)
if scalcomp == 'powr':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
if gdat.booldiagmode:
if not np.isfinite(slop):
raise Exception('')
if maxm < minm:
raise Exception('')
icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
if scalcomp == 'dpowslopbrek':
distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
# 'expo' and 'dexp' are not covered by the block above, so fetch maxm here; 'logt' also needs fact
if scalcomp == 'expo':
sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
maxm = getattr(gmod.maxmpara, nameparagenrelem)
icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
if scalcomp == 'self':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
if scalcomp == 'logt':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_logt(cdfn, minm, fact)
if scalcomp == 'dexp':
scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
maxm = getattr(gmod.maxmpara, nameparagenrelem)
icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
if scalcomp == 'lnormeanstdv':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
if scalcomp == 'igam':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
icdf = tdpy.icdf_igam(cdfn, slop, cutf)
if scalcomp == 'gaus':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
if gdat.booldiagmode:
if not np.isfinite(icdf).all():
print('icdf')
print(icdf)
raise Exception('')
return icdf
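# Standalone sketch of the unit-interval <-> physical mapping used above, for the power-law case only
# (self-contained and independent of tdpy; parameter values are illustrative): the inverse CDF maps
# u in [0, 1] to x in [minm, maxm], and the CDF maps it back.
import numpy as np
_minm, _maxm, _slop = 1., 100., 2.0
_unit = np.linspace(0.01, 0.99, 5)
_icdf = (_unit * (_maxm**(1. - _slop) - _minm**(1. - _slop)) + _minm**(1. - _slop))**(1. / (1. - _slop))
_cdfn = (_icdf**(1. - _slop) - _minm**(1. - _slop)) / (_maxm**(1. - _slop) - _minm**(1. - _slop))
assert np.allclose(_cdfn, _unit)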
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
# keep a local copy; overwriting gmod.listscalparagenrelem would corrupt the model for subsequent populations
listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
cdfn = np.empty_like(icdf)
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
if listscalparagenrelem[k] == 'self' or listscalparagenrelem[k] == 'dexp' or listscalparagenrelem[k] == 'expo' \
or listscalparagenrelem[k] == 'powr' or listscalparagenrelem[k] == 'dpowslopbrek':
minm = getattr(gdat.fitt.minm, nameparagenrelem)
if listscalparagenrelem[k] == 'powr':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
elif listscalparagenrelem[k] == 'dpowslopbrek':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
else:
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
cdfn[k] = cdfn_self(icdf[k], minm, fact)
if listscalparagenrelem[k] == 'lnormeanstdv':
distmean = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_lnor(icdf[k], distmean, diststdv)
if listscalparagenrelem[k] == 'igam':
slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
if listscalparagenrelem[k] == 'gaus':
distmean = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
if gdat.typeverb > 1:
print('updt_stat()')
# update the sample and the unit sample vectors
gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
if gdatmodi.this.indxproptype > 0:
gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
# the attributes accessed below live on the fitting model
gmod = gdat.fitt
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
try:
comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
if gmod.listscalparagenrelem[l][g] == 'self':
compunit = cdfn_self(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'logt':
compunit = cdfn_logt(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'expo':
scal = getattr(gdat.fitt, 'gangdistsexp')
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
compunit = cdfn_expo(comp, maxm, scal)
if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
if gmod.listscalparagenrelem[l][g] == 'powr':
compunit = cdfn_powr(comp, minm, maxm, slop)
if gmod.listscalparagenrelem[l][g] == 'igam':
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
compunit = cdfn_igam(comp, slop, cutf)
if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
compunit = cdfn_dpow(comp, minm, maxm, brek, sloplowr, slopuppr)
if gmod.listscalparagenrelem[l][g] == 'gaus':
distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
compunit = cdfn_gaus(comp, distmean, diststdv)
except:
if gdat.typeverb > 0:
print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
gmod = getattr(gdat, strgmodl)
lgal = dictelem[l]['lgal']
bgal = dictelem[l]['bgal']
varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
if gmod.typeelemspateval[l] == 'locl':
listindxpixlelem = [[] for k in range(lgal.size)]
for k in range(lgal.size):
indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)
if indxfluxproxtemp > 0:
indxfluxproxtemp -= 1
if indxfluxproxtemp == gdat.binspara.prox.size - 1:
print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')
indxfluxproxtemp -= 1
indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]
if isinstance(indxpixlelem, int):
indxpixlelem = gdat.indxpixl
listindxpixlelem[k] = indxpixlelem
listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
else:
listindxpixlelemconc = gdat.indxpixl
listindxpixlelem = gdat.indxpixl
return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
if gdat.typepixl == 'heal':
xdat, ydat, zaxi = retr_unit(lgal, bgal)
anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi
if retranglcosi:
return anglcosi
else:
angldist = np.arccos(anglcosi)
return angldist
else:
angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)
return angldist
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
if gdat.typepixl == 'heal':
indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
if gdat.booldiagmode:
if (indxpixl == -1).any():
raise Exception('pixlcnvt went negative!')
if gdat.typepixl == 'cart':
indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
if np.isscalar(indxlgcr):
if indxlgcr < 0:
indxlgcr = 0
if indxlgcr >= gdat.numbsidecart:
indxlgcr = gdat.numbsidecart - 1
else:
indxlgcr[np.where(indxlgcr < 0)] = 0
indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
if np.isscalar(indxbgcr):
if indxbgcr < 0:
indxbgcr = 0
if indxbgcr >= gdat.numbsidecart:
indxbgcr = gdat.numbsidecart - 1
else:
indxbgcr[np.where(indxbgcr < 0)] = 0
indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
# convert to an index of non-zero exposure pixels
#indxpixl = gdat.indxpixlroficnvt[indxpixl]
return indxpixl
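# Standalone sketch of the Cartesian branch above (illustrative dummy values): 2-D grid indices are clipped to the
# grid and flattened into a single pixel index as indxlgcr * numbsidecart + indxbgcr.
import numpy as np
_numbsidecart = 100
_indxlgcr = np.clip(np.array([-3, 10, 250]), 0, _numbsidecart - 1)
_indxbgcr = np.clip(np.array([5, 99, 120]), 0, _numbsidecart - 1)
_indxpixl = _indxlgcr * _numbsidecart + _indxbgcr
# -> array([   5, 1099, 9999])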
## obtain count maps
def retr_cntp(gdat, sbrt):
cntp = sbrt * gdat.expo * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
return cntp
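# Standalone sketch (dummy numbers) of the conversion above: counts = surface brightness x exposure x pixel solid
# angle, and additionally x energy bin width when the brightness is differential in energy.
import numpy as np
_sbrt = np.full((3, 10, 2), 1e-7)      # photons / cm^2 / s / sr / GeV
_expo = np.full((3, 10, 2), 1e11)      # cm^2 s
_apix = 1e-6                           # sr per pixel
_deltener = np.array([0.7, 2.0, 7.0])  # GeV
_cntp = _sbrt * _expo * _apix * _deltener[:, None, None]
# expected counts per pixel, energy bin and event class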
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
if strgmodl == 'true' or strgstat == '':
path = gdat.pathinit + nameinte + strgplot + '.pdf'
elif strgstat == 'pdfn' or strgstat == 'mlik':
path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'
elif strgstat == 'this':
path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep
return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
gmod = getattr(gdat, strgmodl)
minm = getattr(gdat.minmpara, nameparagenrelemampl)
maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
return mrkrsize
## experiment specific
def retr_psfphubb(gdat, gmod):
# temp
gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gdat, gmod):
# temp
#gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
if gdat.numbenerfull == 5:
gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
if gdat.numbenerfull == 2:
gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
#gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, \
#                          0.30 / gdat.anglfact, \
#                          0.40 / gdat.anglfact, \
#                          0.60 / gdat.anglfact, \
#                          0.70 / gdat.anglfact])
#gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
# 0.30 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.40 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.60 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
gmod.psfpexpr = np.array([0.05])
def retr_psfpferm(gdat, gmod):
if gdat.anlytype.startswith('rec8'):
path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
else:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
irfn = astropy.io.fits.getdata(path, 1)
minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
enerirfn = np.sqrt(minmener * maxmener)
numbpsfpscal = 3
numbpsfpform = 5
fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
for m in gdat.indxevtt:
if gdat.anlytype.startswith('rec8'):
irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
else:
if m == 1:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
elif m == 0:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
else:
continue
irfn = astropy.io.fits.getdata(path, 1)
fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
for k in range(numbpsfpform):
fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
# convert N_tail to f_core
for m in gdat.indxevtt:
for i in gdat.indxener:
fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
# calculate the scale factor
gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
# store the fermi PSF parameters
gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
for m in gdat.indxevtt:
for k in range(numbpsfpform):
indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
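# Standalone sketch of the N_tail -> f_core conversion above (illustrative numbers): the tail normalization relative
# to the core is converted into the core fraction f_core = 1 / (1 + N_tail * sigma_tail^2 / sigma_core^2).
import numpy as np
_scorpsfn, _stailpsfn, _ntailpsfn = 0.8, 1.6, 0.1   # core width, tail width, tail normalization
_fcorpsfn = 1. / (1. + _ntailpsfn * _stailpsfn**2 / _scorpsfn**2)
# -> ~0.71, i.e. roughly 71% of the flux is attributed to the core component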
def retr_refrchaninit(gdat):
# the label attributes set below live on a model object; assuming the fitting model here
gmod = gdat.fitt
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.dictrefr = []
for q in gdat.indxrefr:
gdat.dictrefr.append(dict())
gdat.refr.namepara.elemsign = ['flux', 'magt']
gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
gdat.listnamerefr += ['xu11', 'wo08']
setattr(gdat, 'plotminmotyp', 0.)
setattr(gdat, 'plotmaxmotyp', 1.)
setattr(gmod.lablrootpara, 'otyp', 'O')
setattr(gdat, 'scalotypplot', 'self')
setattr(gmod.lablrootpara, 'otypxu11', 'O')
for name in gdat.listnamerefr:
setattr(gdat, 'plotminmotyp' + name, 0.)
setattr(gdat, 'plotmaxmotyp' + name, 1.)
if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
for k, line in enumerate(thisfile):
if k < 18:
continue
rasccand = line[2]
declcand = line[2]
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
booltemp = False
if gdat.anlytype.startswith('extr'):
if gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] = 1490
gdat.numbpixlbgalshft[0] = 1430
else:
booltemp = True
elif gdat.anlytype.startswith('home'):
gdat.numbpixllgalshft[0] = 0
gdat.numbpixlbgalshft[0] = 0
if gdat.numbsidecart == 600:
pass
elif gdat.numbsidecart == 100:
indxtile = int(gdat.anlytype[-4:])
numbsidecntr = int(gdat.anlytype[8:12])
numbtileside = numbsidecntr // gdat.numbsidecart
indxtilexaxi = indxtile // numbtileside
indxtileyaxi = indxtile % numbtileside
gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
elif gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] += 150
gdat.numbpixlbgalshft[0] += 150
else:
booltemp = True
else:
booltemp = True
if booltemp:
raise Exception('Reference elements cannot be aligned with the spatial axes!')
## WCS object for rotating reference elements into the ROI
if gdat.numbener == 2:
gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
else:
gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
# Xue et al. (2011)
#with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
pathfile = gdat.pathinpt + 'Xue2011.fits'
hdun = astropy.io.fits.open(pathfile)
hdun.info()
lgalchan = hdun[1].data['_Glon'] / 180. * pi
bgalchan = hdun[1].data['_Glat'] / 180. * pi
fluxchansoft = hdun[1].data['SFlux']
fluxchanhard = hdun[1].data['HFlux']
objttypechan = hdun[1].data['Otype']
gdat.refrlumi[0][0] = hdun[1].data['Lx']
# position
gdat.refr.dictelem[0]['lgal'] = lgalchan
gdat.refr.dictelem[0]['bgal'] = bgalchan
# spectra
gdat.refrspec = [[np.zeros((3, gdat.numbener, lgalchan.size))]]
if gdat.numbener == 2:
gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
else:
gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
# fluxes
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
# spectral indices
if gdat.numbener > 1:
gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
## object type
objttypechantemp = np.zeros(lgalchan.size) - 1.
indx = np.where(objttypechan == 'AGN')[0]
objttypechantemp[indx] = 0.165
indx = np.where(objttypechan == 'Galaxy')[0]
objttypechantemp[indx] = 0.495
indx = np.where(objttypechan == 'Star')[0]
objttypechantemp[indx] = 0.835
gdat.refrotyp[0][0] = objttypechantemp
# Wolf et al. (2008)
path = gdat.pathdata + 'inpt/Wolf2008.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - pi) % (2. * pi)) - pi
gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
gdat.refrmagt[1][0] = data['Rmag']
gdat.refrreds[1][0] = data['MCz']
#listname = []
#for k in range(data['MCclass'].size):
# if not data['MCclass'][k] in listname:
# listname.append(data['MCclass'][k])
listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
for k, name in enumerate(listname):
indx = np.where(data['MCclass'] == name)[0]
gdat.refrotyp[1][0][indx] = k / 10.
# error budget
for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
# the label attributes set below live on a model object; assuming the fitting model here
gmod = gdat.fitt
gdat.listnamerefr += ['ac15', 'ma05']
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
gdat.refr.namepara.elemsign = ['flux', 'flux0400']
setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
for name in gdat.listnamerefr:
setattr(gdat.minmpara, 'curv' + name, -1.)
setattr(gdat.maxmpara, 'curv' + name, 1.)
setattr(gdat.minmpara, 'expc' + name, 0.1)
setattr(gdat.maxmpara, 'expc' + name, 10.)
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
# the plotting-scale attributes set below live on a model object; assuming the fitting model here
gmod = gdat.fitt
gdat.minmstyp = -0.5
gdat.maxmstyp = 3.5
gdat.lablstyp = 'S'
gmod.scalstypplot = 'self'
gdat.minmtvar = 0.
gdat.maxmtvar = 400.
gdat.labltvar = 'T'
gmod.scaltvarplot = 'logt'
# Acero+2015
path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
fgl3 = astropy.io.fits.getdata(path)
gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
for k in range(gdat.refr.dictelem[0]['lgal'].size):
gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
gdat.refrtvar[0] = fgl3['Variability_Index']
gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
indx = np.where(gdat.refrstyp[0] == -1)[0]
if indx.size > 0:
raise Exception('')
gdat.refrsind[0] = fgl3['Spectral_Index']
gdat.refrcurv[0] = fgl3['beta']
gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
# Manchester+2005
path = gdat.pathdata + 'inpt/Manchester2005.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['glat'])
gdat.refrper0[1] = data['P0']
gdat.refrper1[1] = data['P1']
gdat.refrflux0400[1] = data['S400']
#gdat.refrdism[1] = data['DM']
#gdat.refrdlos[1] = data['Dist']
# error budget
for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_singking(scaldevi, sigc, gamc):
psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
return psfn
def retr_doubgaus(scaldevi, frac, sigc, sigt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * np.exp(-0.5 * scaldevi**2 / sigt**2)
return psfn
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \
(1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
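# Standalone numerical check (illustrative, assuming the first argument is the angular offset in the same units as
# sigc): the single-King profile above integrates to ~1 over the plane, i.e. integral of psfn(theta) 2 pi theta dtheta ~ 1.
import numpy as np
_sigc, _gamc = 0.1, 2.5
_angl = np.linspace(0., 20., 200001)
_psfn = 1. / 2. / np.pi / _sigc**2 * (1. - 1. / _gamc) * (1. + _angl**2 / 2. / _gamc / _sigc**2)**(-_gamc)
# np.trapz(_psfn * 2. * np.pi * _angl, _angl) should be close to 1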
def retr_lgalbgal(gang, aang):
lgal = gang * np.cos(aang)
bgal = gang * np.sin(aang)
return lgal, bgal
def retr_gang(lgal, bgal):
gang = np.arccos(np.cos(lgal) * np.cos(bgal))
return gang
def retr_aang(lgal, bgal):
aang = np.arctan2(bgal, lgal)
return aang
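# Standalone small-angle sketch (illustrative): for offsets well below a radian, retr_gang/retr_aang and
# retr_lgalbgal approximately invert each other.
import numpy as np
_lgal, _bgal = 0.01, -0.02                        # radians
_gang = np.arccos(np.cos(_lgal) * np.cos(_bgal))  # total angular offset
_aang = np.arctan2(_bgal, _lgal)                  # position angle
assert np.allclose([_gang * np.cos(_aang), _gang * np.sin(_aang)], [_lgal, _bgal], atol=1e-5)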
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
print('strgmodl: ' + strgmodl)
print('strgstat: ' + strgstat)
print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
for k in gmod.indxparagenrfull:
if indxsampshow is not None and not k in indxsampshow:
continue
if gmod.numbparaelem > 0:
booltemp = False
for l in gmod.indxpopl:
if k == gmod.indxparagenrelemsing[l][0]:
booltemp = True
if booltemp:
print('')
print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
if False:
# amplitude-dependent proposal scale
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdv**2))
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
# place the new parameters into the sample vector
# the cdfn gives the unit-interval values; the physical values go into paragenrscalfull
gmodnext.paragenrunitfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
gmodnext.paragenrunitfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
# assuming the element positions are bounded by the ROI half-size stored in gdat.maxmgangdata
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmgangdata or np.fabs(gdatmodi.compseco[0]) > gdat.maxmgangdata or \
np.fabs(gdatmodi.compfrst[1]) > gdat.maxmgangdata or np.fabs(gdatmodi.compseco[1]) > gdat.maxmgangdata:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
# temp -- this would not work for multiple populations !
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
# the cdfn gives the unit-interval values of the merged element; the physical values go into paragenrscalfull
gmodnext.paragenrunitfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
raise Exception('')
if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
raise Exception('')
def calc_probprop(gdat, gdatmodi):
gmod = gdat.fitt
# calculate the factor to multiply the acceptance rate, i.e.,
## probability of the auxiliary parameters,
if gdatmodi.this.indxproptype == 0:
gdatmodi.this.lpau = 0.
elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.lpau += lpautemp
if gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau -= lpautemp
elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau = 0.
dictelemtemp = [dict()]
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau *= -1.
if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
## the ratio of the probability of the reverse and forward proposals, and
if gdatmodi.this.indxproptype == 3:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
else:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)
## Jacobian
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
else:
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.ljcb *= -1.
else:
gdatmodi.this.ljcb = 0.
gdatmodi.this.ltrp = 0.
for l in gmod.indxpopl:
if gdatmodi.this.indxproptype > 0:
setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
gmod = getattr(gdat, strgmodl)
## element parameters
if gmod.numbparaelem > 0:
indxparagenrfullelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
cntr = tdpy.cntr()
indxparagenrfullelem[l] = dict()
for nameparagenrelem in gmod.namepara.genrelem[l]:
indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(indxparagenrfullelem[l]['full']) > 0:
if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
print('strgmodl')
print(strgmodl)
print('gmod.numbparagenrbase')
print(gmod.numbparagenrbase)
print('gmod.numbparagenrelem[l]')
print(gmod.numbparagenrelem[l])
print('indxparagenrfullelem[l][full]')
summgene(indxparagenrfullelem[l]['full'])
print('gdat.fitt.minmpara.numbelempop0')
print(gdat.fitt.minmpara.numbelempop0)
print('gdat.fitt.maxmpara.numbelempop0')
print(gdat.fitt.maxmpara.numbelempop0)
raise Exception('Element parameter indices are bad.')
else:
indxparagenrfullelem = None
return indxparagenrfullelem
def retr_weigmergodim(gdat, elin, elinothr):
weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)
return weigmerg
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))
return weigmerg
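# A minimal usage sketch (hypothetical values, not part of the sampler): the merge
# weights above are Gaussians in the separation scaled by gdat.radispmr, so
# coincident elements get weight 1 and well-separated ones get weights near 0.
def demo_weigmerg():

    class _MockGdat:
        radispmr = 1.

    gdat = _MockGdat()
    weigodim = retr_weigmergodim(gdat, 0., np.array([0., 3.]))
    weigtdim = retr_weigmergtdim(gdat, 0., np.array([0., 3.]), 0., np.array([0., 3.]))

    return weigodim, weigtdim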
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
# calculate the weights
if strgtype == 'seco':
numb = 1
if strgtype == 'pair':
numb = 2
listweigmerg = []
for a in range(numb):
if typeelem[indxpopltran].startswith('lghtline'):
elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
elin = elintotl[gdatmodi.indxelemfullmodi[0]]
elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergodim(gdat, elin, elinothr)
else:
lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]
bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]
lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
listweigmerg.append(weigmerg)
# determine the probability of merging the second element given the first element
if strgtype == 'seco':
probmerg = listweigmerg[0] / np.sum(listweigmerg[0])
# determine the probability of merging the pair
if strgtype == 'pair':
if typeelem[indxpopltran].startswith('lghtline'):
weigpair = retr_weigmergodim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])
else:
weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])
probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])
if gdat.booldiagmode:
if not np.isfinite(probmerg).all():
raise Exception('Merge probability is not finite.')
return probmerg
def retr_indxparaelem(gmod, l, u):
indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
return indxsamppnts
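# An illustrative sketch with hypothetical bookkeeping values: if the element
# parameters start at flat index 8, population 0 has 5 generative parameters per
# element and no preceding populations, then element u=2 occupies indices 18..22.
def demo_indxparaelem():

    class _MockGmod:
        indxparagenrfulleleminit = 8
        numbparagenrelemcuml = np.array([0])
        numbparagenrelemsing = np.array([5])
        indxparagenrelemsing = [np.arange(5)]

    return retr_indxparaelem(_MockGmod(), 0, 2)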
def gang_detr():
gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
a1, a2, a3, b1, b2, b3 = sympy.symbols('a1 a2 a3 b1 b2 b3')
AB = sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])
return AB
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
gmod = getattr(gdat, strgmodl)
indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
if gdat.typeexpr == 'ferm':
scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
else:
scalangl = thisangl[None, :, None]
if typemodlpsfn == 'singgaus':
sigc = psfp[indxpsfpinit]
sigc = sigc[:, None, :]
psfn = retr_singgaus(scalangl, sigc)
elif typemodlpsfn == 'singking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
psfn = retr_singking(scalangl, sigc, gamc)
elif typemodlpsfn == 'doubking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigt = psfp[indxpsfpinit+2]
gamt = psfp[indxpsfpinit+3]
frac = psfp[indxpsfpinit+4]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
sigt = sigt[:, None, :]
gamt = gamt[:, None, :]
frac = frac[:, None, :]
psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
if gdat.typeexpr == 'ferm':
psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)
# normalize the PSF
if gdat.typeexpr == 'ferm':
fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
psfn /= fact
return psfn
def retr_unit(lgal, bgal):
xdat = np.cos(bgal) * np.cos(lgal)
ydat = -np.cos(bgal) * np.sin(lgal)
zaxi = np.sin(bgal)
return xdat, ydat, zaxi
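# A minimal sketch (hypothetical angles): the Cartesian components returned above
# form a unit vector, so their squared sum is 1 for any (lgal, bgal).
def demo_retr_unit():
    xdat, ydat, zaxi = retr_unit(0.3, -0.2)
    return np.isclose(xdat**2 + ydat**2 + zaxi**2, 1.)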
def retr_psec(gdat, conv):
# temp
conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3
psec = psec.flatten()
return psec
def retr_psecodim(gdat, psec):
psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
psecodim = np.zeros(gdat.numbsidecarthalf)
for k in gdat.indxmpolodim:
indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))
psecodim[k] = np.mean(psec[indxmpol])
psecodim *= gdat.meanpara.mpolodim**2
return psecodim
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfndiff = cdfnmaxm - cdfnminm
return cdfnminm, cdfndiff
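# A minimal sketch (hypothetical bounds): for a standard normal truncated to
# [-1, 1] the CDF difference returned above is about 0.68, which is the factor
# needed to renormalize the truncated distribution.
def demo_eerrnorm():
    cdfnminm, cdfndiff = retr_eerrnorm(-1., 1., 0., 1.)
    return cdfndiff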
def retr_condcatl(gdat):
gmod = gdat.fitt
# setup
## number of stacked samples
numbstks = 0
indxtupl = []
indxstks = []
indxstksparagenrscalfull = []
for n in gdat.indxsamptotl:
indxstks.append([])
indxstkssamptemp = []
for l in gmod.indxpopl:
indxstks[n].append([])
for k in range(len(gdat.listpostindxelemfull[n][l])):
indxstks[n][l].append(numbstks)
indxstkssamptemp.append(numbstks)
indxtupl.append([n, l, k])
numbstks += 1
indxstksparagenrscalfull.append(np.array(indxstkssamptemp))
if gdat.typeverb > 1:
print('indxstks')
print(indxstks)
print('indxtupl')
print(indxtupl)
print('indxstksparagenrscalfull')
print(indxstksparagenrscalfull)
print('numbstks')
print(numbstks)
cntr = 0
arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
for n in gdat.indxsamptotl:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
for l in gmod.indxpopl:
for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, indxparagenrfullelem[l][nameparagenrelem][k]]
if gdat.typeverb > 0:
print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
timeinit = gdat.functime()
gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
# temp
l = 0
gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]
# construct lists of samples for each proposal type
listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
thisperc = 0
cntr = 0
for k in gmod.indxparagenrelemtotl:
for n in range(numbstks):
dist = np.fabs(arrystks[n, k] - arrystks[:, k])
indxstksclos = np.where(dist < gdat.distthrs[k])[0]
if indxstksclos.size > 0:
for j in indxstksclos:
cntr += 1
listdisttemp[k].append(dist[j])
indxstksrows[k].append(n)
indxstkscols[k].append(j)
nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
if nextperc > thisperc:
thisperc = nextperc
if cntr > 1e6:
break
listdisttemp[k] = np.array(listdisttemp[k])
indxstksrows[k] = np.array(indxstksrows[k])
indxstkscols[k] = np.array(indxstkscols[k])
if cntr > 1e6:
break
listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))
listindxstkspair = []
indxstksleft = []
if gdat.typeverb > 0:
timefinl = gdat.functime()
indxstksleft = range(numbstks)
# list of sample lists of the labeled element
indxstksassc = []
cntr = 0
gdat.prvlthrs = 0.05
while len(indxstksleft) > 0:
# count number of associations
numbdist = np.zeros(numbstks, dtype=int) - 1
for p in range(len(indxstksleft)):
indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
(listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
numbdist[indxstksleft[p]] = indxindx.size
prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
if prvlmaxmesti < gdat.prvlthrs:
break
# determine the element with the highest number of neighbors
indxstkscntr = np.argmax(numbdist)
indxsamptotlcntr = indxtupl[indxstkscntr][0]
indxpoplcntr = indxtupl[indxstkscntr][1]
indxelemcntr = indxtupl[indxstkscntr][2]
# add the central element sample
indxstksassc.append([])
indxstksassc[cntr].append(indxstkscntr)
indxstksleft.remove(indxstkscntr)
if gdat.typeverb > 1:
print('Match step %d' % cntr)
print('numbdist')
print(numbdist)
print('indxstkscntr')
print(indxstkscntr)
print('indxstksleft')
print(indxstksleft)
# add the associated element samples
if len(indxstksleft) > 0:
for n in gdat.indxsamptotl:
indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
if n == indxsamptotlcntr:
continue
if indxstkstemp.size > 0:
totl = np.zeros_like(indxstkstemp)
for k in gmod.indxparagenrelemtotl:
temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
totl = totl + temp**2
indxleft = np.argsort(totl)[0]
indxstksthis = indxstkstemp[indxleft]
thisbool = True
for k in gmod.indxparagenrelemtotl:
if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
thisbool = False
if thisbool:
indxstksassc[cntr].append(indxstksthis)
indxstksleft.remove(indxstksthis)
# temp
#if gdat.makeplot:
# gdatmodi = tdpy.gdatstrt()
# gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])
# for r in range(len(indxstksassc)):
# calc_poststkscond(gdat, indxstksassc)
# gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]
# for indxstkstemp in indxstksleft:
# indxsamptotlcntr = indxtupl[indxstkstemp][0]
# indxpoplcntr = indxtupl[indxstkstemp][1]
# indxelemcntr = indxtupl[indxstkstemp][2]
# gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]
# gdatmodi.this.indxelemfull[].append()
# plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)
cntr += 1
gdat.dictglob['poststkscond'] = []
gdat.dictglob['liststkscond'] = []
# for each condensed element
for r in range(len(indxstksassc)):
gdat.dictglob['liststkscond'].append([])
gdat.dictglob['liststkscond'][r] = {}
gdat.dictglob['poststkscond'].append([])
gdat.dictglob['poststkscond'][r] = {}
for strgfeat in gmod.namepara.genrelem:
gdat.dictglob['liststkscond'][r][strgfeat] = []
# for each associated sample associated with the central stacked sample
for k in range(len(indxstksassc[r])):
indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
indxelemcntr = indxtupl[indxstksassc[r][k]][2]
for strgfeat in gmod.namepara.genrelem:
temp = getattr(gdat, 'list' + strgfeat)
if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
gdat.dictglob['liststkscond'][r][strgfeat].append(temp)
for r in range(len(gdat.dictglob['liststkscond'])):
for strgfeat in gmod.namepara.genrelem:
arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)
gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
gdat.indxstkscond = np.arange(gdat.numbstkscond)
gdat.prvl = np.empty(gdat.numbstkscond)
for r in gdat.indxstkscond:
gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
gdat.prvl /= gdat.numbsamptotl
gdat.minmprvl = 0.
gdat.maxmprvl = 1.
retr_axis(gdat, 'prvl')
gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
if gdat.makeplot:
pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
path = pathcond + 'histdist' + nameparagenrelem
listtemp = np.copy(listdist[k].toarray()).flatten()
listtemp = listtemp[np.where(listtemp != 1e20)[0]]
tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
path = pathcond + 'histprvl'
tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
gdat.prvlthrs = 0.1
gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
gdat.numbprvlhigh = gdat.indxprvlhigh.size
def retr_conv(gdat, defl):
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
# temp
conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.
conv = conv.flatten()
return conv
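# A hypothetical check of retr_conv: for a pure radial stretch defl = kappa * (x, y)
# the convergence is kappa everywhere (here kappa = 0.3), up to the absolute value
# taken above. Grid size and pixel size are mock values, not taken from any gdat.
def demo_retr_conv():

    class _MockGdat:
        numbsidecart = 8
        sizepixl = 1.

    xgrd, ygrd = np.meshgrid(np.arange(8.), np.arange(8.), indexing='ij')
    defl = np.dstack((0.3 * xgrd, 0.3 * ygrd)).flatten()

    return retr_conv(_MockGdat(), defl)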
def retr_invm(gdat, defl):
# temp
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \
np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)
invm = invm.flatten()
return invm
def setp_indxswepsave(gdat):
gdat.indxswep = np.arange(gdat.numbswep)
gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
gdat.boolsave[gdat.indxswepsave] = True
gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1
gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
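# An illustrative sketch (hypothetical numbers, independent of gdat): with
# numbburn=100 burn-in sweeps, numbsamp=50 saved samples and a thinning factor of
# factthin=10, the saved sweep indices are 100, 110, ..., 590, so numbswep must be
# at least numbburn + numbsamp * factthin = 600.
def demo_indxswepsave(numbburn=100, numbsamp=50, factthin=10):
    indxswepsave = np.arange(numbburn, numbburn + numbsamp * factthin, factthin)
    return indxswepsave.size == numbsamp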
def retr_cntspnts(gdat, listposi, spec):
cnts = np.zeros((gdat.numbener, spec.shape[1]))
if gdat.boolbinsspat:
lgal = listposi[0]
bgal = listposi[1]
indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)
else:
elin = listposi[0]
indxpixlpnts = np.zeros_like(elin, dtype=int)
for k in range(spec.shape[1]):
cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
if gdat.enerdiff:
cnts *= gdat.deltener[:, None]
cnts = np.sum(cnts, axis=0)
return cnts
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
mdencrit = gdat.factnewtlght / 4. / np.pi * adissour / adishostsour / adishost
return mdencrit
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
massfrombein = np.pi * adishost**2 * mdencrit
return massfrombein
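# A hypothetical usage sketch: given the angular-diameter distances (in whatever
# units gdat.factnewtlght assumes), the factor returned above converts the square
# of the host Einstein radius into the mass enclosed within it,
# M_Ein = pi * D_host^2 * Sigma_crit * theta_Ein^2.
def demo_massfrombein(gdat, adissour, adishost, adishostsour, beinhost):
    factmassfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
    return factmassfrombein * beinhost**2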
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
fracacutasca = acut / asca
factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)
return factmcutfromdefs
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
mscl = defs * np.pi * adishost**2 * mdencrit * asca
fracacutasca = acut / asca
mcut = mscl * retr_mcutfrommscl(fracacutasca)
return mcut
def retr_mcutfrommscl(fracacutasca):
mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
return mcut
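# A minimal sketch (hypothetical ratio): the factor above gives the fraction of the
# scale mass mscl that lies within the truncation radius; for acut/asca = 2 it
# evaluates to roughly 0.54.
def demo_mcutfrommscl():
    return retr_mcutfrommscl(2.)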
def retr_negalogt(varb):
negalogt = np.sign(varb) * np.log10(np.fabs(varb))
return negalogt
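# A minimal sketch (hypothetical values): the signed logarithm above maps -100 to -2
# and 100 to 2, compressing the dynamic range while keeping the sign.
def demo_negalogt():
    return retr_negalogt(np.array([-100., 100.]))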
def retr_gradmaps(gdat, maps):
# temp -- this does not work with vanishing exposure
maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
grad = grad.reshape((gdat.numbpixlcart, 2))
return grad
def retr_spatmean(gdat, inpt, boolcntp=False):
listspatmean = [[] for b in gdat.indxspatmean]
listspatstdv = [[] for b in gdat.indxspatmean]
for b, namespatmean in enumerate(gdat.listnamespatmean):
if boolcntp:
cntp = inpt[gdat.listindxcubespatmean[b]]
else:
cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
if gdat.boolcorrexpo:
spatmean /= gdat.expototlmean
spatstdv /= gdat.expototlmean
if gdat.enerdiff:
spatmean /= gdat.deltener
spatstdv /= gdat.deltener
listspatmean[b] = spatmean
listspatstdv[b] = spatstdv
return listspatmean, listspatstdv
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
grad = retr_gradmaps(gdat, maps)
defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
prod = grad * defl
if cntpmodl is not None:
prod /= cntpmodl[:, None]
dotstemp = np.sum(prod, 1)
if absv:
dotstemp = np.fabs(dotstemp)
dots = np.mean(dotstemp)
return dots
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
if strgvarb.startswith('cntpdata'):
varb = getattr(gdat, strgvarb)
elif strgvarb.startswith('histcntpdata'):
varb = getattr(gdat, strgvarb)
else:
if strgmodl == 'true':
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
varb = getattr(gmodstat, strgvarb)
if strgmodl == 'fitt':
if strgstat == 'this':
if strgmome == 'errr':
varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)
else:
varb = getattr(gdatmodi, strgstat + strgvarb)
if strgstat == 'pdfn':
varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
if indxlist is not None:
varb = varb[indxlist]
if indxvarb is not None:
if strgmome == 'errr':
varb = varb[[slice(None)] + indxvarb]
else:
varb = varb[indxvarb]
return np.copy(varb)
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
if gmod.typeelem[l] == 'clus':
gmod.nameparagenrelemampl[l] = 'nobj'
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
# foreground grid (image plane) -- the one where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.4, 2.4]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
for i in gdat.indxenerinde:
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-3., -0.5], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
elif isinstance(gmod.typeback[c], list) and isinstance(gmod.typeback[c][0], float):
gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c]]), sind=np.array([gmod.typeback[c]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
indxexpatemp = int(gmod.typeback[c][8:])
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
booltemp = False
raise Exception('Background templates must be positive everywhere.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
raise Exception('At least one background template must be positive everywhere.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the diffuse emission must be convolved by a kernel and point sources exist
typeevalpsfn = 'full'
else:
# the background is not convolved by a kernel and point sources exist
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot=r'$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot=r'$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
# background
## number of background parameters
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
# variables whose marginal distributions and pair-correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
gmod.namepara.derielemodim[l] += ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
lablpopl[l]= 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaelem)
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable probability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
# list of strings across all populations
## all (generative and derived) element parameters
gmod.numbparaelem = len(gmod.namepara.elem)
gmod.indxparaelem = np.arange(gmod.numbparaelem)
# flattened list of generative element parameters
gmod.listnameparagenfelem = []
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
# concatenated list of flattened generative and derived element parameters
gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.numbparaelem > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
if gmod.boollens:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.numbparaelem > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
## parameters parametrizing priors on element parameters
liststrgvarb = []
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
liststrgvarb += [strgfeat + 'distscal']
if strgpdfnelemgenr == 'powr':
liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
if strgpdfnelemgenr == 'dpow':
liststrgvarb += [strgfeat + 'distbrek']
liststrgvarb += [strgfeat + 'sloplowr']
liststrgvarb += [strgfeat + 'slopuppr']
if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
liststrgvarb += [strgfeat + 'distmean']
if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
liststrgvarb += [strgfeat + 'diststdv']
if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
liststrgvarb += [strgfeat + 'distmean', strgfeat + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
for l in gmod.indxpopl:
strgpopl = 'pop%d' % l
if gmod.maxmpara.numbelem[l] > 0:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if gmod.listscalparagenrelem[l][k] == 'self':
continue
indx = cntr.incr()
if gmod.listscalparagenrelem[l][k] == 'dpow':
for nametemp in ['brek', 'sloplowr', 'slopuppr']:
strg = nametemp + nameparagenrelem
setattr(gmod.indxpara, strg, indx)
else:
if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
strghypr = 'scal'
if gmod.listscalparagenrelem[l][k] == 'powr':
strghypr = 'slop'
if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'mean'
if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'stdv'
strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
setattr(gmod.indxpara, strg, indx)
# group PSF parameters
if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
for m in gdat.indxevtt:
for i in gdat.indxener:
setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking':
setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'ffenen%02devt%d' % (i, m), cntr.incr())
gmod.indxpara.psfp = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
gmod.indxpara.psfp.append(valu)
gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
for strg, valu in gmod.__dict__.items():
if isinstance(valu, list) or isinstance(valu, np.ndarray):
continue
if gdat.typeexpr == 'hubb':
for namecomplens in gmod.listnamecomplens:
for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
if strg[12:].startswith(strgtemp):
if isinstance(valu, list):
    for valutemp in valu:
        getattr(gmod.indxpara, namecomplens).append(valutemp)
else:
    getattr(gmod.indxpara, namecomplens).append(valu)
# remove indxpara. from strg
strg = strg[12:]
if strg.startswith('fluxsour') or strg.startswith('sindsour'):
gmod.indxpara.specsour.append(valu)
if strg.startswith('fluxhost') or strg.startswith('sindhost'):
gmod.indxpara.spechost.append(valu)
if gmod.boollens or gmod.boolhost:
gmod.indxpara.host = gmod.indxpara.hostlght + gmod.indxpara.hostlens
gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
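# The parameter-index registration above relies on a monotonically increasing counter
# object (tdpy.cntr()). Below is a minimal, hypothetical stand-in illustrating the
# assumed behaviour (incr() returns the current index and then advances); it is an
# illustration only and is not used by the code above.
class _ExampleCntr:

    def __init__(self):
        self.cntr = 0

    def incr(self, valu=1):
        temp = self.cntr
        self.cntr += valu
        return temp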
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base parameter names and scales...' % strgmodl)
gmod = getattr(gdat, strgmodl)
listlablback = []
for nameback in gmod.listnameback:
if nameback == 'isot':
listlablback.append('Isotropic')
listlablback.append(r'$\mathcal{I}$')
if nameback == 'fdfm':
listlablback.append('FDM')
listlablback.append(r'$\mathcal{D}$')
if nameback == 'dark':
listlablback.append('NFW')
listlablback.append(r'$\mathcal{D}_{dark}$')
if nameback == 'part':
listlablback.append('Particle Back.')
listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\rho,\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubt', 'f_{D/ST}')
### labels for background units
if gdat.typeexpr == 'ferm':
for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
for labltemptemp in ['flux', 'sbrt']:
# define the label
if nameenerscaltype == 'en00':
    strgenerscal = '%s' % labltemptemp
if nameenerscaltype == 'en01':
    strgenerscal = 'E%s' % labltemptemp
if nameenerscaltype == 'en02':
    strgenerscal = 'E^2%s' % labltemptemp
if nameenerscaltype == 'en03':
    strgenerscal = '%s' % labltemptemp
labl = '%s' % strgenerscal
for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
if nameenerscaltype == 'en00':
strgenerscalunit = '%s$^{-1}$' % strgenerunit
if nameenerscaltype == 'en01':
strgenerscalunit = ''
if nameenerscaltype == 'en02':
strgenerscalunit = '%s' % strgenerunit
if nameenerscaltype == 'en03':
strgenerscalunit = '%s' % strgenerunit
# define the label unit
for namesoldunit in ['ster', 'degr']:
if labltemptemp == 'flux':
lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'lablflux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
else:
if namesoldunit == 'ster':
lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
if namesoldunit == 'degr':
lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
if gdat.typedata == 'inpt':
gdat.minmpara.per0 = 1e-3
gdat.maxmpara.per0 = 1e1
gdat.minmpara.per1 = 1e-20
gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\Delta_{%d}' % q)
gdat.lablsigm = '\sigma_l'
gdat.lablgamm = '\gamma_l'
gdat.lablbcom = '\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = '\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = '\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened generative element parameter names, counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
for e in gmod.indxsersfgrd:
    strgsersfgrd = 'isf%d' % e
    gmod.namepara.scal += ['masshost' + strgsersfgrd + 'bein']
    for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
        gmod.namepara.scal += ['masshost' + strgsersfgrd + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (l, q)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
for l in gmod.indxpopl:
for k in gmod.indxparakind:
name = gmod.namepara.kind[k]
scal = getattr(gmod.scalpara, name)
if scal == 'self' or scal == 'logt':
minm = getattr(gmod.minmpara, name)
maxm = getattr(gmod.maxmpara, name)
if scal == 'self':
fact = maxm - minm
if scal == 'logt':
fact = np.log(maxm / minm)
if fact == 0:
print('name')
print(name)
raise Exception('')
setattr(gmod.factpara, name, fact)
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg == 'dist' and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
if gmod.boolelemdeflsubh:
xdat = gdat.binspara.angl[1:] * gdat.anglfact
lablxdat = gdat.labltotlpara.gang
listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
listasym = [False, False, False]
listydat = []
for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
for scalxdat in ['self', 'logt']:
path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
# pixel-convolution of the Sersic profile
# temp -- y axis labels are wrong, should be per solid angle
xdat = gdat.binspara.lgalsers * gdat.anglfact
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
if k != 5:
continue
path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
#path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)
#tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
xdat = gdat.binspara.angl * gdat.anglfact
listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
listindx = np.array([4., 2., 4., 10.])
listydat = []
listlabl = []
for spec, size, indx in zip(listspec, listsize, listindx):
listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
path = gdat.pathinitintr + 'sersprof.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
minmredshost = 0.01
maxmredshost = 0.4
minmredssour = 0.01
maxmredssour = 2.
numbreds = 200
retr_axis(gdat, 'redshost')
retr_axis(gdat, 'redssour')
gdat.meanpara.adishost = np.empty(numbreds)
for k in range(numbreds):
gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
asca = 0.1 / gdat.anglfact
acut = 1. / gdat.anglfact
minmmass = np.zeros((numbreds + 1, numbreds + 1))
maxmmass = np.zeros((numbreds + 1, numbreds + 1))
for k, redshost in enumerate(gdat.binspara.redshost):
for n, redssour in enumerate(gdat.binspara.redssour):
if redssour > redshost:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
#valulevl = np.linspace(7.5, 9., 5)
valulevl = [7.0, 7.3, 7.7, 8., 8.6]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, 10, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
axis.set_xlabel(r'$z_{\rm{hst}}$')
axis.set_ylabel(r'$z_{\rm{src}}$')
axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsminm.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
valulevl = np.linspace(9., 11., 20)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, 10, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
axis.set_xlabel('$z_{hst}$')
axis.set_ylabel('$z_{src}$')
axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsmaxm.pdf'
plt.colorbar(imag)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
axis.set_xlabel('$z_h$')
axis.set_yscale('log')
axis.set_ylabel(r'$\lambda$ [kpc]')
path = gdat.pathinitintr + 'wlenreds.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
fracacutasca = np.logspace(-1., 2., 20)
mcut = retr_mcutfrommscl(fracacutasca)
axis.loglog(fracacutasca, mcut)
axis.set_xlabel(r'$\tau_n$')
axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
axis.axhline(1., ls='--')
path = gdat.pathinitintr + 'mcut.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
# list of PCAT run plot outputs
pathimag = pathpcat + '/imag/'
listrtag = fnmatch.filter(os.listdir(pathimag), '2*')
listrtagprev = []
for rtag in listrtag:
strgstat = pathpcat + '/data/outp/' + rtag
if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:
listrtagprev.append(rtag)
listrtagprev.sort()
return listrtagprev
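# Hedged usage sketch (hypothetical configuration tag and path; not executed):
# listrtagprev = retr_listrtagprev('pcat_mock_lens', '/path/to/pcat')
# returns the tags of earlier runs whose plot/output files can be reused.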
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
hand, labl = axis.get_legend_handles_labels()
legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
legd.get_frame().set_fill(True)
legd.get_frame().set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
if popl == 'full':
indxpopltemp = gmod.indxpopl
elif popl != 'none':
indxpopltemp = [popl]
if ener == 'full':
indxenertemp = gdat.indxener
elif ener != 'none':
indxenertemp = [ener]
if evtt == 'full':
indxevtttemp = gdat.indxevtt
elif evtt != 'none':
indxevtttemp = [evtt]
if back == 'full':
    gmod.indxbacktemp = gmod.indxback
elif isinstance(back, int):
    gmod.indxbacktemp = np.array([back])
if isfr == 'full':
    indxisfrtemp = gmod.indxsersfgrd
elif isfr != 'none':
    indxisfrtemp = [isfr]
liststrgvarb = []
if iele != 'none':
for l in gmod.indxpopl:
if iele == 'full':
listiele = np.arange(gmod.maxmpara.numbelem)
else:
listiele = [iele]
for k in listiele:
liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))
if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':
for l in indxpopltemp:
liststrgvarb.append(strgvarb + 'pop%d' % l)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':
for e in indxisfrtemp:
liststrgvarb.append(strgvarb + 'isf%d' % e)
if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':
for i in indxenertemp:
for m in indxevtttemp:
liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))
if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))
if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
liststrgvarb.append(strgvarb + 'back%04d' % c)
if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'en%02d' % i)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':
liststrgvarb.append(strgvarb)
if gdat.booldiagmode:
for strgvarb in liststrgvarb:
if liststrgvarb.count(strgvarb) != 1:
print('liststrgvarb')
print(liststrgvarb)
print('popl')
print(popl)
print('ener')
print(ener)
print('evtt')
print(evtt)
print('back')
print(back)
print('isfr')
print(isfr)
print('iele')
print(iele)
raise Exception('')
return liststrgvarb
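# A minimal sketch of the name construction performed above (hypothetical values; the
# real population/energy/background indices come from gdat and gmod): indexed variable
# names are built by appending formatted suffixes to the base variable name.
def _example_namevarbsing():
    strgvarb = 'bacp'
    # background 0 with two energy bins -> ['bacpback0000en00', 'bacpback0000en01']
    return [strgvarb + 'back%04den%02d' % (c, i) for c in [0] for i in [0, 1]]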
def setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \
popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \
boolinvr=False, \
strgmodl=None, strgstat=None, \
):
'''
Set up variable values across all models (true and fitting) as well as all populations, energy bins,
event bins, background components, and Sersic components
'''
# determine the list of models
if strgmodl is None:
if gdat.typedata == 'mock':
liststrgmodl = ['true', 'fitt', 'plot']
else:
liststrgmodl = ['fitt', 'plot']
else:
if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':
liststrgmodl = [strgmodl]
else:
liststrgmodl = ['fitt', 'plot']
print('liststrgmodl')
print(liststrgmodl)
for strgmodl in liststrgmodl:
if strgmodl == 'plot':
gmod = gdat.fitt
gmodoutp = gdat
else:
gmod = getattr(gdat, strgmodl)
gmodoutp = gmod
# get the list of names of the variable
liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')
if iele != 'none':
liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)
else:
liststrgvarb = liststrgvarbnone
# set the values of each variable in the list
for strgvarb in liststrgvarb:
if minm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)
if maxm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)
if mean is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)
if stdv is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, stdv)
if valu is not None:
if strgstat is None:
print('strgvarb')
print(strgvarb)
print('strgmodl')
print(strgmodl)
print('valu')
print(valu)
print('')
setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)
elif strgstat == 'this':
setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)
if scal is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)
if lablroot is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)
if lablunit is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)
if cmap is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)
setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)
# create limt, bins, mean, and delt
if minm is not None and maxm is not None or mean is not None and stdv is not None:
# determine minima and maxima for Gaussian or log-Gaussian distributed parameters
if mean is not None:
minm = mean - gdat.numbstdvgaus * stdv
maxm = mean + gdat.numbstdvgaus * stdv
# uniformly-distributed
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsunif = np.linspace(minm, maxm, numbbins + 1)
if scal == 'logt' or scal == 'powr':
binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)
import numba
import numpy as np
#######################
# HELPFUL TOOLS BELOW #
#######################
@numba.njit(cache=True, fastmath=True) # Speeding up by a lot!
def unpack_alms(maps, lmax):
#print("Unpacking alms")
mmax = lmax
nmaps = len(maps)
# Nalms is length of target alms
Nalms = int(mmax * (2 * lmax + 1 - mmax) / 2 + lmax + 1)
alms = np.zeros((nmaps, Nalms), dtype=np.complex128)
# Unpack alms as output by commander
for sig in range(nmaps):
i = 0
for l in range(lmax+1):
j_real = l ** 2 + l
alms[sig, i] = complex(maps[sig, j_real], 0.0)
i += 1
for m in range(1, lmax + 1):
for l in range(m, lmax + 1):
j_real = l ** 2 + l + m
j_comp = l ** 2 + l - m
alms[sig, i] = complex(maps[sig, j_real], maps[sig, j_comp],) / np.sqrt(2.0)
i += 1
return alms
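# Hedged usage sketch (not part of the original module): build a dummy Commander-style
# packed array for a tiny lmax and unpack it. The packed real/imaginary layout assumed
# here is exactly the one indexed in the loop above.
def _example_unpack_alms():
    lmax = 2
    nmaps = 1
    maps = np.arange(nmaps * (lmax + 1) ** 2, dtype=np.float64).reshape(nmaps, -1)
    alms = unpack_alms(maps, lmax)
    # for lmax=2 there are 6 complex alms per map, ordered
    # (l, m) = (0,0), (1,0), (2,0), (1,1), (2,1), (2,2)
    assert alms.shape == (nmaps, 6)
    return alms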
def alm2fits_tool(input, dataset, nside, lmax, fwhm, save=True,):
"""
Convert alms stored in a chain HDF5 file into a HEALPix FITS map
"""
import h5py
import healpy as hp
try:
sample = int(dataset.split("/")[0])
print(f"Using sample {sample}")
except:
print(f"No sample specified, fetching last smample")
with h5py.File(input, "r") as f:
sample = str(len(f.keys()) - 2).zfill(6)
dataset=sample+"/"+dataset
print(f"Sample {sample} found, dataset now {dataset}")
with h5py.File(input, "r") as f:
alms = f[dataset][()]
lmax_h5 = f[f"{dataset[:-3]}lmax"][()] # Get lmax from h5
if lmax:
# Check if chosen lmax is compatible with data
if lmax > lmax_h5:
print(
"lmax larger than data allows: ", lmax_h5,
)
print("Please chose a value smaller than this")
else:
# Set lmax to default value
lmax = lmax_h5
mmax = lmax
alms_unpacked = unpack_alms(alms, lmax) # Unpack alms
# If not amp map, set spin 0.
if "amp_alm" in dataset:
pol = True
if np.shape(alms_unpacked)[0]==1:
pol = False
else:
pol = False
print(f"Making map from alms, setting lmax={lmax}, pol={pol}")
maps = hp.sphtfunc.alm2map(alms_unpacked, int(nside), lmax=int(lmax), mmax=int(mmax), fwhm=arcmin2rad(fwhm), pol=pol, pixwin=True,)
outfile = dataset.replace("/", "_")
outfile = outfile.replace("_alm", "")
if save:
outfile += f"_{str(int(fwhm))}arcmin" if fwhm > 0.0 else ""
hp.write_map(outfile + f"_n{str(nside)}_lmax{lmax}.fits", maps, overwrite=True, dtype=None)
return maps, nside, lmax, fwhm, outfile
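# Hedged usage sketch (hypothetical file, dataset and resolution; not executed):
# maps, nside, lmax, fwhm, outfile = alm2fits_tool(
#     "chain_c0001.h5", "000010/cmb/amp_alm", nside=512, lmax=800, fwhm=30.0, save=False)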
def h5handler(input, dataset, min, max, maxchain, output, fwhm, nside, command, pixweight=None, zerospin=False, lowmem=False, notchain=False):
"""
Compute the mean or standard deviation of signals across samples in a chain HDF5 file
"""
# Check if you want to output a map
import h5py
import healpy as hp
from tqdm import tqdm
if (lowmem and command == np.std): #need to compute mean first
mean_data = h5handler(input, dataset, min, max, maxchain, output, fwhm, nside, np.mean, pixweight, zerospin, lowmem,)
print()
if command: print("{:-^50}".format(f" {dataset} calculating {command.__name__} "))
print("{:-^50}".format(f" nside {nside}, {fwhm} arcmin smoothing "))
if dataset.endswith("map"):
type = "map"
elif dataset.endswith("rms"):
type = "map"
elif dataset.endswith("alm"):
type = "alm"
elif dataset.endswith("sigma"):
type = "sigma"
else:
type = "data"
if (lowmem):
nsamp = 0 #track number of samples
first_samp = True #flag for first sample
else:
dats = []
use_pixweights = False if pixweight == None else True
maxnone = True if max == None else False # set length of keys for maxchains>1
pol = True if zerospin == False else False # treat maps as TQU maps (polarization)
for c in range(1, maxchain + 1):
filename = input.replace("c0001", "c" + str(c).zfill(4))
with h5py.File(filename, "r") as f:
if notchain:
data = f[dataset][()]
if data.shape[0] == 1:
# Make sure it is interpreted as I by healpy
# For non-polarization data, (1,npix) is not accepted by healpy
data = data.ravel()
dats.append(data)
continue
if maxnone:
# If no max is specified, chose last sample
max = len(f.keys()) - 2
print("{:-^48}".format(f" Samples {min} to {max} in {filename}"))
for sample in tqdm(range(min, max + 1), ncols=80):
# Identify dataset
# alm, map or (sigma_l, which is recognized as l)
# Unless output is ".fits" or "map", don't convert alms to map.
alm2map = True if output.endswith((".fits", "map")) else False
# HDF dataset path formatting
s = str(sample).zfill(6)
# Sets tag with type
tag = f"{s}/{dataset}"
#print(f"Reading c{str(c).zfill(4)} {tag}")
# Check if map is available, if not, use alms.
# If alms is already chosen, no problem
try:
data = f[tag][()]
if len(data[0]) == 0:
tag = f"{tag[:-3]}map"
print(f"WARNING! No {type} data found, switching to map.")
data = f[tag][()]
type = "map"
except:
print(f"Found no dataset called {dataset}")
print(f"Trying alms instead {tag}")
try:
# Use alms instead (This takes longer and is not preferred)
tag = f"{tag[:-3]}alm"
type = "alm"
data = f[tag][()]
except:
print("Dataset not found.")
# If data is alm, unpack.
if type == "alm":
lmax_h5 = f[f"{tag[:-3]}lmax"][()]
data = unpack_alms(data, lmax_h5) # Unpack alms
if data.shape[0] == 1:
# Make sure it is interpreted as I by healpy
# For non-polarization data, (1,npix) is not accepted by healpy
data = data.ravel()
# If data is alm and calculating std. Bin to map and smooth first.
if type == "alm" and command == np.std and alm2map:
#print(f"#{sample} --- alm2map with {fwhm} arcmin, lmax {lmax_h5} ---")
data = hp.alm2map(data, nside=nside, lmax=lmax_h5, fwhm=arcmin2rad(fwhm), pixwin=True,verbose=False,pol=pol,)
# If data is map, smooth first.
elif type == "map" and fwhm > 0.0 and command == np.std:
#print(f"#{sample} --- Smoothing map ---")
if use_pixweights:
data = hp.sphtfunc.smoothing(data, fwhm=arcmin2rad(fwhm),verbose=False,pol=pol,use_pixel_weights=True,datapath=pixweight)
else: #use ring weights
data = hp.sphtfunc.smoothing(data, fwhm=arcmin2rad(fwhm),verbose=False,pol=pol,use_weights=True)
if (lowmem):
if (first_samp):
first_samp=False
if (command==np.mean):
dats=data.copy()
elif (command==np.std):
dats=(mean_data - data)**2
else:
print(f' Unknown command {command}. Exiting')
exit()
else:
if (command==np.mean):
dats=dats+data
elif (command==np.std):
dats=dats+(mean_data - data)**2
nsamp+=1
else:
# Append sample to list
dats.append(data)
if (lowmem):
if (command == np.mean):
outdata = dats/nsamp
elif (command == np.std):
outdata = np.sqrt(dats/nsamp)
else:
# Convert list to array
dats = np.array(dats)
# Calculate std or mean
print(dats.shape)
outdata = command(dats, axis=0) if command else dats
# Smoothing afterwards when calculating mean
if type == "alm" and command == np.mean and alm2map:
print(f"# --- alm2map mean with {fwhm} arcmin, lmax {lmax_h5} ---")
outdata = hp.alm2map(
outdata, nside=nside, lmax=lmax_h5, fwhm=arcmin2rad(fwhm), pixwin=True, pol=pol
)
if type == "map" and fwhm > 0.0 and command == np.mean:
print(f"--- Smoothing mean map with {fwhm} arcmin,---")
if use_pixweights:
outdata = hp.sphtfunc.smoothing(outdata, fwhm=arcmin2rad(fwhm),verbose=False,pol=pol,use_pixel_weights=True,datapath=pixweight)
else: #use ring weights
outdata = hp.sphtfunc.smoothing(outdata, fwhm=arcmin2rad(fwhm),verbose=False,pol=pol,use_weights=True)
# Outputs fits map if output name is .fits
if output.endswith(".fits"):
hp.write_map(output, outdata, overwrite=True, dtype=None)
elif output.endswith(".dat"):
while np.ndim(outdata)>2:
if outdata.shape[-1]==4:
tdata = outdata[:,0,0]
print(tdata)
outdata = outdata[:,:,3]
outdata[:,0] = tdata
else:
outdata = outdata[:,:,0]
np.savetxt(output, outdata)
return outdata
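# Hedged usage sketch (hypothetical chain file and dataset; not executed):
# mean_map = h5handler("chain_c0001.h5", "cmb/amp_alm", 10, 100, 1,
#                      "cmb_mean.fits", fwhm=60.0, nside=256, command=np.mean)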
def arcmin2rad(arcmin):
return arcmin * (2 * np.pi) / 21600
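# worked example: arcmin2rad(60.) == 60 * 2 * pi / 21600 == pi / 180 ~ 0.01745 rad (one degree)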
def legend_positions(df, y, scaling):
"""
Calculate the vertical positions of line labels placed to the right of the plot...
"""
positions = {}
for column in y:
positions[column] = df[column].values[-1] - 0.005
def push(dpush):
"""
...by putting them at the last y value and
pushing them apart until there is no overlap
"""
collisions = 0
for column1, value1 in positions.items():
for column2, value2 in positions.items():
if column1 != column2:
dist = abs(value1-value2)
if dist < scaling:# 0.075: #0.075: #0.023:
collisions += 1
if value1 < value2:
positions[column1] -= dpush
positions[column2] += dpush
else:
positions[column1] += dpush
positions[column2] -= dpush
return True
dpush = .001
pushings = 0
while True:
if pushings == 1000:
dpush*=10
pushings = 0
pushed = push(dpush)
if not pushed:
break
pushings+=1
return positions
def cmb(nu, A):
"""
CMB blackbody spectrum
"""
h = 6.62607e-34 # Planck's constant
k_b = 1.38065e-23 # Boltzmann's constant
Tcmb = 2.7255 # K CMB Temperature
x = h*nu/(k_b*Tcmb)
g = (np.exp(x)-1)**2/(x**2*np.exp(x))
s_cmb = A/g
return s_cmb
def sync(nu, As, alpha, nuref=0.408):
"""
Synchrotron spectrum using template
"""
print("nuref", nuref)
#alpha = 1., As = 30 K (30*1e6 muK)
nu_0 = nuref*1e9 # 408 MHz
from pathlib import Path
synch_template = Path(__file__).parent / "Synchrotron_template_GHz_extended.txt"
fnu, f = np.loadtxt(synch_template, unpack=True)
f = np.interp(nu, fnu*1e9, f)
f0 = np.interp(nu_0, nu, f) # Value of s at nu_0
s_s = As*(nu_0/nu)**2*f/f0
return s_s
def ffEM(nu,EM,Te):
"""
Freefree spectrum using emission measure
"""
#EM = 1 cm-3pc, Te= 500 #K
T4 = Te*1e-4
nu9 = nu/1e9 #Hz
g_ff = np.log(np.exp(5.960-np.sqrt(3)/np.pi*np.log(nu9*T4**(-3./2.)))+np.e)
tau = 0.05468*Te**(-3./2.)*nu9**(-2)*EM*g_ff
s_ff = 1e6*Te*(1-np.exp(-tau))
return s_ff
def ff(nu,A,Te, nuref=40.):
"""
Freefree spectrum
"""
h = 6.62607e-34 # Planck's constant
k_b = 1.38065e-23 # Boltzmann's constant
nu_ref = nuref*1e9
S = np.log(np.exp(5.960 - np.sqrt(3.0)/np.pi * np.log( nu/1e9*(Te/1e4)**-1.5))+2.71828)
S_ref = np.log(np.exp(5.960 - np.sqrt(3.0)/np.pi * np.log(nu_ref/1e9*(Te/1e4)**-1.5))+2.71828)
s_ff = A*S/S_ref*np.exp(-h*(nu-nu_ref)/k_b/Te)*(nu/nu_ref)**-2
return s_ff
def sdust(nu, Asd, nu_p, polfrac, fnu = None, f_ = None, nuref=22.,):
"""
Spinning dust spectrum using spdust2
"""
nuref = nuref*1e9
scale = 30./nu_p
try:
f = np.interp(scale*nu, fnu, f_)
f0 = np.interp(scale*nuref, fnu, f_) # Value of s at nu_0
except:
from pathlib import Path
ame_template = Path(__file__).parent / "spdust2_cnm.dat"
fnu, f_ = np.loadtxt(ame_template, unpack=True)
fnu *= 1e9
f = np.interp(scale*nu, fnu, f_)
f0 = np.interp(scale*nuref, fnu, f_) # Value of s at nu_0
s_sd = polfrac*Asd*(nuref/nu)**2*f/f0
return s_sd
def tdust(nu,Ad,betad,Td,nuref=545.):
"""
Thermal dust modified blackbody spectrum.
"""
h = 6.62607e-34 # Planck's constant
k_b = 1.38065e-23 # Boltzmann's constant
nu0=nuref*1e9
gamma = h/(k_b*Td)
s_d=Ad*(nu/nu0)**(betad+1)*(np.exp(gamma*nu0)-1)/(np.exp(gamma*nu)-1)
return s_d
def lf(nu,Alf,betalf,nuref=30e9):
"""
low frequency component spectrum (power law)
"""
return Alf*(nu/nuref)**(betalf)
def line(nu, A, freq, conversion=1.0):
"""
Line emission spectrum
"""
if isinstance(nu, np.ndarray):
return np.where(np.isclose(nu, 1e9*freq), A*conversion, 0.0)
else:
if np.isclose(nu, 1e9*freq):
return A*conversion
else:
return 0.0
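# Hedged example (placeholder amplitudes, purely illustrative): evaluate a few of the
# spectra defined above at 30, 100 and 353 GHz and sum them. Units follow whatever
# convention the amplitudes are given in.
def _example_sed():
    nu = np.array([30e9, 100e9, 353e9])
    total = cmb(nu, A=70.0) + ff(nu, A=30.0, Te=7000.0) + tdust(nu, Ad=100.0, betad=1.6, Td=19.6)
    return total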
def rspectrum(nu, r, sig, scaling=1.0):
"""
Calculate the CMB amplitude given a value of r and the requested mode (TT, EE, or BB)
"""
import camb
from camb import model, initialpower
import healpy as hp
#Set up a new set of parameters for CAMB
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(As=2e-9, ns=0.965, r=r)
lmax=6000
pars.set_for_lmax(lmax, lens_potential_accuracy=0)
pars.WantTensors = True
results = camb.get_results(pars)
powers = results.get_cmb_power_spectra(params=pars, lmax=lmax, CMB_unit='muK', raw_cl=True,)
l = np.arange(2,lmax+1)
if sig == "TT":
cl = powers['unlensed_scalar']
signal = 0
elif sig == "EE":
cl = powers['unlensed_scalar']
signal = 1
elif sig == "BB":
cl = powers['tensor']
signal = 2
bl = hp.gauss_beam(40/(180/np.pi*60), lmax,pol=True)
A = np.sqrt(sum( 4*np.pi * cl[2:,signal]*bl[2:,signal]**2/(2*l+1) ))
return cmb(nu, A*scaling)
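# Hedged usage sketch (requires camb; the r value is illustrative):
# amp_bb = rspectrum(nu=100e9, r=0.01, sig="BB")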
def fits_handler(input, min, max, minchain, maxchain, chdir, output, fwhm, nside, zerospin, drop_missing, pixweight, command, lowmem=False, fields=None, write=False):
"""
Compute the mean or standard deviation across a set of per-sample FITS maps.
"""
# Check if you want to output a map
import healpy as hp
from tqdm import tqdm
import os
if (not input.endswith(".fits")):
print("Input file must be a '.fits'-file")
exit()
if (lowmem and command == np.std): #need to compute mean first
mean_data = fits_handler(input, min, max, minchain, maxchain, chdir, output, fwhm, nside, zerospin, drop_missing, pixweight, np.mean, lowmem, fields, write=False)
if (minchain > maxchain):
print('Minimum chain number larger that maximum chain number. Exiting')
exit()
aline=input.split('/')
dataset=aline[-1]
print()
if command: print("{:-^50}".format(f" {dataset} calculating {command.__name__} "))
if (nside == None):
print("{:-^50}".format(f" {fwhm} arcmin smoothing "))
else:
print("{:-^50}".format(f" nside {nside}, {fwhm} arcmin smoothing "))
type = 'map'
if (not lowmem):
dats = []
nsamp = 0 #track number of samples
first_samp = True #flag for first sample
use_pixweights = False if pixweight == None else True
maxnone = True if max == None else False # set length of keys for maxchains>1
pol = True if zerospin == False else False # treat maps as TQU maps (polarization)
for c in range(minchain, maxchain + 1):
if (chdir==None):
filename = input.replace("c0001", "c" + str(c).zfill(4))
else:
filename = chdir+'_c%i/'%(c)+input
basefile = filename.split("k000001")
if maxnone:
# If no max is specified, find last sample of chain
# Assume residual file of convention res_label_c0001_k000234.fits,
# i.e. final numbers of file are sample number
max_found = False
siter=min
while (not max_found):
filename = basefile[0]+'k'+str(siter).zfill(6)+basefile[1]
if (os.path.isfile(filename)):
siter += 1
else:
max_found = True
max = siter - 1
else:
if (first_samp):
for chiter in range(minchain,maxchain + 1):
if (chdir==None):
tempname = input.replace("c0001", "c" + str(c).zfill(4))
else:
tempname = chdir+'_c%i/'%(c)+input
temp = tempname.split("k000001")
for siter in range(min,max+1):
tempf = temp[0]+'k'+str(siter).zfill(6)+temp[1]
if (not os.path.isfile(tempf)):
print('chain %i, sample %i missing'%(c,siter))
print(tempf)
if (not drop_missing):
exit()
print("{:-^48}".format(f" Samples {min} to {max} in {filename}"))
for sample in tqdm(range(min, max + 1), ncols=80):
# dataset sample formatting
filename = basefile[0]+'k'+str(sample).zfill(6)+basefile[1]
if (first_samp):
# Check which fields the input maps have
if (not os.path.isfile(filename)):
if (not drop_missing):
exit()
else:
continue
_, header = hp.fitsfunc.read_map(filename, verbose=False, h=True, dtype=None)
if fields!=None:
nfields = 0
for par in header:
if (par[0] == 'TFIELDS'):
nfields = par[1]
break
if (nfields == 0):
print('No fields/maps in input file')
exit()
elif (nfields == 1):
fields=(0)
elif (nfields == 2):
fields=(0,1)
elif (nfields == 3):
fields=(0,1,2)
#print(' Reading fields ',fields)
nest = False
for par in header:
if (par[0] == 'ORDERING'):
if (not par[1] == 'RING'):
nest = True
break
for par in header:
if (par[0] == 'NSIDE'):
nside_map = par[1]
break
if (not nside == None):
if (nside > nside_map):
print(' Specified nside larger than that of the input maps')
print(' Not up-grading the maps')
print('')
if (not os.path.isfile(filename)):
if (not drop_missing):
exit()
else:
continue
data = hp.fitsfunc.read_map(filename,field=fields,verbose=False,h=False, nest=nest, dtype=None)
if (nest): #need to reorder to ring-ordering
data = hp.pixelfunc.reorder(data,n2r=True)
# degrading if relevant
if (not nside == None):
if (nside < nside_map):
data=hp.pixelfunc.ud_grade(data,nside) #ordering=ring by default
if data.shape[0] == 1:
# Make sure it is interpreted as I by healpy
# For non-polarization data, (1,npix) is not accepted by healpy
data = data.ravel()
# If smoothing applied and calculating stddev, smooth first.
if fwhm > 0.0 and command == np.std:
#print(f"#{sample} --- Smoothing map ---")
if use_pixweights:
data = hp.sphtfunc.smoothing(data, fwhm=arcmin2rad(fwhm),verbose=False,pol=pol,use_pixel_weights=True,datapath=pixweight)
else: #use ring weights
data = hp.sphtfunc.smoothing(data, fwhm=arcmin2rad(fwhm),verbose=False,pol=pol,use_weights=True)
if (lowmem):
if (first_samp):
if (command==np.mean):
dats=data.copy()
elif (command==np.std):
dats=(mean_data - data)**2
else:
print(f' Unknown command {command}. Exiting')
exit()
else:
if (command==np.mean):
dats=dats+data
elif (command==np.std):
dats=dats+(mean_data - data)**2
nsamp+=1
else:
# Append sample to list
dats.append(data)
first_samp=False
if (lowmem):
if (command == np.mean):
outdata = dats/nsamp
elif (command == np.std):
outdata = np.sqrt(dats/nsamp)
else:
# Convert list to array
dats = np.array(dats)
import numpy as np
import xarray as xr
from xclim import land, set_options
def test_base_flow_index(ndq_series):
out = land.base_flow_index(ndq_series, freq="YS")
assert out.attrs["units"] == ""
assert isinstance(out, xr.DataArray)
def test_rb_flashiness_index(ndq_series):
out = land.rb_flashiness_index(ndq_series, freq="YS")
assert out.attrs["units"] == ""
assert isinstance(out, xr.DataArray)
class Test_FA:
def test_simple(self, ndq_series):
out = land.freq_analysis(
ndq_series, mode="max", t=[2, 5], dist="gamma", season="DJF"
)
assert out.long_name == "N-year return period max winter 1-day flow"
assert out.shape == (2, 2, 3) # nrt, nx, ny
np.testing.assert_array_equal(out.isnull(), False)
def test_no_indexer(self, ndq_series):
out = land.freq_analysis(ndq_series, mode="max", t=[2, 5], dist="gamma")
assert out.long_name == "N-year return period max annual 1-day flow"
assert out.shape == (2, 2, 3) # nrt, nx, ny
np.testing.assert_array_equal(out.isnull(), False)
def test_q27(self, ndq_series):
out = land.freq_analysis(ndq_series, mode="max", t=2, dist="gamma", window=7)
assert out.shape == (1, 2, 3)
def test_empty(self, ndq_series):
q = ndq_series.copy()
q[:, 0, 0] = np.nan
out = land.freq_analysis(
q, mode="max", t=2, dist="genextreme", window=6, freq="YS"
)
assert np.isnan(out.values[:, 0, 0]).all()
class TestStats:
def test_simple(self, ndq_series):
out = land.stats(ndq_series, freq="YS", op="min", season="MAM")
assert out.attrs["units"] == "m^3 s-1"
def test_missing(self, ndq_series):
a = ndq_series
a = ndq_series.where(~((a.time.dt.dayofyear == 5) * (a.time.dt.year == 1902)))
assert a.shape == (5000, 2, 3)
out = land.stats(a, op="max", month=1)
np.testing.assert_array_equal(out.sel(time="1900").isnull(), False)
np.testing.assert_array_equal(out.sel(time="1902").isnull(), True)
class TestFit:
def test_simple(self, ndq_series):
ts = land.stats(ndq_series, freq="YS", op="max")
p = land.fit(ts, dist="gumbel_r")
assert p.attrs["estimator"] == "Maximum likelihood"
def test_nan(self, q_series):
r = np.random.rand(22)
import numpy as np
def softmax(predictions):
'''
Computes probabilities from scores
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
Returns:
probs, np array of the same shape as predictions -
probability for every class, 0..1
'''
# TODO implement softmax
# Your final implementation shouldn't have any loops
if predictions.ndim == 1:
predictions_normalized = predictions.copy() - predictions.max()
predictions_exp = np.exp(predictions_normalized)
exp_sum = predictions_exp.sum()
results = predictions_exp / exp_sum
else:
predictions_normalized = predictions.copy() - predictions.max(axis=1).reshape((-1, 1))
predictions_exp = np.exp(predictions_normalized)
exp_sum = predictions_exp.sum(axis=1)
results = predictions_exp / exp_sum.reshape((-1, 1))
return results
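# Illustrative usage (a sketch, not part of the original assignment file): each row of the
# batched output is a valid probability distribution.
#   probs = softmax(np.array([[1.0, 2.0, 3.0],
#                             [0.0, 0.0, 0.0]]))
#   probs.sum(axis=1)  # -> array([1., 1.])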
def l2_regularization(W, reg_strength):
'''
Computes L2 regularization loss on weights and its gradient
Arguments:
W, np array - weights
reg_strength - float value
Returns:
loss, single value - l2 regularization loss
gradient, np.array same shape as W - gradient of weight by l2 loss
'''
# TODO: Copy from previous assignment
loss = reg_strength * np.sum(np.power(W, 2))
#!/usr/bin/env python
# coding: utf-8
# # 3D Volume Analysis Functions
# ## Introduction
# These functions were developed at **Boston University** to aid in the analysis of pre-segmented 3D reconstructed volumes. This code was originally written for three-phase materials, but can be modified to analyze two-phase materials (with the exception of the TPB function, which by definition requires three phases). The TPB density function relies on a wonderful package called "Skan", which is cited below.
# ### Citations:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, Solid State Ionics, 148(1), 15 (2002).
#
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, PeerJ, 6(4312), 2018. doi:10.7717/peerj.4312.
#
# <NAME>, <NAME>, <NAME>, and <NAME>, J. Power Sources, 196(10), 4555 (2011).
# In[2]:
import numpy
from PIL import Image
from scipy import ndimage, misc
import scipy
import matplotlib.pyplot as plt
import cv2
import pandas as pd
import math
import skimage
from skimage.measure import label, regionprops
from skimage.morphology import skeletonize
import random
import skan
# ### Rescaling
# This function takes a 3D labeled volume that may have asymmetric voxel sizes, and resamples the volume to produce a labeled volume with the smallest possible symmetric voxel size. The function takes a labeled volume (arr), and the physical dimensions along the x, y, and z directions in arbitrary units (d1, d2, and d3 respectively).
# In[9]:
def rescale(arr,d1,d2,d3):
sizeArr=numpy.shape(arr)
vs=[d1/sizeArr[0],d2/sizeArr[1],d3/sizeArr[2]]
v=max(vs)
zoomV=((d1/v)/sizeArr[0],(d2/v)/sizeArr[1],1)
Labeled_us = ndimage.zoom(arr, zoomV, mode='nearest')
print("Voxel size: {:.3f} um".format(v))
return(Labeled_us)
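# A minimal, self-contained usage sketch (synthetic volume; the array shape and the
# 10 x 10 x 5 um physical extent below are illustrative, not from a real dataset).
# In[ ]:
demo_labels = numpy.random.randint(0, 3, size=(100, 100, 25))
demo_iso = rescale(demo_labels, 10.0, 10.0, 5.0)  # x and y are resampled to match the coarser z voxel
print(demo_iso.shape)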
# ### Average Particle Size
# This function implements a version of the intercept method that determines both an overall average particle size and the average particle size measured along the x, y, and z directions. The function takes a labeled volume (with the phase of interest occupying voxels labeled val) and calculates, in each direction and overall, the average intercept size through the particles in that phase. The "scale" input value should be in units/voxel side length (for example, um/voxel side length). The function returns an array with the x-direction, y-direction, z-direction, and average particle sizes for that phase, and automatically multiplies each by a stereological coefficient, assuming that each particle is spherical.
#
# See Lee et. al. for further details about the intercept method.
# In[10]:
def bisector_size(arr,val,scale):
sizeArr=numpy.shape(arr)
x_sz=[]
y_sz=[]
z_sz=[]
x_int=numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
y_int=numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
z_int=numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
#find x sizes
ct=0
for z in range(sizeArr[2]):
for y in range(sizeArr[1]):
ct=0
for x in range(sizeArr[0]):
if arr[x,y,z]==val:
ct+=1
else:
ct=0
x_int[x,y,z]=ct
for z in range(sizeArr[2]):
for y in range(sizeArr[1]):
for x in range(sizeArr[0]-1):
if x_int[x,y,z]>0 and x_int[x+1,y,z]==0:
x_sz.append(x_int[x,y,z])
if x_int[-1,y,z]>0:
x_sz.append(x_int[-1,y,z])
x_size=numpy.mean(x_sz)*scale
#find y sizes
ct=0
for z in range(sizeArr[2]):
for x in range(sizeArr[0]):
ct=0
for y in range(sizeArr[1]):
if arr[x,y,z]==val:
ct+=1
else:
ct=0
y_int[x,y,z]=ct
for z in range(sizeArr[2]):
for x in range(sizeArr[0]):
for y in range(sizeArr[1]-1):
if y_int[x,y,z]>0 and y_int[x,y+1,z]==0:
y_sz.append(y_int[x,y,z])
if y_int[x,-1,z]>0:
y_sz.append(y_int[x,-1,z])
y_size=numpy.mean(y_sz)*scale
#find z sizes
ct=0
for y in range(sizeArr[1]):
for x in range(sizeArr[0]):
ct=0
for z in range(sizeArr[2]):
if arr[x,y,z]==val:
ct+=1
else:
ct=0
z_int[x,y,z]=ct
for y in range(sizeArr[1]):
for x in range(sizeArr[0]):
for z in range(sizeArr[2]-1):
if z_int[x,y,z]>0 and z_int[x,y,z+1]==0:
z_sz.append(z_int[x,y,z])
if z_int[x,y,-1]>0:
z_sz.append(z_int[x,y,-1])
z_size=numpy.mean(z_sz)*scale
cat_sz=x_sz+y_sz+z_sz
cat_size=numpy.mean(cat_sz)*scale
return([1.5*x_size,1.5*y_size,1.5*z_size,1.5*cat_size])
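# Usage sketch (synthetic volume; the labels and the 0.05 um voxel size are illustrative).
# In[ ]:
demo_vol = numpy.random.randint(0, 3, size=(40, 40, 40))
print(bisector_size(demo_vol, 1, 0.05))  # [x, y, z, overall] average particle size in um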
# ### Percolation Fraction
# The "perc" function takes a labeled volume and a phase label, and calculates the fraction of pixels that belong to the largest continuous connected volume of that phase within the sampled volume. This function returns the percolated fraction of that phase, as well as an array containing the coordinates of the voxels in the percolated regions of that phase.
# In[11]:
def perc(arr,phase_label):
sizeArr=numpy.shape(arr)
barray=numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
for x in range(sizeArr[0]):
for y in range(sizeArr[1]):
for z in range(sizeArr[2]):
if arr[x,y,z]==phase_label:
barray[x,y,z]=1
regions=label(barray, connectivity=3)
reg_props=regionprops(regions)
reg_areas=numpy.zeros(len(reg_props))
for i in range(len(reg_props)):
reg_areas[i]=reg_props[i].area
perc_index=(numpy.where(reg_areas == numpy.amax(reg_areas)))[0]
perc_area=reg_props[int(perc_index)].area
perc_frac=perc_area/sum(sum(sum(barray)))
perc_locs=reg_props[int(perc_index)].coords
return(perc_frac,perc_locs)
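# Usage sketch (synthetic volume): fraction of phase-1 voxels belonging to the largest
# connected cluster of that phase.
# In[ ]:
demo_frac, demo_locs = perc(numpy.random.randint(0, 3, size=(30, 30, 30)), 1)
print("Percolated fraction: {:.3f}".format(demo_frac))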
# ### Tortuosity
# The tortuosity function calculates the tortuosity of a phase using the random walker method. The function takes a labeled array, the label of the phase of interest (phase_label), the number of walkers you'd like to deploy (walkers), and the number of steps those walkers should take (steps). The function returns the tortuosity values as an array (in the x, y, z directions and on average), the mean squared displacement of the walkers from their origins (rp, rpx, rpy, rpz), and an array of walker time steps. To plot, for example, the mean squared displacement as a function of time step, plot rp vs. the returned step array. For further details about this calculation, see Kishimoto et al., cited above.
# In[6]:
def tortuosity(arr,phase_label,walkers,steps):
sizeArr=numpy.shape(arr)
## find the tortuosity in the percolated phase
perc_results=perc(arr,phase_label)
perc_coords=perc_results[1]
perc_vol=numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
for x in range(len(perc_coords)):
perc_vol[(perc_coords[x,:])[0],(perc_coords[x,:])[1],(perc_coords[x,:])[2]]=1
rp=numpy.zeros(steps)
rpx=numpy.zeros(steps)
rpy=numpy.zeros(steps)
rpz=numpy.zeros(steps)
for n in range(walkers):
walk_seed_index=random.randrange(0,len(perc_coords))
walk_seed=(perc_results[1])[walk_seed_index]
pos=walk_seed
for s in range(steps):
xchg=random.randint(-1,1)
ychg=random.randint(-1,1)
zchg=random.randint(-1,1)
postemp=numpy.array([pos[0]+xchg,pos[1]+ychg,pos[2]+zchg])
if (postemp[0] in range(sizeArr[0])) and (postemp[1] in range(sizeArr[1])) and (postemp[2]in range(sizeArr[2])) and (perc_vol[postemp[0],postemp[1],postemp[2]]==1):
pos=postemp
else:
pos=pos
rp[s]+=((pos[0]-walk_seed[0])**2+(pos[1]-walk_seed[1])**2+(pos[2]-walk_seed[2])**2)
rpx[s]+=abs(pos[0]-walk_seed[0])**2
rpy[s]+=abs(pos[1]-walk_seed[1])**2
rpz[s]+=abs(pos[2]-walk_seed[2])**2
# get displacement as a function of steps for all space, x, y and z
rp=rp/walkers
rpx=rpx/walkers
rpy=rpy/walkers
rpz=rpz/walkers
## Free space tortuosity
rpfs=numpy.zeros(steps)
rpxfs=numpy.zeros(steps)
rpyfs=numpy.zeros(steps)
rpzfs=numpy.zeros(steps)
for n in range(walkers):
walk_seed=numpy.array([random.randint(0,sizeArr[0]),random.randint(0,sizeArr[1]),random.randint(0,sizeArr[2])])
pos=walk_seed
for s in range(steps):
xchg=random.randint(-1,1)
ychg=random.randint(-1,1)
zchg=random.randint(-1,1)
postemp=numpy.array([pos[0]+xchg,pos[1]+ychg,pos[2]+zchg])
if (postemp[0] in range(sizeArr[0])) and (postemp[1] in range(sizeArr[1])) and (postemp[2]in range(sizeArr[2])):
pos=postemp
else:
pos=pos
rpfs[s]+=((pos[0]-walk_seed[0])**2+(pos[1]-walk_seed[1])**2+(pos[2]-walk_seed[2])**2)
rpxfs[s]+=abs(pos[0]-walk_seed[0])**2
rpyfs[s]+=abs(pos[1]-walk_seed[1])**2
rpzfs[s]+=abs(pos[2]-walk_seed[2])**2
rpfs=rpfs/walkers
rpxfs=rpxfs/walkers
rpyfs=rpyfs/walkers
rpzfs=rpzfs/walkers
step_array=numpy.arange(1,steps+1)
crp=(numpy.polyfit(step_array,rp,1))[0]
crpx=(numpy.polyfit(step_array,rpx,1))[0]
crpy=(numpy.polyfit(step_array,rpy,1))[0]
crpz=(numpy.polyfit(step_array,rpz,1))[0]
crpfs=(numpy.polyfit(step_array,rpfs,1))[0]
crpxfs=(numpy.polyfit(step_array,rpxfs,1))[0]
crpyfs=(numpy.polyfit(step_array,rpyfs,1))[0]
crpzfs=(numpy.polyfit(step_array,rpzfs,1))[0]
V_frac=perc_results[0]
tort=(1/V_frac)*(crpfs/crp)
tortx=(1/V_frac)*(crpxfs/crpx)
torty=(1/V_frac)*(crpyfs/crpy)
tortz=(1/V_frac)*(crpzfs/crpz)
tortvals=[numpy.sqrt(tortx),numpy.sqrt(torty),numpy.sqrt(tortz),numpy.sqrt(tort)]
return(tortvals,rp,rpx,rpy,rpz,step_array)
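# Usage sketch (synthetic volume, deliberately tiny walker and step counts so the cell runs
# quickly; real analyses need far more walkers and steps for converged statistics).
# In[ ]:
demo_out = tortuosity(numpy.random.randint(0, 3, size=(20, 20, 20)), 1, walkers=20, steps=50)
print("Tortuosity [x, y, z, avg]:", demo_out[0])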
# ### TPB Density
# TPB density is calculated by the volume expansion method, and takes a labeled volume, the values of each of the three phases of interest as an array, the voxel side length (voxel_size), and the number of dilations you'd like to perform on each of the phases to extract the TPBs. Dilations should be initially set to 1, but can be increased if you are concerned about noise or misclassified pixels in your volume.
# In[12]:
def TPB_per_vol(arr,phase_values,voxel_size,dilations):
sizeArr=numpy.shape(arr)
tpb_array=numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
for i in phase_values:
barray = numpy.zeros((sizeArr[0],sizeArr[1],sizeArr[2]), dtype=int)
# -*- coding: utf-8 -*-
"""
| **@created on:** 9/28/20,
| **@author:** prathyushsp,
| **@version:** v0.0.1
|
| **Description:**
|
|
| **Sphinx Documentation Status:**
"""
import torch
import cv2
import numpy as np
import wandb
from pert.utils import tv_norm, numpy_to_torch, save_timeseries
import matplotlib.pyplot as plt
from scipy.ndimage.filters import median_filter
import logging
import random
from nte.experiment.evaluation import run_evaluation_metrics
from nte.utils.perturbation_manager import PerturbationManager
from nte.models.saliency_model import Saliency
from torch.optim.lr_scheduler import ExponentialLR
from nte.utils.priority_buffer import PrioritizedBuffer
logger = logging.getLogger(__name__)
class PertSaliency(Saliency):
def __init__(self, background_data, background_label, predict_fn, enable_wandb, use_cuda, args):
super(PertSaliency, self).__init__(background_data=background_data,
background_label=background_label,
predict_fn=predict_fn)
self.enable_wandb = enable_wandb
self.use_cuda = use_cuda
self.args = args
self.softmax_fn = torch.nn.Softmax(dim=-1)
self.perturbation_manager = None
self.r_index = random.randrange(0, len(self.background_data)) if self.args.r_index < 0 else self.args.r_index
self.rs_priority_buffer = None
self.ro_priority_buffer = None
self.eps = 1.0
self.eps_decay = 0.9991
def priority_dual_greedy_pick_rt(self, kwargs, data, label):
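# Epsilon-greedy choice of the two reference series: with probability eps (decayed on every
# call) a same-class sample Rs and an opposite-class sample Ro are drawn uniformly at random
# (explore); otherwise both are sampled from the prioritized replay buffers (exploit).
# Returns the picked samples together with their importance weights and buffer indices.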
self.eps *= self.eps_decay
if np.random.uniform() < self.eps:
self.mode = 'Explore'
rs_index = [np.random.choice(len(getattr(self.args.dataset, f"test_class_{int(label)}_data")))]
ro_index = [np.random.choice(len(getattr(self.args.dataset, f"test_class_{1-int(label)}_data")))]
Rs, rs_weight = [getattr(self.args.dataset, f"test_class_{int(label)}_data")[rs_index[0]]], [1.0]
Ro, ro_weight = [getattr(self.args.dataset, f"test_class_{1-int(label)}_data")[ro_index[0]]], [1.0]
else:
self.mode = 'Exploit'
Rs, rs_weight, rs_index = self.rs_priority_buffer.sample(1)
Ro, ro_weight, ro_index = self.ro_priority_buffer.sample(1)
return {'rs': [Rs, rs_weight, rs_index],
'ro': [Ro, ro_weight, ro_index]}
def dynamic_dual_pick_zt(self, kwargs, data, label):
ds = kwargs['dataset'].test_class_0_indices
ls = len(ds)
ods = kwargs['dataset'].test_class_1_indices
ols = len(ods)
self.r_index = random.randrange(0, ls)
self.ro_index = random.randrange(0, ols)
Zt = self.background_data[ds[self.r_index]]
ZOt = self.background_data[ods[self.ro_index]]
return Zt, ZOt
def dynamic_pick_zt(self, kwargs, data, label):
self.r_index = None
if self.args.grad_replacement == 'zeros':
Zt = torch.zeros_like(data)
else:
if self.args.grad_replacement == 'class_mean':
if label == 1:
Zt = torch.tensor(kwargs['dataset'].test_class_0_mean, dtype=torch.float32)
else:
Zt = torch.tensor(kwargs['dataset'].test_class_1_mean, dtype=torch.float32)
elif self.args.grad_replacement == 'instance_mean':
Zt = torch.mean(data).cpu().detach().numpy()
Zt = torch.tensor(np.repeat(Zt, data.shape[0]), dtype=torch.float32)
elif self.args.grad_replacement == 'random_instance':
self.r_index = random.randrange(0, len(self.background_data))
Zt = torch.tensor(self.background_data[self.r_index],
dtype=torch.float32)
elif self.args.grad_replacement == 'random_opposing_instance':
if label == 1:
sds = kwargs['dataset'].test_class_0_indices
sls = len(sds)
else:
sds = kwargs['dataset'].test_class_1_indices
sls = len(sds)
self.r_index = random.randrange(0, sls)
Zt = torch.tensor(sds[self.r_index], dtype=torch.float32)
return Zt
def static_pick_zt(self, kwargs, data, label):
if self.args.grad_replacement == 'zeros':
Zt = torch.zeros_like(data)
else:
if self.args.grad_replacement == 'class_mean':
if label == 1:
Zt = torch.tensor(kwargs['dataset'].test_class_0_mean, dtype=torch.float32)
else:
Zt = torch.tensor(kwargs['dataset'].test_class_1_mean, dtype=torch.float32)
elif self.args.grad_replacement == 'instance_mean':
Zt = torch.mean(data).cpu().detach().numpy()
Zt = torch.tensor(np.repeat(Zt, data.shape[0]), dtype=torch.float32)
elif self.args.grad_replacement == 'random_instance':
Zt = torch.tensor(self.background_data[self.r_index], dtype=torch.float32)
elif self.args.grad_replacement == 'random_opposing_instance':
if label == 1:
Zt = torch.tensor(kwargs['dataset'].test_statistics['between_class']['opposing'][0],
dtype=torch.float32)
else:
Zt = torch.tensor(kwargs['dataset'].test_statistics['between_class']['opposing'][1],
dtype=torch.float32)
return Zt
def weighted_mse_loss(self, input, target, weight):
return torch.mean(weight * (input - target) ** 2)
def generate_saliency(self, data, label, **kwargs):
self.rs_priority_buffer = PrioritizedBuffer(
background_data=getattr(kwargs['dataset'], f"test_class_{int(label)}_data"))
self.ro_priority_buffer = PrioritizedBuffer(
background_data=getattr(kwargs['dataset'], f"test_class_{1 - int(label)}_data"))
self.eps = 1.0
if isinstance(data, np.ndarray):
data = torch.tensor(data, dtype=torch.float32)
category = np.argmax(kwargs['target'].cpu().data.numpy())
if kwargs['save_perturbations']:
self.perturbation_manager = PerturbationManager(
original_signal=data.cpu().detach().numpy().flatten(),
algo=self.args.algo, prediction_prob=np.max(kwargs['target'].cpu().data.numpy()),
original_label=label, sample_id=self.args.single_sample_id)
plt.plot(data, label="Original Signal Norm")
gkernel = cv2.getGaussianKernel(3, 0.5)
gaussian_blur_signal = cv2.filter2D(data.cpu().detach().numpy(), -1, gkernel).flatten()
plt.plot(gaussian_blur_signal, label="Gaussian Blur")
median_blur_signal = median_filter(data, 3)
plt.plot(median_blur_signal, label="Median Blur")
blurred_signal = (gaussian_blur_signal + median_blur_signal) / 2
plt.plot(blurred_signal, label="Blurred Signal")
mask_init = np.random.uniform(size=len(data), low=-1e-2, high=1e-2)
blurred_signal_norm = blurred_signal / np.max(blurred_signal)
'''
'''
import numpy as np
class Replace(object):
def __init__(self, strategy="Replace", fill_value=np.nan, thresh=3.5):
'''
:param strategy: "Replace"
:param fill_value: when strategy="Replace", replace outliers with this value (default np.nan)
:param thresh: modified z-score cutoff above which a value is treated as an outlier
'''
self.strategy = strategy
self.fill_value = fill_value
self.thresh = thresh
def fit(self, X, y=None):
median = np.median(X, axis=0)
diff = np.abs(X - median)
med_abs_deviation = np.median(diff, axis=0)
self.median = median
self.med_abs_deviation = med_abs_deviation
return self
def transform(self, X):
diff = np.abs(X - self.median)
modified_z_score = 0.6745 * diff / self.med_abs_deviation
mask = modified_z_score > self.thresh
X = X.astype("float")
X[mask] = self.fill_value
return X
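# Illustrative usage (a sketch, values are made up): flag the obvious outlier with NaN.
#   X = np.array([[1.0], [1.1], [0.9], [50.0]])
#   cleaned = Replace().fit(X).transform(X)   # the 50.0 entry becomes np.nan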
class Filter(object):
def __init__(self, method='filter', missing_values=np.nan, threshold= 0.85):
'''
:param method: 'filter' - filter columns
:param missing_values:
:param threshold:
'''
self.missing_values= missing_values
self.threshold=threshold
def fit(self, X, y=None):
self.index = np.isnan(X)
#****************************************************#
# This file is part of OPTALG. #
# #
# Copyright (c) 2019, <NAME>. #
# #
# OPTALG is released under the BSD 2-clause license. #
#****************************************************#
from __future__ import print_function
import numpy as np
from functools import reduce
from .opt_solver_error import *
from .problem import cast_problem, OptProblem
from .opt_solver import OptSolver
from optalg.lin_solver import new_linsolver
from scipy.sparse import bmat,eye,coo_matrix,tril
class OptSolverAugL(OptSolver):
parameters = {'beta_large' : 0.9, # for decreasing sigma when progress
'beta_med' : 0.5, # for decreasing sigma when forcing
'beta_small' : 0.1, # for decreasing sigma
'feastol' : 1e-4, # feasibility tolerance
'optol' : 1e-4, # optimality tolerance
'gamma' : 0.1, # for determining required decrease in ||f||
'tau' : 0.1, # for reductions in ||GradF||
'kappa' : 1e-2, # for initializing sigma
'maxiter' : 1000, # maximum iterations
'sigma_min' : 1e-12, # minimum sigma
'sigma_init_min' : 1e-3, # minimum initial sigma
'sigma_init_max' : 1e8, # maximum initial sigma
'theta_min' : 1e-6, # minimum barrier parameter
'theta_max' : 1e0 , # maximum initial barrier parameter
'lam_reg' : 1e-4, # regularization of first order dual update
'subprob_force' : 10, # for periodic sigma decrease
'subprob_maxiter' : 150, # maximum subproblem iterations
'linsolver' : 'default', # linear solver
'quiet' : False} # flag for omitting output
def __init__(self):
"""
Augmented Lagrangian algorithm.
"""
OptSolver.__init__(self)
self.parameters = OptSolverAugL.parameters.copy()
self.linsolver1 = None
self.linsolver2 = None
self.barrier = None
def supports_properties(self, properties):
for p in properties:
if p not in [OptProblem.PROP_CURV_LINEAR,
OptProblem.PROP_CURV_QUADRATIC,
OptProblem.PROP_CURV_NONLINEAR,
OptProblem.PROP_VAR_CONTINUOUS,
OptProblem.PROP_TYPE_FEASIBILITY,
OptProblem.PROP_TYPE_OPTIMIZATION]:
return False
return True
def solve(self, problem):
# Local vars
norm2 = self.norm2
norminf = self.norminf
params = self.parameters
# Parameters
tau = params['tau']
gamma = params['gamma']
kappa = params['kappa']
optol = params['optol']
feastol = params['feastol']
beta_small = params['beta_small']
beta_large = params['beta_large']
sigma_init_min = params['sigma_init_min']
sigma_init_max = params['sigma_init_max']
theta_max = params['theta_max']
theta_min = params['theta_min']
# Problem
problem = cast_problem(problem)
self.problem = problem
# Linear solver
self.linsolver1 = new_linsolver(params['linsolver'],'symmetric')
self.linsolver2 = new_linsolver(params['linsolver'],'symmetric')
# Reset
self.reset()
# Barrier
self.barrier = AugLBarrier(problem.get_num_primal_variables(),
problem.l,
problem.u,
eps=feastol/10.)
# Init primal
if problem.x is not None:
self.x = self.barrier.to_interior(problem.x.copy(),
eps=feastol/10.)
else:
self.x = (self.barrier.umax+self.barrier.umin)/2.
assert(np.all(self.x > self.barrier.umin))
assert(np.all(self.x < self.barrier.umax))
# Init dual
if problem.lam is not None:
self.lam = problem.lam.copy()
else:
self.lam = np.zeros(problem.b.size)
"""
A finder chart maker, designed for use with transients.
TO USE:
> import findermaker
> findermaker.FinderMaker(ra='r.a. string',dec='declination string',name='object name')
-<NAME>, 2015
"""
import aplpy
import os
import time
from subprocess import Popen, PIPE
import coord
from astrometryClient.client import Client as anClient
#import pyfits as pf
from astropy.io import fits as pf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import leastsq
import wget
# a hackaround for a compass rose in aplpy
def initCompass(self, parent):
self._ax1 = parent._ax1
self._wcs = parent._wcs
self.world2pixel = parent.world2pixel
self.pixel2world = parent.pixel2world
self._initialize_compass()
aplpy.overlays.Compass.__init__ = initCompass
# define some pretty colors
red = '#990000'
blue = '#0000FF'
green = '#006600'
orange = '#996600'
class FinderMaker(object):
"""
A class to make finder charts.
image: path to input image
ra,dec: coordinates of object
name: name of object
diagnostics: if True, will plot diagnostic plots of the fit to each star's location
NOTE: Either (ra, dec) OR image must be given (both is ok too). Will use astrometry.net to find WCS solution
for any image without one. If (ra, dec) are not given, the object of interest must be clearly discernible
from the background in the given image.
"""
def __init__(self, image=None, ra=None, dec=None, name=None, diagnostics=False):
self.name = name
self.diagnostics = diagnostics
self.annotations = []
# make sure we have one of the allowed input combinations
if image != None:
if image.rsplit('.')[-1].lower() not in ['fit','fits']:
raise ValueError('Image must be in fits format.')
else:
self.image = image
else:
self.image = None
# parse input ra and dec (can be sexagesimal or decimal degrees)
if (ra != None) & (dec != None):
self.ra = coord.parse_ra( ra )
self.dec = coord.parse_dec( dec )
else:
self.ra = None
self.dec = None
if (self.image == None) and any([v==None for v in [self.ra, self.dec]]):
raise ValueError('Must include either a fits image or the target coordinates.')
# run it!
self.go()
def go(self):
if self.image == None:
# get an image of the field
self.get_dss_image()
else:
# make sure we have WCS info
header = pf.open(self.image)[0].header
if header.get('WCSAXES') == None:
self.get_astrometry()
self.build_plot()
if (self.ra == self.dec == None):
# have the user choose the object
input( '\n\nHit enter and then identify the target.\n\n' )
while True:
res = self.get_star()
if res != None:
self.ra, self.dec = res
break
else:
print("\nThat didn't work. Try again.\n")
self.add_object( self.ra, self.dec, wcs=True, marker='a' )
self.add_offset_stars()
def get_astrometry(self):
"""
Interact with astrometry.net to get the WCS for our image, using the
astrometry client code.
"""
# connect to astrometry.net
supernova_key = '<KEY>'
supernova_url = 'http://supernova.astrometry.net/api/'
nova_key = '<KEY>'
nova_url = 'http://nova.astrometry.net/api/'
new_image = self.image.replace('.fits','.wcs.fits')
# The short routines below are pulled from astrometryClient/client.py in the __main__
c = anClient(apiurl=nova_url)
c.login(nova_key)
# upload the image
print('\n\nUploading image to astrometry.net\n\n')
kwargs = {'publicly_visible': 'y', 'allow_modifications': 'd', 'allow_commercial_use': 'd'}
upres = c.upload(self.image, **kwargs)
stat = upres['status']
if stat != 'success':
raise IOError('Upload failed: status %s\n %s\n' %(str(stat), str(upres)))
subID = upres['subid']
print('\n\nUpload successful. Submission id:',subID,'\n\n')
# Wait for the response
while True:
stat = c.sub_status(subID, justdict=True)
jobs = stat.get('jobs', [])
if len(jobs):
for j in jobs:
if j is not None:
break
if j is not None:
print('\n\nReceived job id',j,'\n\n')
jobID = j
break
time.sleep(5)
# wait for the calculation to finish
success = False
while True:
stat = c.job_status(jobID, justdict=True)
if stat.get('status','') in ['success']:
success = (stat['status'] == 'success')
break
time.sleep(5)
if not success:
raise IOError('astrometry.net query failed: status %s'%str(stat))
# download the new image
print('\n\nGrabbing solved image\n\n')
url = nova_url.replace('api','new_fits_file/%i' %jobID)
try:
os.remove( new_image )
except OSError:
pass
wget.download( url, out=new_image )
self.image = new_image
def get_dss_image(self, name='finder_field.fits', size=10.0):
"""
Get a DSS finder chart, if not given input image. Size is width in arcminutes.
"""
url = "http://archive.stsci.edu/cgi-bin/dss_search?v=3&r=%.8f&d=%.8f$" %(self.ra, self.dec) +\
"&h=%.2f&w=%.2f&f=fits&c=none&fov=NONE&e=J2000" %(size, size)
print('Downloading image.')
try:
os.remove( name )
except OSError:
pass
wget.download( url, out=name )
self.image = name
def build_plot(self):
"""
Creates the plot with which you interact.
"""
self.hdu = pf.open( self.image )
# assume, from here on out, that the first table is the relevant one
self.fig = aplpy.FITSFigure( self.image )
self.fig.show_grayscale(stretch='log', invert=True)
self.fig.compass = aplpy.overlays.Compass(self.fig)
self.fig.compass.show_compass(color='black', corner=2, length=0.1)
self.fig.show_grid()
self.fig.grid.set_color('k')
self.fig.grid.set_alpha(0.25)
if self.name != None:
plt.title(self.name)
def _gauss2D(self, params, x, y):
A, x0, y0, sigmaX, sigmaY, C = params
return A*np.exp(-((x-x0)**2/(2*sigmaX**2) + (y-y0)**2/(2*sigmaY**2))) + C
def _gauss2D_residuals(self, params, z, x, y):
return z - self._gauss2D(params, x, y)
def _plane(self, params, x, y):
a, bx, by = params
return a + bx*x + by*y
def _plane_residuals(self, params, z, x, y):
return z - self._plane(params, x, y)
def get_star(self, cutout=10.0, error_plot=None, fit_plane=2):
"""
Get a single star from the user interacting with the image.
Star must be bright enough to fit a Gaussian to it!
cutout is the region size to consider during fitting (in arcseconds).
Returns best-fit coordinates (in WCS).
If error_plot > 0, will plot up cutout and the best-fit Gaussian.
If error_plot > 1, will plot background fitting result in addition.
fit_plane must be an integer describing the size of the background region to
subtract. fit_plane == 0: none
fit_plane == 1: same size as cutout
fit_plane > 2: size = fit_plane*cutout
"""
if error_plot == None:
error_plot = self.diagnostics
# convert arcseconds into degrees
cutout = cutout/3600.0
print("Click on the object.")
[(x,y)] = plt.ginput() # in pixels
# map the coutout size (in arcseconds) onto pixel size
# Use the fact that delta(Dec) = delta(true angle)
ra, dec = self.fig.pixel2world(x,y)
_,y1 = self.fig.world2pixel( ra, dec+cutout )
_,y2 = self.fig.world2pixel( ra, dec )
w = abs(y1-y2)
# get a subarray to fit the Gaussian to
[xmin,xmax, ymin,ymax] = map( lambda l: int(round(l)),
[x-w,x+w, y-w,y+w] )
subarray = np.copy(self.hdu[0].data[ymin:ymax, xmin:xmax]).astype(float)
X = np.arange(xmin,xmax) # keeping track of the true pixel
Y = np.arange(ymin,ymax) # numbers of the subarray
XX,YY = np.meshgrid( X,Y )
# need to make everything 1D
X = XX.reshape( XX.shape[0]*XX.shape[1] )
Y = YY.reshape( YY.shape[0]*YY.shape[1] )
Z = subarray.reshape( subarray.shape[0]*subarray.shape[1] )
if fit_plane:
# fit and subtract a 2d surface fit_plane*cutout in dimension
f = fit_plane
[bgxmin,bgxmax, bgymin,bgymax] = map( lambda l: int(round(l)),
[x-f*w,x+f*w, y-f*w,y+f*w] )
bgarray = np.copy(self.hdu[0].data[bgymin-1:bgymax, bgxmin-1:bgxmax]).astype(float)
bgX = np.arange(bgxmin-1,bgxmax, dtype=float) # keeping track of the true pixel
bgY = np.arange(bgymin-1,bgymax, dtype=float) # numbers of the subarray
bgXX,bgYY = np.meshgrid( bgX,bgY )
bgX = bgXX.reshape( bgXX.shape[0]*bgXX.shape[1] )
bgY = bgYY.reshape( bgYY.shape[0]*bgYY.shape[1] )
bgZ = bgarray.reshape( bgarray.shape[0]*bgarray.shape[1] )
fit_params, success = leastsq(self._plane_residuals,
[np.min(bgZ),0.0, 0.0],
args=(bgZ, bgX, bgY))
if not success:
print('background fitting failed!')
return None
else:
plane = self._plane(fit_params, X, Y)
Z -= plane
if error_plot > 1:
curax = plt.gca() # need to manage current axis variable
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(bgXX,bgYY,bgarray, color='k')
ax.plot_wireframe(bgXX,bgYY, self._plane(fit_params, bgXX, bgYY), color='r')
plt.xlabel('RA (px)')
plt.ylabel('Dec (px)')
plt.title(self.image+' ::: BG fitting')
plt.show()
plt.sca( curax )
# now fit a 2D Gaussian to it
maximum = np.max(Z)
inparams = [maximum, x, y, w/5, w/5, np.median(Z)]
"""
This file contains classes and functions for representing, solving, and simulating agents
who must allocate their resources among consumption, risky or rental housing, saving in a
risk-free asset (with a low return), and saving in a risky asset (with higher average return).
"""
from copy import copy, deepcopy
import numpy as np
from HARK import MetricObject, make_one_period_oo_solver, NullFunc
from HARK.ConsumptionSaving.ConsIndShockModel import (
IndShockConsumerType,
utility,
utilityP,
utilityP_inv,
utility_inv,
utility_invP,
)
from HARK.ConsumptionSaving.ConsPortfolioModel import (
PortfolioSolution,
PortfolioConsumerType,
solveConsPortfolio,
)
from HARK.distribution import (
Lognormal,
combine_indep_dstns,
calc_expectation,
Bernoulli,
)
from HARK.interpolation import (
LinearInterp,
IdentityFunction,
ValueFuncCRRA,
LinearInterpOnInterp1D,
BilinearInterp,
MargValueFuncCRRA,
TrilinearInterp,
CubicInterp,
)
from numba import njit, prange
from scipy.optimize import minimize_scalar
from Calibration.params_CGM import dict_portfolio
class PortfolioRiskyHousingSolution(MetricObject):
distance_criteria = ["vPfuncRnt", "vPfuncHse"]
def __init__(
self,
cFuncRnt=NullFunc(),
hseFuncRnt=NullFunc(),
totExpFuncRnt=NullFunc(),
ShareFuncRnt=NullFunc(),
vFuncRnt=NullFunc(),
vPfuncRnt=NullFunc(),
cFuncHse=NullFunc(),
ShareFuncHse=NullFunc(),
vFuncHse=NullFunc(),
vPfuncHse=NullFunc(),
):
# Set attributes of self
self.cFuncRnt = cFuncRnt
self.hseFuncRnt = hseFuncRnt
self.totExpFuncRnt = totExpFuncRnt
self.cFuncHse = cFuncHse
self.ShareFuncRnt = ShareFuncRnt
self.ShareFuncHse = ShareFuncHse
self.vFuncRnt = vFuncRnt
self.vFuncHse = vFuncHse
self.vPfuncRnt = vPfuncRnt
self.vPfuncHse = vPfuncHse
class PortfolioRentalHousingType(PortfolioConsumerType):
"""
A consumer type with rental housing and a portfolio choice. This agent type has
log-normal return factors. Their problem is defined by a coefficient of relative
risk aversion, share of expenditures spent on rental housing, intertemporal
discount factor, risk-free interest factor, and time sequences of permanent income
growth rate, survival probability, and permanent and transitory income shock
standard deviations (in logs). The agent may also invest in a risky asset, which
has a higher average return than the risk-free asset. He *might* have age-varying
beliefs about the risky-return; if he does, then "true" values of the risky
asset's return distribution must also be specified.
"""
time_inv_ = deepcopy(PortfolioConsumerType.time_inv_)
time_inv_ = time_inv_ + ["RntHseShare"]
def __init__(self, cycles=1, verbose=False, quiet=False, **kwds):
params = init_portfolio_housing.copy()
params.update(kwds)
kwds = params
# Initialize a basic consumer type
PortfolioConsumerType.__init__(
self, cycles=cycles, verbose=verbose, quiet=quiet, **kwds
)
self.solve_one_period = make_one_period_oo_solver(
ConsPortfolioRentalHousingSolver
)
if not hasattr(self, "RntHseShare"):
raise Exception(
"Portfolio Choice with Risky Housing must have a RntHseShare parameter."
)
def update(self):
IndShockConsumerType.update(self)
self.update_AdjustPrb()
self.update_human_wealth()
self.update_RiskyShares()
self.update_RiskyDstn()
self.update_ShockDstn()
self.update_ShareGrid()
self.update_ShareLimit()
def update_solution_terminal(self):
PortfolioConsumerType.update_solution_terminal(self)
self.solution_terminal.hNrm = 0
def update_human_wealth(self):
hNrm = np.empty(self.T_cycle + 1)
hNrm[-1] = 0.0
for t in range(self.T_cycle - 1, -1, -1):
IncShkDstn = self.IncShkDstn[t]
ShkPrbsNext = IncShkDstn.pmf
PermShkValsNext = IncShkDstn.X[0]
TranShkValsNext = IncShkDstn.X[1]
# Calculate human wealth this period
Ex_IncNext = np.dot(ShkPrbsNext, TranShkValsNext * PermShkValsNext)
hNrm[t] = self.PermGroFac[t] / self.Rfree * (Ex_IncNext + hNrm[t + 1])
self.hNrm = hNrm
def update_RiskyShares(self):
if self.ExRiskyShareBool:
if type(self.ExRiskyShare) is list:
if len(self.ExRiskyShare) == self.T_cycle:
self.add_to_time_vary("ExRiskyShare")
else:
raise AttributeError(
"If ExRiskyShare is time-varying, it must have length of T_cycle!"
)
else:
self.add_to_time_inv("ExRiskyShare")
if "ExRiskyShare" in self.time_vary:
self.RiskyAvg = []
self.RiskyStd = []
for t in range(self.T_cycle):
mean = self.RiskyAvgTrue
std = self.RiskyStdTrue
mean_squared = mean ** 2
variance = std ** 2
mu = np.log(mean_squared / (np.sqrt(mean_squared + variance)))
sigma = np.sqrt(np.log(1.0 + variance / mean_squared))
ratio = (self.WlthNrmAvg[t] + self.hNrm[t]) / (
self.CRRA * self.ExRiskyShare[t] * self.WlthNrmAvg[t]
)
if self.FixRiskyAvg and self.FixRiskyStd:
# This case ignores exogenous risky shares as option parameters indicate
# fixing both RiskyAvg and RiskyStd to their true values
self.RiskyAvg.append(self.RiskyAvgTrue)
self.RiskyStd.append(self.RiskyStdTrue)
elif self.FixRiskyStd:
# There is no analytical solution for this case, so we look for a numerical one
risky_share = (
lambda x: np.log(x / self.Rfree)
* (1.0 + self.hNrm[t] / self.WlthNrmAvg[t])
/ (self.CRRA * np.log(1 + variance / x ** 2))
- self.ExRiskyShare[t]
)
res = minimize_scalar(
risky_share, bounds=(mean, 2), method="bounded"
)
RiskyAvg = res.x
self.RiskyAvg.append(RiskyAvg)
self.RiskyStd.append(self.RiskyStdTrue)
elif self.FixRiskyAvg:
# This case has an analytical solution
RiskyVar = ((mean / self.Rfree) ** ratio - 1) * mean_squared
self.RiskyAvg.append(self.RiskyAvgTrue)
self.RiskyStd.append(np.sqrt(RiskyVar))
else:
# There are 2 ways to do this one, but not implemented yet
raise NotImplementedError(
"The case when RiskyAvg and RiskyStd are both not fixed is not implemented yet."
)
def post_solve(self):
for i in range(self.T_age):
TotalExpAdj = copy(self.solution[i].cFuncAdj)
self.solution[i].TotalExpAdj = TotalExpAdj
if isinstance(TotalExpAdj, LinearInterp):
x_list = TotalExpAdj.x_list
y_list = TotalExpAdj.y_list
self.solution[i].cFuncAdj = LinearInterp(
x_list, (1 - self.RntHseShare) * y_list
)
self.solution[i].hFuncAdj = LinearInterp(
x_list, self.RntHseShare * y_list
)
elif isinstance(TotalExpAdj, IdentityFunction):
x_list = np.array([0, 1])
y_list = np.array([0, 1])
self.solution[i].cFuncAdj = LinearInterp(
x_list, (1 - self.RntHseShare) * y_list
)
self.solution[i].hFuncAdj = LinearInterp(
x_list, self.RntHseShare * y_list
)
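# A minimal usage sketch (commented out, illustrative only): the calibration dictionary
# `dict_portfolio` imported above is assumed to supply the standard CGM parameters, and the
# RntHseShare value is a hypothetical expenditure share on rent, not a calibrated number.
#
# agent = PortfolioRentalHousingType(**dict_portfolio, RntHseShare=0.3)
# agent.solve()
# cFunc0, hFunc0 = agent.solution[0].cFuncAdj, agent.solution[0].hFuncAdj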
class ConsPortfolioRentalHousingSolver(MetricObject):
def __init__(
self,
solution_next,
ShockDstn,
IncShkDstn,
RiskyDstn,
LivPrb,
DiscFac,
CRRA,
Rfree,
PermGroFac,
BoroCnstArt,
aXtraGrid,
ShareGrid,
vFuncBool,
AdjustPrb,
DiscreteShareBool,
ShareLimit,
IndepDstnBool,
):
self.solution_next = solution_next
self.ShockDstn = ShockDstn
self.IncShkDstn = IncShkDstn
self.RiskyDstn = RiskyDstn
self.LivPrb = LivPrb
self.DiscFac = DiscFac
self.CRRA = CRRA
self.Rfree = Rfree
self.PermGroFac = PermGroFac
self.BoroCnstArt = BoroCnstArt
self.aXtraGrid = aXtraGrid
self.ShareGrid = ShareGrid
self.vFuncBool = vFuncBool
self.AdjustPrb = AdjustPrb
self.DiscreteShareBool = DiscreteShareBool
self.ShareLimit = ShareLimit
self.IndepDstnBool = IndepDstnBool
def add_human_wealth(self):
self.ShkPrbsNext = self.IncShkDstn.pmf
self.PermShkValsNext = self.IncShkDstn.X[0]
self.TranShkValsNext = self.IncShkDstn.X[1]
# Calculate human wealth this period
self.Ex_IncNext = np.dot(
self.ShkPrbsNext, self.TranShkValsNext * self.PermShkValsNext
)
self.hNrmNow = (
self.PermGroFac / self.Rfree * (self.Ex_IncNext + self.solution_next.hNrm)
)
return self.hNrmNow
def solve(self):
solution = solveConsPortfolio(
self.solution_next,
self.ShockDstn,
self.IncShkDstn,
self.RiskyDstn,
self.LivPrb,
self.DiscFac,
self.CRRA,
self.Rfree,
self.PermGroFac,
self.BoroCnstArt,
self.aXtraGrid,
self.ShareGrid,
self.vFuncBool,
self.AdjustPrb,
self.DiscreteShareBool,
self.ShareLimit,
self.IndepDstnBool,
)
solution.hNrm = self.add_human_wealth()
return solution
class PortfolioRiskyHousingType(PortfolioConsumerType):
time_inv_ = deepcopy(PortfolioConsumerType.time_inv_)
time_inv_ = time_inv_ + ["HouseShare", "HseDiscFac", "RntHseShare", "HseInitPrice"]
time_vary_ = deepcopy(PortfolioConsumerType.time_vary_)
time_vary_ = time_vary_ + ["RentPrb", "HseGroFac"]
shock_vars_ = PortfolioConsumerType.shock_vars_ + ["RntShk", "HouseShk"]
state_vars = PortfolioConsumerType.state_vars + ["haveHse", "hNrm"]
track_vars = ["mNrm", "hNrm", "haveHse", "cNrm", "aNrm", "pLvl", "aLvl", "Share"]
def __init__(self, cycles=1, verbose=False, quiet=False, **kwds):
params = init_portfolio_risky_housing.copy()
params.update(kwds)
kwds = params
# Initialize a basic consumer type
PortfolioConsumerType.__init__(
self, cycles=cycles, verbose=verbose, quiet=quiet, **kwds
)
self.solve_one_period = make_one_period_oo_solver(
ConsPortfolioRiskyHousingSolver
)
def update_HouseDstn(self):
"""
Creates the attributes RiskyDstn from the primitive attributes RiskyAvg,
RiskyStd, and RiskyCount, approximating the (perceived) distribution of
returns in each period of the cycle.
Parameters
----------
None
Returns
-------
None
"""
# Determine whether this instance has time-varying risk perceptions
if (
(type(self.HouseAvg) is list)
and (type(self.HouseStd) is list)
and (len(self.HouseAvg) == len(self.HouseStd))
and (len(self.HouseAvg) == self.T_cycle)
):
self.add_to_time_vary("HouseAvg", "HouseStd")
elif (type(self.HouseStd) is list) or (type(self.HouseAvg) is list):
raise AttributeError(
"If HouseAvg is time-varying, then HouseStd must be as well, and they must both have length of T_cycle!"
)
else:
self.add_to_time_inv("HouseAvg", "HouseStd")
# Generate a discrete approximation to the risky return distribution if the
# agent has age-varying beliefs about the risky asset
if "HouseAvg" in self.time_vary:
self.HouseDstn = []
for t in range(self.T_cycle):
self.HouseDstn.append(
Lognormal.from_mean_std(self.HouseAvg[t], self.HouseStd[t]).approx(
self.HouseShkCount
)
)
self.add_to_time_vary("HouseDstn")
# Generate a discrete approximation to the risky return distribution if the
# agent does *not* have age-varying beliefs about the risky asset (base case)
else:
self.HouseDstn = Lognormal.from_mean_std(
self.HouseAvg,
self.HouseStd,
).approx(self.HouseShkCount)
self.add_to_time_inv("HouseDstn")
def update_ShockDstn(self):
"""
Combine the income shock distribution (over PermShk and TranShk) with the
risky return distribution (RiskyDstn) to make a new attribute called ShockDstn.
Parameters
----------
None
Returns
-------
None
"""
if "HouseDstn" in self.time_vary:
self.ShockDstn = [
combine_indep_dstns(self.IncShkDstn[t], self.HouseDstn[t])
for t in range(self.T_cycle)
]
else:
self.ShockDstn = [
combine_indep_dstns(self.IncShkDstn[t], self.HouseDstn)
for t in range(self.T_cycle)
]
self.add_to_time_vary("ShockDstn")
# Mark whether the risky returns, income shocks, and housing shocks are independent (they are)
self.IndepDstnBool = True
self.add_to_time_inv("IndepDstnBool")
def update(self):
IndShockConsumerType.update(self)
self.update_AdjustPrb()
self.update_RiskyDstn()
self.update_HouseDstn()
self.update_ShockDstn()
self.update_ShareGrid()
self.update_HouseGrid()
self.update_ShareLimit()
def update_solution_terminal(self):
PortfolioConsumerType.update_solution_terminal(self)
solution = portfolio_to_housing(self.solution_terminal, self.RntHseShare)
self.solution_terminal = solution
def update_HouseGrid(self):
"""
Creates the attribute HouseGrid as an evenly spaced grid on [HouseMin,HouseMax], using
the primitive parameter HouseCount.
Parameters
----------
None
Returns
-------
None
"""
self.HouseGrid = np.linspace(self.HouseMin, self.HouseMax, self.HouseCount)
self.add_to_time_inv("HouseGrid")
def get_HouseShk(self):
"""
Sets the attribute HouseShk as a single draw from a lognormal distribution.
Uses the attributes HouseAvg and HouseStd.
Parameters
----------
None
Returns
-------
None
"""
HouseAvg = self.HouseAvg
HouseStd = self.HouseStd
HouseAvgSqrd = HouseAvg ** 2
HouseVar = HouseStd ** 2
mu = np.log(HouseAvg / (np.sqrt(1.0 + HouseVar / HouseAvgSqrd)))
sigma = np.sqrt(np.log(1.0 + HouseVar / HouseAvgSqrd))
self.shocks["HouseShk"] = Lognormal(
mu, sigma, seed=self.RNG.randint(0, 2 ** 31 - 1)
).draw(1)
def get_RentShk(self):
"""
Sets the attribute RentShk as a boolean array of size AgentCount, indicating
whether each agent is forced to liquidate their house this period.
Uses the attribute RentPrb to draw from a Bernoulli distribution.
Parameters
----------
None
Returns
-------
None
"""
if not ("RentPrb" in self.time_vary):
self.shocks["RentShk"] = Bernoulli(
self.RentPrb, seed=self.RNG.randint(0, 2 ** 31 - 1)
).draw(self.AgentCount)
else:
RntShk = np.zeros(self.AgentCount, dtype=bool) # Initialize shock array
for t in range(self.T_cycle):
these = t == self.t_cycle
N = np.sum(these)
if N > 0:
if t == 0:
RentPrb = 0.0
else:
RentPrb = self.RentPrb[t - 1]
RntShk[these] = Bernoulli(
RentPrb, seed=self.RNG.randint(0, 2 ** 31 - 1)
).draw(N)
self.shocks["RentShk"] = RntShk
def get_shocks(self):
"""
Draw shocks as in PortfolioConsumerType, then draw
a single common value for the House price shock. Also draws whether each
agent is forced to rent next period.
Parameters
----------
None
Returns
-------
None
"""
PortfolioConsumerType.get_shocks(self)
self.get_HouseShk()
self.get_RentShk()
def get_states(self):
PortfolioConsumerType.get_states(self)
# previous house size
hNrmPrev = self.state_prev["hNrm"]
# new house size
self.state_now["hNrm"] = (
np.array(self.HseGroFac)[self.t_cycle] * hNrmPrev / self.shocks["PermShk"]
)
# cash on hand in case of liquidation
mRntNrmNow = (
self.state_now["mNrm"] + self.state_now["hNrm"] * self.shocks["HouseShk"]
)
# find index for households that were previously homeowners but
# will no longer be homeowners next period
# state_prev["haveHse"] = True and
# shocks["RentShk"] = True
trans_idx = np.logical_and(self.state_prev["haveHse"], self.shocks["RentShk"])
# only change state for agents who were previously homeowners
# they may stay homeowners or become renters
self.state_now["haveHse"] = self.state_prev["haveHse"].copy()
self.state_now["haveHse"][trans_idx] = False
# if households went from homeowner to renter, they
# receive their liquidation value as cash on hand
self.state_now["mNrm"][trans_idx] = mRntNrmNow[trans_idx]
return None
def get_controls(self):
"""
Calculates consumption cNrmNow and risky portfolio share ShareNow using
the policy functions in the attribute solution. These are stored as attributes.
Parameters
----------
None
Returns
-------
None
"""
cNrmNow = np.zeros(self.AgentCount) + np.nan
ShareNow = np.zeros(self.AgentCount) + np.nan
# Loop over each period of the cycle, getting controls separately depending on "age"
for t in range(self.T_cycle):
these = t == self.t_cycle
# Get controls for agents who are renters
those = np.logical_and(these, self.shocks["RentShk"])
cNrmNow[those] = self.solution[t].cFuncRnt(self.state_now["mNrm"][those])
ShareNow[those] = self.solution[t].ShareFuncRnt(
self.state_now["mNrm"][those]
)
# Get Controls for agents who are homeowners
those = np.logical_and(these, np.logical_not(self.shocks["RentShk"]))
cNrmNow[those] = self.solution[t].cFuncHse(
self.state_now["mNrm"][those], self.state_now["hNrm"][those]
)
ShareNow[those] = self.solution[t].ShareFuncHse(
self.state_now["mNrm"][those], self.state_now["hNrm"][those]
)
# Store controls as attributes of self
self.controls["cNrm"] = cNrmNow
self.controls["Share"] = ShareNow
def sim_birth(self, which_agents):
"""
Create new agents to replace ones who have recently died; takes draws of
initial aNrm and pLvl, as in PortfolioConsumerType, then sets RentShk
to zero as initial values.
Parameters
----------
which_agents : np.array
Boolean array of size AgentCount indicating which agents should be "born".
Returns
-------
None
"""
# Get and store states for newly born agents
# for now, agents start being homeowners and
# the distribution of houses is uniform
self.state_now["haveHse"][which_agents] = True
N = np.sum(which_agents) # Number of new consumers to make
self.state_now["hNrm"][which_agents] = np.linspace(1.0, 10.0, N)
PortfolioConsumerType.sim_birth(self, which_agents)
def initialize_sim(self):
"""
Initialize the state of simulation attributes. Simply calls the same method
for PortfolioConsumerType, then sets the type of RentShk to bool.
Parameters
----------
None
Returns
-------
None
"""
self.state_now["haveHse"] = np.zeros(self.AgentCount, dtype=bool)
PortfolioConsumerType.initialize_sim(self)
def get_poststates(self):
"""
Calculates end-of-period assets for each consumer of this type.
Parameters
----------
None
Returns
-------
None
"""
self.state_now["aNrm"] = (
self.state_now["mNrm"]
- self.controls["cNrm"]
- (np.array(self.HseGroFac)[self.t_cycle] - (1 - self.HseDiscFac))
* self.HseInitPrice
* self.state_now["hNrm"]
)
# Useful in some cases to precalculate asset level
self.state_now["aLvl"] = self.state_now["aNrm"] * self.state_now["pLvl"]
class MargValueFuncHousing(MetricObject):
distance_criteria = ["cFunc", "CRRA"]
def __init__(self, cFunc, HouseGrid, CRRA, HouseShare):
self.cFunc = deepcopy(cFunc)
self.hseGrid = HouseGrid
self.CRRA = CRRA
self.HouseShare = HouseShare
def __call__(self, m_nrm, h_nrm):
"""
Evaluate the marginal value function at given levels of market resources m.
Parameters
----------
cFuncArgs : floats or np.arrays
Values of the state variables at which to evaluate the marginal
value function.
Returns
-------
vP : float or np.array
Marginal lifetime value of beginning this period with state
cFuncArgs
"""
c_opt = self.cFunc(m_nrm, h_nrm)
x_comp = c_opt ** (1 - self.HouseShare) * h_nrm ** self.HouseShare
return utilityP(x_comp, gam=self.CRRA) * (h_nrm / c_opt) ** self.HouseShare
class ConsPortfolioRiskyHousingSolver(MetricObject):
"""
Define an object-oriented one period solver.
Solve the one period problem for a portfolio-choice consumer.
This solver is used when the income and risky return shocks
are independent and the allowed optimal share is continuous.
Parameters
----------
solution_next : PortfolioSolution
Solution to next period's problem.
ShockDstn : [np.array]
List with four arrays: discrete probabilities, permanent income shocks,
transitory income shocks, and risky returns. This is only used if the
input IndepDstnBool is False, indicating that income and return distributions
can't be assumed to be independent.
IncShkDstn : distribution.Distribution
Discrete distribution of permanent income shocks
and transitory income shocks. This is only used if the input IndepDsntBool
is True, indicating that income and return distributions are independent.
RiskyDstn : [np.array]
List with two arrays: discrete probabilities and risky asset returns. This
is only used if the input IndepDstnBool is True, indicating that income
and return distributions are independent.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. In this model, it is *required* to be zero.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
ShareGrid : np.array
Array of risky portfolio shares on which to define the interpolation
of the consumption function when Share is fixed.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
AdjustPrb : float
Probability that the agent will be able to update his portfolio share.
DiscreteShareBool : bool
Indicator for whether risky portfolio share should be optimized on the
continuous [0,1] interval using the FOC (False), or instead only selected
from the discrete set of values in ShareGrid (True). If True, then
vFuncBool must also be True.
ShareLimit : float
Limiting lower bound of risky portfolio share as mNrm approaches infinity.
IndepDstnBool : bool
Indicator for whether the income and risky return distributions are in-
dependent of each other, which can speed up the expectations step.
"""
def __init__(
self,
solution_next,
ShockDstn,
IncShkDstn,
RiskyDstn,
HouseDstn,
LivPrb,
DiscFac,
CRRA,
Rfree,
PermGroFac,
HseGroFac,
HseDiscFac,
HseInitPrice,
HouseShare,
RntHseShare,
BoroCnstArt,
aXtraGrid,
ShareGrid,
HouseGrid,
vFuncBool,
RentPrb,
DiscreteShareBool,
ShareLimit,
):
"""
Constructor for portfolio choice problem solver.
"""
self.solution_next = solution_next
self.ShockDstn = ShockDstn
self.IncShkDstn = IncShkDstn
self.RiskyDstn = RiskyDstn
self.HouseDstn = HouseDstn
self.LivPrb = LivPrb
self.DiscFac = DiscFac
self.CRRA = CRRA
self.Rfree = Rfree
self.PermGroFac = PermGroFac
self.HseGroFac = HseGroFac
self.HseDiscFac = HseDiscFac
self.HouseShare = HouseShare
self.HseInitPrice = HseInitPrice
self.RntHseShare = RntHseShare
self.BoroCnstArt = BoroCnstArt
self.aXtraGrid = aXtraGrid
self.ShareGrid = ShareGrid
self.HouseGrid = HouseGrid
self.vFuncBool = vFuncBool
self.RentPrb = RentPrb
self.DiscreteShareBool = DiscreteShareBool
self.ShareLimit = ShareLimit
# Make sure the individual is liquidity constrained. Allowing a consumer to
# borrow *and* invest in an asset with unbounded (negative) returns is a bad mix.
if self.BoroCnstArt != 0.0:
raise ValueError("PortfolioConsumerType must have BoroCnstArt=0.0!")
# Make sure that if risky portfolio share is optimized only discretely, then
# the value function is also constructed (else this task would be impossible).
if self.DiscreteShareBool and (not self.vFuncBool):
raise ValueError(
"PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!"
)
self.def_utility_funcs()
def def_utility_funcs(self):
"""
Define temporary functions for utility and its derivative and inverse
"""
self.u = lambda x: utility(x, self.CRRA)
self.uP = lambda x: utilityP(x, self.CRRA)
self.uPinv = lambda x: utilityP_inv(x, self.CRRA)
self.uinv = lambda x: utility_inv(x, self.CRRA)
self.uinvP = lambda x: utility_invP(x, self.CRRA)
def set_and_update_values(self):
"""
Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods.
"""
# Unpack next period's solution
self.vPfuncRnt_next = self.solution_next.vPfuncRnt
self.vPfuncHse_next = self.solution_next.vPfuncHse
self.vFuncRnt_next = self.solution_next.vFuncRnt
self.vFuncHse_next = self.solution_next.vFuncHse
# Unpack the shock distribution
self.TranShks_next = self.IncShkDstn.X[1]
self.Risky_next = self.RiskyDstn.X
# Flag for whether the natural borrowing constraint is zero
self.zero_bound = np.min(self.TranShks_next) == 0.0
self.RiskyMax = np.max(self.Risky_next)
self.RiskyMin = np.min(self.Risky_next)
self.tmp_fac_A = (
((1.0 - self.RntHseShare) ** (1.0 - self.RntHseShare))
* (self.RntHseShare ** self.RntHseShare)
) ** (1.0 - self.CRRA)
# Shock positions in ShockDstn
self.PermShkPos = 0
self.TranShkPos = 1
self.HseShkPos = 2
def prepare_to_solve(self):
"""
Perform preparatory work.
"""
self.set_and_update_values()
def prepare_to_calc_EndOfPrdvP(self):
"""
Prepare to calculate end-of-period marginal values by creating an array
of market resources that the agent could have next period, considering
the grid of end-of-period assets and the distribution of shocks he might
experience next period.
"""
# bNrm represents R*a, balances after asset return shocks but before income.
# This just uses the highest risky return as a rough shifter for the aXtraGrid.
if self.zero_bound:
self.aNrmGrid = self.aXtraGrid
self.bNrmGrid = np.insert(
self.RiskyMax * self.aXtraGrid,
0,
self.RiskyMin * self.aXtraGrid[0],
)
else:
# Add an asset point at exactly zero
self.aNrmGrid = np.insert(self.aXtraGrid, 0, 0.0)
self.bNrmGrid = self.RiskyMax * np.insert(self.aXtraGrid, 0, 0.0)
# Get grid and shock sizes, for easier indexing
self.aNrm_N = self.aNrmGrid.size
self.Share_N = self.ShareGrid.size
self.House_N = self.HouseGrid.size
# Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncShkDstn
self.bNrm_tiled, self.House_tiled = np.meshgrid(
self.bNrmGrid, self.HouseGrid, indexing="ij"
)
self.aNrm_2tiled, self.House_2tiled = np.meshgrid(
self.aNrmGrid, self.HouseGrid, indexing="ij"
)
# Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn
self.aNrm_3tiled, self.House_3tiled, self.Share_3tiled = np.meshgrid(
self.aNrmGrid, self.HouseGrid, self.ShareGrid, indexing="ij"
)
def m_nrm_next(self, shocks, b_nrm):
"""
Calculate future realizations of market resources
"""
return (
b_nrm / (self.PermGroFac * shocks[self.PermShkPos])
+ shocks[self.TranShkPos]
)
def hse_nrm_next(self, shocks, hse_nrm):
"""
Calculate future realizations of house size
"""
return self.HseGroFac * hse_nrm / shocks[self.PermShkPos]
def m_rnt_nrm_next(self, shocks, m_nrm, hse_nrm):
"""
Calculate future realizations of market resources
including house liquidation
"""
return m_nrm + shocks[self.HseShkPos] * hse_nrm
def calc_EndOfPrdvP(self):
"""
Calculate end-of-period marginal value of assets and shares at each point
in aNrm and ShareGrid. Does so by taking expectation of next period marginal
values across income and risky return shocks.
"""
def dvdb_dist(shocks, b_nrm, hse_nrm):
"""
Evaluate realizations of marginal value of market resources next period
"""
mNrm_next = self.m_nrm_next(shocks, b_nrm)
hseNrm_next = self.hse_nrm_next(shocks, hse_nrm)
mRntNrm_next = self.m_rnt_nrm_next(shocks, mNrm_next, hseNrm_next)
dvdmRnt_next = self.tmp_fac_A * self.vPfuncRnt_next(mRntNrm_next)
if self.RentPrb < 1.0:
dvdmHse_next = self.vPfuncHse_next(mNrm_next, hseNrm_next)
# Combine by adjustment probability
dvdm_next = (
self.RentPrb * dvdmRnt_next + (1.0 - self.RentPrb) * dvdmHse_next
)
else: # Don't bother evaluating if there's no chance that household keeps house
dvdm_next = dvdmRnt_next
return (self.PermGroFac * shocks[self.PermShkPos]) ** (
-self.CRRA
) * dvdm_next
# Evaluate realizations of marginal value of risky share next period
# No marginal value of Share if it's a free choice!
# Calculate intermediate marginal value of bank balances by taking expectations over income shocks
dvdb_intermed = calc_expectation(
self.ShockDstn, dvdb_dist, self.bNrm_tiled, self.House_tiled
)
dvdb_intermed = dvdb_intermed[:, :, 0]
dvdbNvrs_intermed = self.uPinv(dvdb_intermed)
dvdbNvrsFunc_intermed = BilinearInterp(
dvdbNvrs_intermed, self.bNrmGrid, self.HouseGrid
)
dvdbFunc_intermed = MargValueFuncCRRA(dvdbNvrsFunc_intermed, self.CRRA)
def EndOfPrddvda_dist(shock, a_nrm, hse_nrm, share):
# Calculate future realizations of bank balances bNrm
Rxs = shock - self.Rfree
Rport = self.Rfree + share * Rxs
b_nrm_next = Rport * a_nrm
return Rport * dvdbFunc_intermed(b_nrm_next, hse_nrm)
def EndOfPrddvds_dist(shock, a_nrm, hse_nrm, share):
# Calculate future realizations of bank balances bNrm
Rxs = shock - self.Rfree
Rport = self.Rfree + share * Rxs
b_nrm_next = Rport * a_nrm
# No marginal value of Share if it's a free choice!
return Rxs * a_nrm * dvdbFunc_intermed(b_nrm_next, hse_nrm)
# Calculate end-of-period marginal value of assets by taking expectations
EndOfPrddvda = (
self.DiscFac
* self.LivPrb
* calc_expectation(
self.RiskyDstn,
EndOfPrddvda_dist,
self.aNrm_3tiled,
self.House_3tiled,
self.Share_3tiled,
)
)
EndOfPrddvda = EndOfPrddvda[:, :, :, 0]
temp_fac_hse = (1.0 - self.HouseShare) * self.House_3tiled ** (
self.HouseShare * (1.0 - self.CRRA)
)
c_opt = EndOfPrddvda / temp_fac_hse
self.c_opt = c_opt ** (
1 / (-self.CRRA * (1.0 - self.HouseShare) - self.HouseShare)
)
# Calculate end-of-period marginal value of risky portfolio share by taking expectations
EndOfPrddvds = (
self.DiscFac
* self.LivPrb
* calc_expectation(
self.RiskyDstn,
EndOfPrddvds_dist,
self.aNrm_3tiled,
self.House_3tiled,
self.Share_3tiled,
)
)
EndOfPrddvds = EndOfPrddvds[:, :, :, 0]
self.EndOfPrddvds = EndOfPrddvds
def optimize_share(self):
"""
Optimization of Share on continuous interval [0,1]
"""
# Initialize to putting everything in safe asset
self.Share_now = np.zeros((self.aNrm_N, self.House_N))
self.cNrmHse_now = np.zeros((self.aNrm_N, self.House_N))
# For each value of hNrm, find the value of Share such that FOC-Share == 0.
for h in range(self.House_N):
# For values of aNrm at which the agent wants to put more than 100% into risky asset, constrain them
FOC_s = self.EndOfPrddvds[:, h]
# If agent wants to put more than 100% into risky asset, he is constrained
constrained_top = FOC_s[:, -1] > 0.0
# Likewise if he wants to put less than 0% into risky asset
constrained_bot = FOC_s[:, 0] < 0.0
# so far FOC never greater than 0.0
self.Share_now[constrained_top, h] = 1.0
if not self.zero_bound:
# aNrm=0, so there's no way to "optimize" the portfolio
self.Share_now[0, h] = 1.0
# Consumption when aNrm=0 does not depend on Share
self.cNrmHse_now[0, h] = self.c_opt[0, h, -1]
# Mark as constrained so that there is no attempt at optimization
constrained_top[0] = True
# Get consumption when share-constrained
self.cNrmHse_now[constrained_top, h] = self.c_opt[constrained_top, h, -1]
self.cNrmHse_now[constrained_bot, h] = self.c_opt[constrained_bot, h, 0]
# For each value of aNrm, find the value of Share such that FOC-Share == 0.
# This loop can probably be eliminated, but it's such a small step that it won't speed things up much.
crossing = np.logical_and(FOC_s[:, 1:] <= 0.0, FOC_s[:, :-1] >= 0.0)
for j in range(self.aNrm_N):
if not (constrained_top[j] or constrained_bot[j]):
idx = np.argwhere(crossing[j, :])[0][0]
bot_s = self.ShareGrid[idx]
top_s = self.ShareGrid[idx + 1]
bot_f = FOC_s[j, idx]
top_f = FOC_s[j, idx + 1]
bot_c = self.c_opt[j, h, idx]
top_c = self.c_opt[j, h, idx + 1]
alpha = 1.0 - top_f / (top_f - bot_f)
self.Share_now[j, h] = (1.0 - alpha) * bot_s + alpha * top_s
self.cNrmHse_now[j, h] = (1.0 - alpha) * bot_c + alpha * top_c
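    # Worked example (added for illustration; the numbers are hypothetical): if the
    # FOC brackets zero with bot_f = 0.2 at bot_s and top_f = -0.1 at top_s, then
    # alpha = 1 - (-0.1) / (-0.1 - 0.2) = 2/3, so the interpolated optimum lies two
    # thirds of the way from bot_s to top_s -- exactly where a linear FOC crosses zero.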
def optimize_share_discrete(self):
# Major method fork: discrete vs continuous choice of risky portfolio share
if self.DiscreteShareBool:
# Optimization of Share on the discrete set ShareGrid
opt_idx = np.argmax(self.EndOfPrdv, axis=2)
# Best portfolio share is one with highest value
Share_now = self.ShareGrid[opt_idx]
# Take cNrm at that index as well
cNrmHse_now = self.c_opt[
np.arange(self.aNrm_N), np.arange(self.House_N), opt_idx
]
if not self.zero_bound:
# aNrm=0, so there's no way to "optimize" the portfolio
Share_now[0] = 1.0
# Consumption when aNrm=0 does not depend on Share
cNrmHse_now[0] = self.c_opt[0, :, -1]
def make_basic_solution(self):
"""
Given end of period assets and end of period marginal values, construct
the basic solution for this period.
"""
# Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio
self.mNrmHse_now = (
self.aNrm_2tiled
+ self.cNrmHse_now
+ (self.HseGroFac - (1.0 - self.HseDiscFac))
* self.HseInitPrice
* self.House_2tiled
)
self.mNrmMin = (
(self.HseGroFac - (1.0 - self.HseDiscFac))
* self.HseInitPrice
* self.HouseGrid
)
# Construct the consumption function when the agent can adjust
cNrmHse_by_hse = []
cNrmHse_now = np.insert(self.cNrmHse_now, 0, 0.0, axis=0)
mNrmHse_now_temp = np.insert(self.mNrmHse_now, 0, self.mNrmMin, axis=0)
for h in range(self.House_N):
cNrmHse_by_hse.append(
LinearInterp(mNrmHse_now_temp[:, h], cNrmHse_now[:, h])
)
self.cFuncHse_now = LinearInterpOnInterp1D(cNrmHse_by_hse, self.HouseGrid)
# Construct the marginal value (of mNrm) function when the agent can adjust
# this needs to be reworked
self.vPfuncHse_now = MargValueFuncHousing(
self.cFuncHse_now, self.HouseGrid, self.CRRA, self.HouseShare
)
def make_ShareFuncHse(self):
"""
Construct the risky share function when the agent can adjust
"""
if self.zero_bound:
Share_lower_bound = self.ShareLimit
else:
Share_lower_bound = 1.0
Share_now = np.insert(self.Share_now, 0, Share_lower_bound, axis=0)
mNrmHse_now_temp = np.insert(self.mNrmHse_now, 0, self.mNrmMin, axis=0)
ShareFuncHse_by_hse = []
for j in range(self.House_N):
ShareFuncHse_by_hse.append(
LinearInterp(
mNrmHse_now_temp[:, j],
Share_now[:, j],
intercept_limit=self.ShareLimit,
slope_limit=0.0,
)
)
self.ShareFuncHse_now = LinearInterpOnInterp1D(
ShareFuncHse_by_hse, self.HouseGrid
)
def make_ShareFuncHse_discrete(self):
# TODO
mNrmHse_mid = (self.mNrmHse_now[1:] + self.mNrmHse_now[:-1]) / 2
mNrmHse_plus = mNrmHse_mid * (1.0 + 1e-12)
mNrmHse_comb = (np.transpose(np.vstack((mNrmHse_mid, mNrmHse_plus)))).flatten()
mNrmHse_comb = np.append(np.insert(mNrmHse_comb, 0, 0.0), self.mNrmHse_now[-1])
Share_comb = (
np.transpose(np.vstack((self.Share_now, self.Share_now)))
).flatten()
self.ShareFuncHse_now = LinearInterp(mNrmHse_comb, Share_comb)
def add_vFunc(self):
"""
Creates the value function for this period and adds it to the solution.
"""
self.make_EndOfPrdvFunc()
self.make_vFunc()
def make_EndOfPrdvFunc(self):
"""
Construct the end-of-period value function for this period, storing it
as an attribute of self for use by other methods.
"""
# If the value function has been requested, evaluate realizations of value
def v_intermed_dist(shocks, b_nrm, hse_nrm):
mNrm_next = self.m_nrm_next(shocks, b_nrm)
hseNrm_next = self.hse_nrm_next(shocks, hse_nrm)
mRntNrm = self.m_rnt_nrm_next(shocks, mNrm_next, hseNrm_next)
vRnt_next = self.tmp_fac_A * self.vFuncRnt_next(mRntNrm)
if self.RentPrb < 1.0:
# Combine by adjustment probability
vHse_next = self.vFuncHse_next(mNrm_next, hseNrm_next)
v_next = self.RentPrb * vRnt_next + (1.0 - self.RentPrb) * vHse_next
else: # Don't bother evaluating if there's no chance that household keeps house
v_next = vRnt_next
return (self.PermGroFac * shocks[self.PermShkPos]) ** (
1.0 - self.CRRA
) * v_next
# Calculate intermediate value by taking expectations over income shocks
v_intermed = calc_expectation(
self.ShockDstn, v_intermed_dist, self.bNrm_tiled, self.House_tiled
)
v_intermed = v_intermed[:, :, 0]
vNvrs_intermed = self.uinv(v_intermed)
vNvrsFunc_intermed = BilinearInterp(
vNvrs_intermed, self.bNrmGrid, self.HouseGrid
)
vFunc_intermed = ValueFuncCRRA(vNvrsFunc_intermed, self.CRRA)
def EndOfPrdv_dist(shock, a_nrm, hse_nrm, share):
# Calculate future realizations of bank balances bNrm
Rxs = shock - self.Rfree
Rport = self.Rfree + share * Rxs
b_nrm_next = Rport * a_nrm
return vFunc_intermed(b_nrm_next, hse_nrm)
# Calculate end-of-period value by taking expectations
self.EndOfPrdv = (
self.DiscFac
* self.LivPrb
* calc_expectation(
self.RiskyDstn,
EndOfPrdv_dist,
self.aNrm_3tiled,
self.House_3tiled,
self.Share_3tiled,
)
)
self.EndOfPrdv = self.EndOfPrdv[:, :, :, 0]
self.EndOfPrdvNvrs = self.uinv(self.EndOfPrdv)
def make_vFunc(self):
"""
Creates the value functions for this period, defined over market
resources m when agent can adjust his portfolio, and over market
resources and fixed share when agent can not adjust his portfolio.
self must have the attribute EndOfPrdvFunc in order to execute.
"""
# First, make an end-of-period value function over aNrm and Share
EndOfPrdvNvrsFunc = TrilinearInterp(
self.EndOfPrdvNvrs, self.aNrmGrid, self.HouseGrid, self.ShareGrid
)
EndOfPrdvFunc = ValueFuncCRRA(EndOfPrdvNvrsFunc, self.CRRA)
# Construct the value function when the agent can adjust his portfolio
# Just use aXtraGrid as our grid of mNrm values
mNrm = self.aXtraGrid
        mNrm_tiled, House_tiled = np.meshgrid(mNrm, self.HouseGrid, indexing="ij")
########################################################################
#
# Copyright 2014 Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contact: <EMAIL>
# Website: http://turbulence.pha.jhu.edu/
#
########################################################################
import numpy
import scipy
import scipy.spatial
def points_on_sphere(
N,
origin = numpy.zeros(3),
radius = 1.):
""" Generate N evenly distributed points on the unit sphere centered at
the origin. Uses the 'Golden Spiral'.
Code by <NAME> from the numpy-discussion list.
"""
phi = (1 + numpy.sqrt(5)) / 2 # the golden ratio
long_incr = 2*numpy.pi / phi # how much to increment the longitude
dz = 2.0 / float(N) # a unit sphere has diameter 2
bands = numpy.arange(N) # each band will have one point placed on it
z = bands * dz - 1 + (dz/2) # the height z of each band/point
    r = numpy.sqrt(1 - z*z)      # the radius of each band/point (point lies on the sphere)
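    # --- Hedged completion (added, not part of the original file): the remaining
    # golden-spiral steps, assuming one point per band, offset in longitude by the
    # golden-angle increment computed above. ---
    longitudes = bands * long_incr               # the longitude of each point
    points = numpy.empty((N, 3))
    points[:, 0] = origin[0] + radius * r * numpy.cos(longitudes)
    points[:, 1] = origin[1] + radius * r * numpy.sin(longitudes)
    points[:, 2] = origin[2] + radius * z
    return points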
from __future__ import print_function
import numpy as np
import unittest
import discretize
np.random.seed(182)
MESHTYPES = ['uniformTensorMesh', 'randomTensorMesh']
TOLERANCES = [0.9, 0.5, 0.5]
call1 = lambda fun, xyz: fun(xyz)
call2 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, -1])
call3 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 1], xyz[:, 2])
cart_row2 = lambda g, xfun, yfun: np.c_[call2(xfun, g), call2(yfun, g)]
cart_row3 = lambda g, xfun, yfun, zfun: np.c_[call3(xfun, g), call3(yfun, g), call3(zfun, g)]
cartF2 = lambda M, fx, fy: np.vstack((cart_row2(M.gridFx, fx, fy), cart_row2(M.gridFy, fx, fy)))
cartF2Cyl = lambda M, fx, fy: np.vstack((cart_row2(M.gridFx, fx, fy), cart_row2(M.gridFz, fx, fy)))
cartE2 = lambda M, ex, ey: np.vstack((cart_row2(M.gridEx, ex, ey), cart_row2(M.gridEy, ex, ey)))
cartE2Cyl = lambda M, ex, ey: cart_row2(M.gridEy, ex, ey)
cartF3 = lambda M, fx, fy, fz: np.vstack((cart_row3(M.gridFx, fx, fy, fz), cart_row3(M.gridFy, fx, fy, fz), cart_row3(M.gridFz, fx, fy, fz)))
cartE3 = lambda M, ex, ey, ez: np.vstack((cart_row3(M.gridEx, ex, ey, ez), cart_row3(M.gridEy, ex, ey, ez), cart_row3(M.gridEz, ex, ey, ez)))
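# Illustrative note (added, not from the original file): these helpers evaluate
# analytic vector fields on a mesh's face/edge grids and stack the components so
# they can be projected onto face normals / edge tangents, e.g. (mesh name is
# hypothetical):
# >>> Fc = cartF2(mesh, lambda x, y: x, lambda x, y: y)
# >>> face_values = mesh.projectFaceVector(Fc)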
TOL = 1e-7
class TestInterpolation1D(discretize.Tests.OrderTest):
LOCS = np.random.rand(50)*0.6+0.2
name = "Interpolation 1D"
meshTypes = MESHTYPES
tolerance = TOLERANCES
meshDimension = 1
meshSizes = [8, 16, 32, 64, 128]
def getError(self):
funX = lambda x: np.cos(2*np.pi*x)
ana = call1(funX, self.LOCS)
if 'CC' == self.type:
grid = call1(funX, self.M.gridCC)
elif 'N' == self.type:
grid = call1(funX, self.M.gridN)
comp = self.M.getInterpolationMat(self.LOCS, self.type)*grid
err = np.linalg.norm((comp - ana), 2)
return err
def test_orderCC(self):
self.type = 'CC'
self.name = 'Interpolation 1D: CC'
self.orderTest()
def test_orderN(self):
self.type = 'N'
self.name = 'Interpolation 1D: N'
self.orderTest()
class TestOutliersInterp1D(unittest.TestCase):
def setUp(self):
pass
def test_outliers(self):
M = discretize.TensorMesh([4])
Q = M.getInterpolationMat(np.array([[0], [0.126], [0.127]]), 'CC', zerosOutside=True)
x = np.arange(4)+1
self.assertTrue(np.linalg.norm(Q*x - np.r_[1, 1.004, 1.008]) < TOL)
Q = M.getInterpolationMat(np.array([[-1], [0.126], [0.127]]), 'CC', zerosOutside=True)
self.assertTrue(np.linalg.norm(Q*x - np.r_[0, 1.004, 1.008]) < TOL)
class TestInterpolation2d(discretize.Tests.OrderTest):
name = "Interpolation 2D"
LOCS = np.random.rand(50, 2)*0.6+0.2
meshTypes = MESHTYPES
tolerance = TOLERANCES
meshDimension = 2
meshSizes = [8, 16, 32, 64]
def getError(self):
funX = lambda x, y: np.cos(2*np.pi*y)
funY = lambda x, y: np.cos(2*np.pi*x)
if 'x' in self.type:
ana = call2(funX, self.LOCS)
elif 'y' in self.type:
ana = call2(funY, self.LOCS)
else:
ana = call2(funX, self.LOCS)
if 'F' in self.type:
Fc = cartF2(self.M, funX, funY)
grid = self.M.projectFaceVector(Fc)
elif 'E' in self.type:
Ec = cartE2(self.M, funX, funY)
grid = self.M.projectEdgeVector(Ec)
elif 'CC' == self.type:
grid = call2(funX, self.M.gridCC)
elif 'N' == self.type:
grid = call2(funX, self.M.gridN)
comp = self.M.getInterpolationMat(self.LOCS, self.type)*grid
err = np.linalg.norm((comp - ana), np.inf)
return err
def test_orderCC(self):
self.type = 'CC'
self.name = 'Interpolation 2D: CC'
self.orderTest()
def test_orderN(self):
self.type = 'N'
self.name = 'Interpolation 2D: N'
self.orderTest()
def test_orderFx(self):
self.type = 'Fx'
self.name = 'Interpolation 2D: Fx'
self.orderTest()
def test_orderFy(self):
self.type = 'Fy'
self.name = 'Interpolation 2D: Fy'
self.orderTest()
def test_orderEx(self):
self.type = 'Ex'
self.name = 'Interpolation 2D: Ex'
self.orderTest()
def test_orderEy(self):
self.type = 'Ey'
self.name = 'Interpolation 2D: Ey'
self.orderTest()
class TestInterpolation2dCyl_Simple(unittest.TestCase):
def test_simpleInter(self):
M = discretize.CylMesh([4, 1, 1])
locs = np.r_[0, 0, 0.5]
fx = np.array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
self.assertTrue( np.all(fx == M.getInterpolationMat(locs, 'Fx').todense()) )
fz = np.array([[ 0., 0., 0., 0., 0.5, 0., 0., 0., 0.5, 0., 0., 0.]])
self.assertTrue( np.all(fz == M.getInterpolationMat(locs, 'Fz').todense()) )
def test_exceptions(self):
M = discretize.CylMesh([4, 1, 1])
locs = np.r_[0, 0, 0.5]
self.assertRaises(Exception, lambda:M.getInterpolationMat(locs, 'Fy'))
self.assertRaises(Exception, lambda:M.getInterpolationMat(locs, 'Ex'))
self.assertRaises(Exception, lambda:M.getInterpolationMat(locs, 'Ez'))
class TestInterpolation2dCyl(discretize.Tests.OrderTest):
name = "Interpolation 2D"
LOCS = np.c_[np.random.rand(4)*0.6+0.2, np.zeros(4), np.random.rand(4)*0.6+0.2]
meshTypes = ['uniformCylMesh'] # MESHTYPES +
tolerance = 0.6
meshDimension = 2
meshSizes = [32, 64, 128, 256]
def getError(self):
funX = lambda x, y: np.cos(2*np.pi*y)
funY = lambda x, y: np.cos(2*np.pi*x)
if 'x' in self.type:
ana = call2(funX, self.LOCS)
elif 'y' in self.type:
ana = call2(funY, self.LOCS)
elif 'z' in self.type:
ana = call2(funY, self.LOCS)
else:
ana = call2(funX, self.LOCS)
if 'Fx' == self.type:
Fc = cartF2Cyl(self.M, funX, funY)
Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
grid = self.M.projectFaceVector(Fc)
elif 'Fz' == self.type:
Fc = cartF2Cyl(self.M, funX, funY)
Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
grid = self.M.projectFaceVector(Fc)
elif 'E' in self.type:
Ec = cartE2Cyl(self.M, funX, funY)
grid = Ec[:, 1]
elif 'CC' == self.type:
grid = call2(funX, self.M.gridCC)
elif 'N' == self.type:
grid = call2(funX, self.M.gridN)
comp = self.M.getInterpolationMat(self.LOCS, self.type)*grid
err = np.linalg.norm((comp - ana), np.inf)
return err
def test_orderCC(self):
self.type = 'CC'
self.name = 'Interpolation 2D CYLMESH: CC'
self.orderTest()
def test_orderN(self):
self.type = 'N'
self.name = 'Interpolation 2D CYLMESH: N'
self.orderTest()
def test_orderFx(self):
self.type = 'Fx'
self.name = 'Interpolation 2D CYLMESH: Fx'
self.orderTest()
def test_orderFz(self):
self.type = 'Fz'
self.name = 'Interpolation 2D CYLMESH: Fz'
self.orderTest()
def test_orderEy(self):
self.type = 'Ey'
self.name = 'Interpolation 2D CYLMESH: Ey'
self.orderTest()
class TestInterpolation3D(discretize.Tests.OrderTest):
name = "Interpolation"
LOCS = np.random.rand(50, 3)*0.6+0.2
meshTypes = MESHTYPES
tolerance = TOLERANCES
meshDimension = 3
meshSizes = [8, 16, 32, 64]
def getError(self):
        funX = lambda x, y, z: np.cos(2*np.pi*y)
import numpy as np
from matplotlib import pyplot as plt
import time
import utils as ut
def print_loss(iteration_results,final_results,n_chessboard):
'''
    iteration_results: list of lists containing the value of the loss function at each iteration for every chessboard
final_results: list of the final loss function value for each chessboard
n_chessboard: number of processed chessboard
'''
#plot of the descent of loss function for each chessboard
for i in range(n_chessboard):
plt.plot(*zip(*iteration_results[i]),label = "Chessboard {}".format(i))
    plt.title('Comparison between the descent of the loss for every analyzed chessboard')
#plot settings
plt.legend()
plt.xlabel('Number of iterations')
plt.ylabel('Loss function')
plt.show()
plt.ylabel('Final value of loss function')
plt.title('Final value of the loss for every chessboard')
#plot of the final values of the loss function for each chessboard
plt.plot(*zip(*final_results),markersize=10,marker='x',linewidth=0, color='r')
plt.tight_layout()
plt.show()
def generate_starting_points(point, stepsize):
'''
    point: point from which the orthogonal base is generated
    stepsize: length of the sides of the simplex
'''
num = point.shape[0]
identity = np.eye(num)
starting_points = [point]
#generation of initial simplex
for i in range(num):
starting_points.append(point + stepsize * identity[i,:].T)
return starting_points
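# Illustrative example (added, not from the original file): starting from the
# origin in 2-D with stepsize 0.5, the initial simplex has three vertices.
# >>> generate_starting_points(np.array([0.0, 0.0]), 0.5)
# [array([0., 0.]), array([0.5, 0. ]), array([0. , 0.5])]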
def centroid_calculation(simplex,loss_function,m,w):
'''
simplex: current simplex
loss_function: function of loss
m: image_points used as loss function parameter
w: worlds point used as loss function parameter
'''
centroid = np.zeros(len(simplex)-1)
for i in range(len(simplex)-1):
centroid += simplex[i][1]
centroid /= float( len(simplex)-1 )
centroid_value = loss_function(m,np.reshape(centroid,(3,3)),w)
return (centroid_value,centroid)
def reflection(worst,centroid,coeff,loss_function,m,w):
'''
worst: tuple of the worst point of the simplex
centroid: current centroid tuple of the simplex
coeff: reflection coefficent
loss_function: function of loss
m: image_points used as loss function parameter
w: worlds point used as loss function parameter
'''
reflection_point = centroid[1] * ( 1.0 + coeff ) - coeff * worst[1]
reflection_value = loss_function(m, np.reshape(reflection_point,(3,3)) ,w)
return (reflection_value, reflection_point)
def expansion(reflection,centroid,coeff,loss_function,m,w):
'''
reflection: reflected tuple of the simplex
centroid: current centroid tuple of the simplex
coeff: expansion coefficent
loss_function: function of loss
m: image_points used as loss function parameter
w: worlds point used as loss function parameter
'''
expansion_point = centroid[1] * (1-coeff) + coeff*reflection[1]
expansion_value = loss_function(m, np.reshape(expansion_point,(3,3)) ,w)
return (expansion_value,expansion_point)
def outer_contraction(reflection,centroid,coeff,loss_function,m,w):
'''
reflection: reflected tuple of the simplex
centroid: current centroid tuple of the simplex
coeff: contraction coefficent
loss_function: function of loss
m: image_points used as loss function parameter
w: worlds point used as loss function parameter
'''
contraction_point = centroid[1] + coeff * (reflection[1] - centroid[1])
contraction_value = loss_function(m, np.reshape(contraction_point,(3,3)), w)
return (contraction_value,contraction_point)
def inner_contraction(reflection,centroid,coeff,loss_function,m,w):
'''
reflection: reflected tuple of the simplex
centroid: current centroid tuple of the simplex
coeff: contraction coefficent
loss_function: function of loss
m: image_points used as loss function parameter
w: worlds point used as loss function parameter
'''
contraction_point = centroid[1] - coeff * (reflection[1] - centroid[1])
    contraction_value = loss_function(m, np.reshape(contraction_point,(3,3)), w)
    return (contraction_value, contraction_point)
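# --- Hedged sketch (added, not from the original file): one way the helper
# operations above could be combined into a single Nelder-Mead iteration over the
# flattened 3x3 matrix being estimated. The acceptance rules and default
# coefficients are assumptions; only the helper signatures (loss_function, m, w)
# come from the code above, and the shrink step is omitted. ---
def nelder_mead_step(simplex, loss_function, m, w,
                     refl_coeff=1.0, exp_coeff=2.0, contr_coeff=0.5):
    # simplex is a list of (value, point) tuples; sort so the worst point is last
    simplex.sort(key=lambda entry: entry[0])
    best, second_worst, worst = simplex[0], simplex[-2], simplex[-1]
    centroid = centroid_calculation(simplex, loss_function, m, w)
    refl = reflection(worst, centroid, refl_coeff, loss_function, m, w)
    if refl[0] < best[0]:
        # promising direction: try to expand further
        exp = expansion(refl, centroid, exp_coeff, loss_function, m, w)
        simplex[-1] = exp if exp[0] < refl[0] else refl
    elif refl[0] < second_worst[0]:
        simplex[-1] = refl
    elif refl[0] < worst[0]:
        cont = outer_contraction(refl, centroid, contr_coeff, loss_function, m, w)
        simplex[-1] = cont if cont[0] < refl[0] else refl
    else:
        cont = inner_contraction(refl, centroid, contr_coeff, loss_function, m, w)
        if cont[0] < worst[0]:
            simplex[-1] = cont
    return simplex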
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes
# Initialize
device = select_device('')
half = device.type != 'cpu' # half precision only supported on CUDA
def prepare_input(img1, img_size=416, half=True):
img2 = cv2.resize(img1, (img_size, img_size)) # W x H
img2 = img2.transpose(2,0,1)
img2 = img2[np.newaxis, ...]
img2 = torch.from_numpy(img2).to(device) # torch image is ch x H x W
    img2 = img2.half() if half else img2.float()  # match the model precision (FP16 when half=True)
img2 /= 255.0
return img2
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0,1,2,3,4,5]
class_names = ["blossom_end_rot", "graymold","powdery_mildew","spider_mite",
"spotting_disease", "snails_and_slugs"]
# deleting files in op_dir
filelist = [ f for f in os.listdir(out)]# if f.endswith(".png") ]
for f in tqdm(filelist, desc='Deleting old files from directory'):
os.remove(os.path.join(out, f))
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
#%%
for i in trange(len(img_paths)):
path = img_paths[i]
img1 = cv2.imread(path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
# get file name
name = os.path.basename(path)[:-4]
# Inference
t1 = time_synchronized()
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
if pred[0] is not None:
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
else:
boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1,6) # dummy values
    coords_minmax = np.zeros((boxes.shape[0], 4)) # dropping 5th value (confidence)
    confd = np.zeros((boxes.shape[0], 1))
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 17 15:58:22 2020
@author: vivek
"""
### statsmodels vs sklearn
# both packages are frequently tagged with python, statistics, and data-analysis
# differences between them highlight what each in particular has to offer:
# scikit-learn’s other popular topics are machine-learning and data-science;
# StatsModels are econometrics, generalized-linear-models, timeseries-analysis, and regression-models
### Introduction
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Example 1
# Load data
dat = sm.datasets.get_rdataset("Guerry", "HistData").data
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# Example 2
# Generate artificial data (2 regressors + constant)
X = np.random.random((100, 2))
X = sm.add_constant(X)
beta = [1, .1, .5]
e = np.random.random(100)
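# Hedged continuation (added, not from the original file): the usual way this
# artificial-data example is completed -- build the response from the known
# coefficients and fit an ordinary least squares model.
y = np.dot(X, beta) + e
results = sm.OLS(y, X).fit()
print(results.summary())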
"""Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
"""
Usage: IcoSphere(level)
Maximum supported level = 8
get started with:
>>> A = IcoSphere(3)
... A.plot3d()
"""
# maximum level for subdivision of the icosahedron
maxlevel = 8
def __init__(self, level):
if type(level) is not int:
raise TypeError('level must be an integer')
elif level < 0:
raise Exception('level must be no less than 0')
elif level > self.maxlevel:
raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
self.level = level
self.points = []
self.triangles = []
self.npts = 0
################################
# initialise level 1 icosahedron
################################
        # golden ratio
t = (1.0 + np.sqrt(5.0)) / 2.0
# add vertices
self._addPoint(np.array([-1, t, 0]))
self._addPoint(np.array([ 1, t, 0]))
self._addPoint(np.array([-1,-t, 0]))
self._addPoint(np.array([ 1,-t, 0]))
self._addPoint(np.array([ 0,-1, t]))
self._addPoint(np.array([ 0, 1, t]))
self._addPoint(np.array([ 0,-1,-t]))
self._addPoint(np.array([ 0, 1,-t]))
self._addPoint(np.array([ t, 0,-1]))
self._addPoint(np.array([ t, 0, 1]))
self._addPoint(np.array([-t, 0,-1]))
self._addPoint(np.array([-t, 0, 1]))
# make triangles
tris = self.triangles
verts = self.points
# 5 faces around point 0
tris.append(Triangle([ verts[0],verts[11], verts[5]]))
tris.append(Triangle([ verts[0], verts[5], verts[1]]))
tris.append(Triangle([ verts[0], verts[1], verts[7]]))
tris.append(Triangle([ verts[0], verts[7],verts[10]]))
tris.append(Triangle([ verts[0],verts[10],verts[11]]))
# 5 adjacent faces
tris.append(Triangle([ verts[1], verts[5], verts[9]]))
tris.append(Triangle([ verts[5],verts[11], verts[4]]))
tris.append(Triangle([verts[11],verts[10], verts[2]]))
tris.append(Triangle([verts[10], verts[7], verts[6]]))
tris.append(Triangle([ verts[7], verts[1], verts[8]]))
# 5 faces around point 3
tris.append(Triangle([ verts[3], verts[9], verts[4]]))
tris.append(Triangle([ verts[3], verts[4], verts[2]]))
tris.append(Triangle([ verts[3], verts[2], verts[6]]))
tris.append(Triangle([ verts[3], verts[6], verts[8]]))
tris.append(Triangle([ verts[3], verts[8], verts[9]]))
# 5 adjacent faces
tris.append(Triangle([ verts[4], verts[9], verts[5]]))
tris.append(Triangle([ verts[2], verts[4],verts[11]]))
tris.append(Triangle([ verts[6], verts[2],verts[10]]))
tris.append(Triangle([ verts[8], verts[6], verts[7]]))
tris.append(Triangle([ verts[9], verts[8], verts[1]]))
########################################
# refine triangles to desired mesh level
########################################
for l in range(self.level):
midPointDict = {}
faces = []
for tri in self.triangles:
# replace triangle by 4 triangles
p = tri.pts
a = self._getMiddlePoint(p[0], p[1], midPointDict)
b = self._getMiddlePoint(p[1], p[2], midPointDict)
c = self._getMiddlePoint(p[2], p[0], midPointDict)
faces.append(Triangle([p[0], a, c]))
faces.append(Triangle([p[1], b, a]))
faces.append(Triangle([p[2], c, b]))
faces.append(Triangle([a, b, c]))
            # once we have looped through all triangles, overwrite self.triangles
self.triangles = faces
self.nfaces = len(self.triangles)
# check that npts and nfaces are as expected
expected_npts = calculate_npts(self.level)
expected_nfaces = calculate_nfaces(self.level)
if self.npts != calculate_npts(self.level):
raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
elif self.nfaces != calculate_nfaces(self.level):
raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))
def _addPoint(self, xyz):
"""Add point to self.points"""
self.points.append(Point(self.npts, xyz))
self.npts += 1
def _getMiddlePoint(self, p1, p2, midPointDict):
"""return Point"""
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError('p1 and p2 must be Points')
# does point already exist?
key = tuple(sorted([p1.idx, p2.idx]))
if key in midPointDict:
# point exists
pass
else:
# point is new
self._addPoint((p1.xyz + p2.xyz)/2)
midPointDict[key] = self.points[-1]
return midPointDict[key]
def plot3d(self):
"""Matplotlib 3D plot of mesh"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = np.asarray([ pt.xyz for pt in self.points ])
x = xyz[:,0]
y = xyz[:,1]
z = xyz[:,2]
ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
ax.plot_trisurf(x,y,ts,z)
plt.show()
def dump_xyz(self):
[ print(*pt.xyz) for pt in self.points ]
def dump_latlonr(self):
[ print(*cart2geo(*pt.xyz)) for pt in self.points ]
class Triangle:
"""A triangle adjoining three adjacent points"""
def __init__(self, pts):
if not isinstance(pts, list):
raise TypeError('pts must be a list')
elif len(pts) !=3:
raise Exception('pts must be of length 3')
else:
self.pts = pts
class Point:
"""A 3D point on the mesh"""
def __init__(self, idx, xyz):
if type(idx) is not int:
raise TypeError('idx must be an integer')
elif not isinstance(xyz,np.ndarray):
raise TypeError('xyz must be a numpy array')
elif xyz.size != 3:
raise Exception('xyz must be of size 3')
else:
# ensure length equals 1 and add to list of points
self.xyz = (xyz/np.linalg.norm(xyz))
self.idx = idx
def calculate_npts(level):
n = 2**level
return 2 + 10 * n**2
def calculate_nfaces(level):
n = 2**level
return 20 * n**2
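# Example (added for illustration): one refinement level (level=1) yields
# 42 vertices and 80 triangular faces.
# >>> calculate_npts(1), calculate_nfaces(1)
# (42, 80)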
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x))
lat = np.rad2deg(np.arcsin(z/r))
return lat, lon, r
def geo2cart(lat, lon, r):
"""convert latitude longitude radius to x y z cartesian coordinates
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
    x = r * np.cos(lon)
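    # --- Hedged completion (added, not from the original file): the remaining
    # components of the spherical-to-Cartesian conversion, mirroring cart2geo
    # above. The extra cos(lat) factor on x and the y/z expressions are
    # assumptions; a fully consistent inverse of cart2geo would also apply
    # np.deg2rad to lat and lon first, since cart2geo returns degrees. ---
    x = x * np.cos(lat)
    y = r * np.sin(lon) * np.cos(lat)
    z = r * np.sin(lat)
    return x, y, z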
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.ndimage.measurements import center_of_mass
from bcdi.experiment.detector import create_detector
from bcdi.experiment.setup import Setup
import bcdi.preprocessing.bcdi_utils as bu
import bcdi.graph.graph_utils as gu
import bcdi.utils.utilities as util
import bcdi.utils.validation as valid
helptext = """
Open a series of rocking curve data and track the position of the Bragg peak over the
series. Supported beamlines: ESRF ID01, PETRAIII P10, SOLEIL SIXS, SOLEIL CRISTAL,
MAX IV NANOMAX.
"""
scans = np.arange(1460, 1475 + 1, step=3) # list or array of scan numbers
scans = np.concatenate((scans, np.arange(1484, 1586 + 1, 3)))
scans = np.concatenate((scans, np.arange(1591, 1633 + 1, 3)))
scans = np.concatenate((scans, np.arange(1638, 1680 + 1, 3)))
root_folder = "D:/data/P10_OER/data/"
sample_name = "dewet2_2" # list of sample names. If only one name is indicated,
# it will be repeated to match the number of scans
save_dir = "D:/data/P10_OER/analysis/candidate_12/"
# images will be saved here, leave it to None otherwise (default to root_folder)
x_axis = [0.740 for _ in range(16)]
for _ in range(10):
x_axis.append(0.80)
for _ in range(15):
x_axis.append(-0.05)
for _ in range(15):
x_axis.append(0.3)
for _ in range(15):
x_axis.append(0.8)
# values against which the Bragg peak center of mass evolution will be plotted,
# leave [] otherwise
x_label = "voltage (V)" # label for the X axis in plots, leave '' otherwise
comment = "_BCDI_RC" # comment for the saving filename, should start with _
strain_range = 0.00005 # range for the plot of the q value
peak_method = (
"max_com" # Bragg peak determination: 'max', 'com', 'max_com' (max then com)
)
debug = False # set to True to see more plots
###############################
# beamline related parameters #
###############################
beamline = (
"P10" # name of the beamline, used for data loading and normalization by monitor
)
# supported beamlines: 'ID01', 'SIXS_2018', 'SIXS_2019', 'CRISTAL', 'P10'
custom_scan = False # True for a stack of images acquired without scan,
# e.g. with ct in a macro (no info in spec file)
custom_images = np.arange(11353, 11453, 1) # list of image numbers for the custom_scan
custom_monitor = np.ones(
len(custom_images)
) # monitor values for normalization for the custom_scan
custom_motors = {
"eta": np.linspace(16.989, 18.989, num=100, endpoint=False),
"phi": 0,
"nu": -0.75,
"delta": 36.65,
}
# ID01: eta, phi, nu, delta
# CRISTAL: mgomega, gamma, delta
# P10: om, phi, chi, mu, gamma, delta
# SIXS: beta, mu, gamma, delta
rocking_angle = "outofplane" # "outofplane" or "inplane"
is_series = False # specific to series measurement at P10
specfile_name = ""
# template for ID01: name of the spec file without '.spec'
# template for SIXS_2018: full path of the alias dictionnary,
# typically root_folder + 'alias_dict_2019.txt'
# template for all other beamlines: ''
###############################
# detector related parameters #
###############################
detector = "Eiger4M" # "Eiger2M" or "Maxipix" or "Eiger4M"
x_bragg = 1387 # horizontal pixel number of the Bragg peak,
# can be used for the definition of the ROI
y_bragg = 809 # vertical pixel number of the Bragg peak,
# can be used for the definition of the ROI
roi_detector = [
y_bragg - 200,
y_bragg + 200,
x_bragg - 400,
x_bragg + 400,
] # [Vstart, Vstop, Hstart, Hstop]
# leave it as None to use the full detector.
# Use with center_fft='skip' if you want this exact size.
debug_pix = 40 # half-width in pixels of the ROI centered on the Bragg peak
hotpixels_file = None # root_folder + 'hotpixels.npz' # non empty file path or None
flatfield_file = (
None # root_folder + "flatfield_8.5kev.npz" # non empty file path or None
)
template_imagefile = "_master.h5"
# template for ID01: 'data_mpx4_%05d.edf.gz' or 'align_eiger2M_%05d.edf.gz'
# template for SIXS_2018: 'align.spec_ascan_mu_%05d.nxs'
# template for SIXS_2019: 'spare_ascan_mu_%05d.nxs'
# template for Cristal: 'S%d.nxs'
# template for P10: '_master.h5'
# template for NANOMAX: '%06d.h5'
# template for 34ID: 'Sample%dC_ES_data_51_256_256.npz'
####################################
# q calculation related parameters #
####################################
convert_to_q = True # True to convert from pixels to q values using parameters below
beam_direction = (1, 0, 0) # beam along z
directbeam_x = 476 # x horizontal, cch2 in xrayutilities
directbeam_y = 1374 # y vertical, cch1 in xrayutilities
direct_inplane = -2.0 # outer angle in xrayutilities
direct_outofplane = 0.8
sdd = 1.83 # sample to detector distance in m
energy = 10300 # in eV, offset of 6eV at ID01
##################################
# end of user-defined parameters #
##################################
###################
# define colormap #
###################
bad_color = "1.0" # white
bckg_color = "0.7" # grey
colormap = gu.Colormap(bad_color=bad_color)
my_cmap = colormap.cmap
########################################
# check and initialize some parameters #
########################################
print(f"\n{len(scans)} scans: {scans}")
print(f"\n {len(x_axis)} x_axis values provided:")
if len(x_axis) == 0:
x_axis = np.arange(len(scans))
if len(x_axis) != len(scans):
raise ValueError("the length of x_axis should be equal to the number of scans")
if isinstance(sample_name, str):
sample_name = [sample_name for idx in range(len(scans))]
valid.valid_container(
sample_name,
container_types=(tuple, list),
length=len(scans),
item_types=str,
name="preprocess_bcdi",
)
if peak_method not in [
"max",
"com",
"max_com",
]:
raise ValueError('invalid value for "peak_method" parameter')
int_sum = [] # integrated intensity in the detector ROI
int_max = [] # maximum intensity in the detector ROI
zcom = [] # center of mass for the first data axis
ycom = [] # center of mass for the second data axis
xcom = [] # center of mass for the third data axis
tilt_com = [] # center of mass for the incident rocking angle
q_com = [] # q value of the center of mass
check_roi = [] # a small ROI around the Bragg peak will be stored for each scan,
# to see if the peak is indeed
# captured by the rocking curve
#######################
# Initialize detector #
#######################
detector = create_detector(
name=detector,
template_imagefile=template_imagefile,
roi=roi_detector,
)
####################
# Initialize setup #
####################
setup = Setup(
beamline=beamline,
detector=detector,
energy=energy,
rocking_angle=rocking_angle,
distance=sdd,
beam_direction=beam_direction,
custom_scan=custom_scan,
custom_images=custom_images,
custom_monitor=custom_monitor,
custom_motors=custom_motors,
is_series=is_series,
)
########################################
# print the current setup and detector #
########################################
print("\n##############\nSetup instance\n##############")
print(setup)
print("\n#################\nDetector instance\n#################")
print(detector)
###############################################
# load recursively the scans and update lists #
###############################################
flatfield = util.load_flatfield(flatfield_file)
hotpix_array = util.load_hotpixels(hotpixels_file)
for scan_idx, scan_nb in enumerate(scans, start=1):
tmp_str = f"Scan {scan_idx}/{len(scans)}: S{scan_nb}"
print(f'\n{"#" * len(tmp_str)}\n' + tmp_str + "\n" + f'{"#" * len(tmp_str)}')
# initialize the paths
setup.init_paths(
sample_name=sample_name[scan_idx - 1],
scan_number=scan_nb,
root_folder=root_folder,
save_dir=save_dir,
verbose=True,
specfile_name=specfile_name,
template_imagefile=template_imagefile,
)
# override the saving directory, we want to save results at the same place
detector.savedir = save_dir
logfile = setup.create_logfile(
scan_number=scan_nb, root_folder=root_folder, filename=detector.specfile
)
data, mask, frames_logical, monitor = bu.load_bcdi_data(
logfile=logfile,
scan_number=scan_nb,
detector=detector,
setup=setup,
flatfield=flatfield,
hotpixels=hotpix_array,
normalize=True,
debugging=debug,
)
tilt, grazing, inplane, outofplane = setup.diffractometer.goniometer_values(
frames_logical=frames_logical, logfile=logfile, scan_number=scan_nb, setup=setup
)
nbz, nby, nbx = data.shape
if peak_method == "max":
piz, piy, pix = np.unravel_index(data.argmax(), shape=(nbz, nby, nbx))
elif peak_method == "com":
piz, piy, pix = center_of_mass(data)
else: # 'max_com'
max_z, max_y, max_x = np.unravel_index(data.argmax(), shape=data.shape)
com_z, com_y, com_x = center_of_mass(
data[
:,
int(max_y) - debug_pix : int(max_y) + debug_pix,
int(max_x) - debug_pix : int(max_x) + debug_pix,
]
)
# correct the pixel offset due to the ROI defined by debug_pix around the max
piz = com_z # the data was not cropped along the first axis
piy = com_y + max_y - debug_pix
pix = com_x + max_x - debug_pix
if debug:
fig, _, _ = gu.multislices_plot(
data,
sum_frames=True,
plot_colorbar=True,
cmap=my_cmap,
title="scan" + str(scan_nb),
scale="log",
is_orthogonal=False,
reciprocal_space=True,
)
fig.text(
0.60, 0.30, f"(piz, piy, pix) = ({piz:.1f}, {piy:.1f}, {pix:.1f})", size=12
)
plt.draw()
if peak_method == "max_com":
fig, _, _ = gu.multislices_plot(
data[
:,
int(max_y) - debug_pix : int(max_y) + debug_pix,
int(max_x) - debug_pix : int(max_x) + debug_pix,
],
sum_frames=True,
plot_colorbar=True,
cmap=my_cmap,
title="scan" + str(scan_nb),
scale="log",
is_orthogonal=False,
reciprocal_space=True,
)
fig.text(
0.60,
0.30,
f"(com_z, com_y, com_x) = ({com_z:.1f}, {com_y:.1f}, {com_x:.1f})",
size=12,
)
plt.draw()
print("")
zcom.append(piz)
ycom.append(piy)
xcom.append(pix)
int_sum.append(data.sum())
int_max.append(data.max())
check_roi.append(
data[:, :, int(pix) - debug_pix : int(pix) + debug_pix].sum(axis=1)
)
interp_tilt = interp1d(np.arange(data.shape[0]), tilt, kind="linear")
tilt_com.append(interp_tilt(piz))
##############################
# convert pixels to q values #
##############################
if convert_to_q:
(
setup.outofplane_angle,
setup.inplane_angle,
setup.tilt_angle,
setup.grazing_angle,
) = (outofplane, inplane, tilt, grazing)
# calculate the position of the Bragg peak in full detector pixels
bragg_x = detector.roi[2] + pix
bragg_y = detector.roi[0] + piy
# calculate the position of the direct beam at 0 detector angles
x_direct_0 = directbeam_x + setup.inplane_coeff * (
direct_inplane * np.pi / 180 * sdd / detector.pixelsize_x
) # inplane_coeff is +1 or -1
y_direct_0 = (
directbeam_y
- setup.outofplane_coeff
* direct_outofplane
* np.pi
/ 180
* sdd
/ detector.pixelsize_y
) # outofplane_coeff is +1 or -1
# calculate corrected detector angles for the Bragg peak
bragg_inplane = setup.inplane_angle + setup.inplane_coeff * (
detector.pixelsize_x * (bragg_x - x_direct_0) / sdd * 180 / np.pi
) # inplane_coeff is +1 or -1
bragg_outofplane = (
setup.outofplane_angle
- setup.outofplane_coeff
* detector.pixelsize_y
* (bragg_y - y_direct_0)
/ sdd
* 180
/ np.pi
) # outofplane_coeff is +1 or -1
print(
f"\nBragg angles before correction (gam, del): ({setup.inplane_angle:.4f}, "
f"{setup.outofplane_angle:.4f})"
)
print(
f"Bragg angles after correction (gam, del): ({bragg_inplane:.4f}, "
f"{bragg_outofplane:.4f})"
)
# update setup with the corrected detector angles
setup.inplane_angle = bragg_inplane
setup.outofplane_angle = bragg_outofplane
##############################################################
# wavevector transfer calculations (in the laboratory frame) #
##############################################################
kin = 2 * np.pi / setup.wavelength * np.asarray(beam_direction)
# in lab frame z downstream, y vertical, x outboard
kout = (
setup.exit_wavevector
) # in lab.frame z downstream, y vertical, x outboard
q = (kout - kin) / 1e10 # convert from 1/m to 1/angstrom
q_com.append(np.linalg.norm(q))
print(f"Wavevector transfer of Bragg peak: {q}, Qnorm={np.linalg.norm(q):.4f}")
##########################################################
# plot the ROI centered on the Bragg peak for each scan #
##########################################################
plt.ion()
# plot maximum 7x7 ROIs per figure
nb_fig = 1 + len(scans) // 49
if nb_fig == 1:
nb_rows = np.floor(np.sqrt(len(scans)))
nb_columns = np.ceil(len(scans) / nb_rows)
else:
nb_rows = 7
nb_columns = 7
scan_counter = 0
for fig_idx in range(nb_fig):
fig = plt.figure(figsize=(12, 9))
for idx in range(min(49, len(scans) - scan_counter)):
axis = plt.subplot(nb_rows, nb_columns, idx + 1)
axis.imshow(np.log10(check_roi[scan_counter]))
axis.set_title("S{:d}".format(scans[scan_counter]))
scan_counter = scan_counter + 1
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + f"check-roi{fig_idx+1}" + comment + ".png")
##########################################################
# plot the evolution of the center of mass and intensity #
##########################################################
fig, ((ax0, ax1, ax2), (ax3, ax4, ax5)) = plt.subplots(
nrows=2, ncols=3, figsize=(12, 9)
)
ax0.plot(scans, x_axis, "-o")
ax0.set_xlabel("Scan number")
ax0.set_ylabel(x_label)
ax1.scatter(x_axis, int_sum, s=24, c=scans, cmap=my_cmap)
ax1.set_xlabel(x_label)
ax1.set_ylabel("Integrated intensity")
ax1.set_facecolor(bckg_color)
ax2.scatter(x_axis, int_max, s=24, c=scans, cmap=my_cmap)
ax2.set_xlabel(x_label)
ax2.set_ylabel("Maximum intensity")
ax2.set_facecolor(bckg_color)
ax3.scatter(x_axis, xcom, s=24, c=scans, cmap=my_cmap)
ax3.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
ax3.set_ylabel("xcom (pixels)")
else: # 'max'
ax3.set_ylabel("xmax (pixels)")
ax3.set_facecolor(bckg_color)
ax4.scatter(x_axis, ycom, s=24, c=scans, cmap=my_cmap)
ax4.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
ax4.set_ylabel("ycom (pixels)")
else: # 'max'
ax4.set_ylabel("ymax (pixels)")
ax4.set_facecolor(bckg_color)
plt5 = ax5.scatter(x_axis, zcom, s=24, c=scans, cmap=my_cmap)
gu.colorbar(plt5, scale="linear", numticks=min(len(scans), 20), label="scan #")
ax5.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
ax5.set_ylabel("zcom (pixels)")
else: # 'max'
ax5.set_ylabel("zmax (pixels)")
ax5.set_facecolor(bckg_color)
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + "summary" + comment + ".png")
############################################
# plot the evolution of the incident angle #
############################################
tilt_com = np.asarray(tilt_com)
x_axis = np.asarray(x_axis)
uniq_xaxis = np.unique(x_axis)
mean_tilt = np.empty(len(uniq_xaxis))
std_tilt = np.empty(len(uniq_xaxis))
for idx, item in enumerate(uniq_xaxis):
mean_tilt[idx] = np.mean(tilt_com[x_axis == item])
std_tilt[idx] = np.std(tilt_com[x_axis == item])
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(12, 9))
ax0.plot(scans, tilt_com, "-o")
ax0.set_xlabel("Scan number")
ax0.set_ylabel("Bragg angle (deg)")
ax1.errorbar(
uniq_xaxis,
mean_tilt,
yerr=std_tilt,
elinewidth=2,
capsize=6,
capthick=2,
linestyle="",
marker="o",
markersize=6,
markerfacecolor="w",
)
ax1.set_xlabel(x_label)
ax1.set_ylabel("Bragg angle (deg)")
plt2 = ax2.scatter(x_axis, tilt_com, s=24, c=scans, cmap=my_cmap)
gu.colorbar(plt2, scale="linear", numticks=min(len(scans), 20), label="scan #")
ax2.set_xlabel(x_label)
ax2.set_ylabel("Bragg angle (deg)")
ax2.set_facecolor(bckg_color)
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + "Bragg angle" + comment + ".png")
##############################################
# plot the evolution of the diffusion vector #
##############################################
if convert_to_q:
q_com = np.asarray(q_com)
mean_q = np.empty(len(uniq_xaxis))
std_q = np.empty(len(uniq_xaxis))
for idx, item in enumerate(uniq_xaxis):
mean_q[idx] = np.mean(q_com[x_axis == item])
        std_q[idx] = np.std(q_com[x_axis == item])
import numpy as np
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import seaborn as sns
import sys
import matplotlib.pyplot as plt
from skimage.transform import rescale
sys.path.append('../..')
from config import DIR_FIGS
from os.path import join as opj
cb = '#66ccff'
cr = '#cc0000'
cm = sns.diverging_palette(10, 240, n=1000, as_cmap=True)
def save_fig(fname):
plt.savefig(opj(DIR_FIGS, fname) + '.png')
def cshow(im):
plt.imshow(im, cmap='magma', vmax=0.15, vmin=-0.05)
plt.axis('off')
def plot_2d_samples(sample, color='C0'):
"""Plot 2d sample
    Arguments
---------
sample : 2D ndarray or tensor
matrix of spatial coordinates for each sample
"""
if "torch" in str(type(sample)):
sample_np = sample.detach().cpu().numpy()
x = sample_np[:, 0]
y = sample_np[:, 1]
plt.scatter(x, y, color=color)
plt.gca().set_aspect('equal', adjustable='box')
def plot_2d_latent_samples(latent_sample, color='C0'):
"""Plot latent samples select two most highly variable coordinates
    Arguments
---------
latent_sample : tensor
matrix of spatial coordinates for each latent sample
"""
latent_dim = latent_sample.size()[1]
stds = []
for i in range(latent_dim):
stds.append(torch.std(latent_sample[:,i]).item())
stds = np.array(stds)
ind = np.argsort(stds)[::-1][:2]
plot_2d_samples(latent_sample[:,list(ind)])
def traverse_line(idx, model, n_samples=100, n_latents=2, data=None, max_traversal=10):
"""Return a (size, latent_size) latent sample, corresponding to a traversal
of a latent variable indicated by idx.
Parameters
----------
idx : int
Index of continuous dimension to traverse. If the continuous latent
vector is 10 dimensional and idx = 7, then the 7th dimension
will be traversed while all others are fixed.
n_samples : int
Number of samples to generate.
data : torch.Tensor or None, optional
Data to use for computing the posterior. If `None`
then use the mean of the prior (all zeros) for all other dimensions.
"""
model.eval()
if data is None:
# mean of prior for other dimensions
samples = torch.zeros(n_samples, n_latents)
traversals = torch.linspace(-2, 2, steps=n_samples)
else:
if data.size(0) > 1:
raise ValueError("Every value should be sampled from the same posterior, but {} datapoints given.".format(data.size(0)))
with torch.no_grad():
post_mean, post_logvar = model.encoder(data.to(device))
samples = model.reparameterize(post_mean, post_logvar)
samples = samples.cpu().repeat(n_samples, 1)
post_mean_idx = post_mean.cpu()[0, idx]
post_std_idx = torch.exp(post_logvar / 2).cpu()[0, idx]
        # traverse symmetrically around the posterior mean of this dimension
traversals = torch.linspace(post_mean_idx - max_traversal,
post_mean_idx + max_traversal,
steps=n_samples)
for i in range(n_samples):
samples[i, idx] = traversals[i]
return samples
def traversals(model,
data=None,
n_samples=100,
n_latents=2,
max_traversal=1.):
"""
"""
latent_samples = [traverse_line(dim, model, n_samples, n_latents, data=data, max_traversal=max_traversal) for dim in range(n_latents)]
decoded_traversal = model.decoder(torch.cat(latent_samples, dim=0).to(device))
decoded_traversal = decoded_traversal.detach().cpu()
return decoded_traversal
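# Illustrative usage (added, not from the original file; variable names are
# hypothetical): decode a traversal of every latent dimension around the posterior
# of a single datapoint. With n_latents=4 and n_samples=50 the decoder output has
# 4 * 50 = 200 rows, one block of 50 per latent dimension.
# >>> decoded = traversals(model, data=batch[:1], n_samples=50, n_latents=4,
# ...                      max_traversal=2.0)
# >>> decoded.shape[0]
# 200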
def plot_traversals(model,
data,
lb=0,
ub=2000,
num=100,
draw_data=False,
draw_recon=False,
traversal_samples=100,
n_latents=4,
max_traversal=1.):
if draw_data is True:
plot_2d_samples(data[:,:2], color='C0')
if draw_recon is True:
recon_data, _, _ = model(data)
plot_2d_samples(recon_data[:,:2], color='C8')
ranges = np.arange(lb, ub)
samples_index = np.random.choice(ranges, num, replace=False)
for i in samples_index:
decoded_traversal = traversals(model, data=data[i:i+1], n_samples=traversal_samples, n_latents=n_latents,
max_traversal=max_traversal)
decoded_traversal0 = decoded_traversal[:,:2]
plot_2d_samples(decoded_traversal0[:100], color='C2')
plot_2d_samples(decoded_traversal0[100:200], color='C3')
plot_2d_samples(decoded_traversal0[200:300], color='C4')
plot_2d_samples(decoded_traversal0[300:400], color='C5')
def viz_filters(tensors, n_row=4, n_col=8, resize_fac=2, normalize=True, vmax=None, vmin=None, title=None):
plt.figure(figsize=(10,10))
# plot filters
p = tensors.shape[2] + 2
mosaic = np.zeros((p*n_row,p*n_col))
indx = 0
for i in range(n_row):
for j in range(n_col):
im = tensors.data.cpu().numpy()[indx].squeeze()
if normalize:
im = (im-np.min(im))
                im = im / np.max(im)
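            # --- Hedged completion (added, not from the original file): place each
            # filter into its cell of the mosaic, then upscale and display it; the
            # rescale/imshow settings below are assumptions based on the signature above. ---
            mosaic[i * p + 1:(i + 1) * p - 1, j * p + 1:(j + 1) * p - 1] = im
            indx += 1
    mosaic = rescale(mosaic, resize_fac, order=0)
    plt.imshow(mosaic, cmap='magma', vmax=vmax, vmin=vmin)
    plt.axis('off')
    if title is not None:
        plt.title(title)
    plt.show()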
"""
These are designed to be unit-tests of the wrapper functionality. They do not test for
correctness of simulations,
but whether different parameter options work/don't work as intended.
"""
import pytest
import numpy as np
from py21cmfast import wrapper
@pytest.fixture(scope="module")
def perturb_field_lowz(ic, low_redshift):
"""A default perturb_field"""
return wrapper.perturb_field(redshift=low_redshift, init_boxes=ic, write=True)
@pytest.fixture(scope="module")
def ionize_box(perturb_field):
"""A default ionize_box"""
return wrapper.ionize_box(perturbed_field=perturb_field, write=True)
@pytest.fixture(scope="module")
def ionize_box_lowz(perturb_field_lowz):
"""A default ionize_box at lower redshift."""
return wrapper.ionize_box(perturbed_field=perturb_field_lowz, write=True)
@pytest.fixture(scope="module")
def spin_temp(perturb_field):
"""A default perturb_field"""
return wrapper.spin_temperature(perturbed_field=perturb_field, write=True)
def test_perturb_field_no_ic(default_user_params, redshift, perturb_field):
"""Run a perturb field without passing an init box"""
pf = wrapper.perturb_field(redshift=redshift, user_params=default_user_params)
assert len(pf.density) == pf.user_params.HII_DIM == default_user_params.HII_DIM
assert pf.redshift == redshift
assert pf.random_seed != perturb_field.random_seed
assert not np.all(pf.density == 0)
assert pf != perturb_field
assert pf._seedless_repr() == perturb_field._seedless_repr()
def test_ib_no_z(ic):
with pytest.raises(ValueError):
wrapper.ionize_box(init_boxes=ic)
def test_pf_unnamed_param():
"""Try using an un-named parameter."""
with pytest.raises(TypeError):
wrapper.perturb_field(7)
def test_perturb_field_ic(perturb_field, ic):
# this will run perturb_field again, since by default regenerate=True for tests.
# BUT it should produce exactly the same as the default perturb_field since it has
# the same seed.
pf = wrapper.perturb_field(redshift=perturb_field.redshift, init_boxes=ic)
assert len(pf.density) == len(ic.lowres_density)
assert pf.cosmo_params == ic.cosmo_params
assert pf.user_params == ic.user_params
assert not np.all(pf.density == 0)
assert pf.user_params == perturb_field.user_params
assert pf.cosmo_params == perturb_field.cosmo_params
assert pf == perturb_field
def test_cache_exists(default_user_params, perturb_field, tmpdirec):
pf = wrapper.PerturbedField(
redshift=perturb_field.redshift,
cosmo_params=perturb_field.cosmo_params,
user_params=default_user_params,
)
assert pf.exists(tmpdirec)
pf.read(tmpdirec)
assert np.all(pf.density == perturb_field.density)
assert pf == perturb_field
def test_pf_new_seed(perturb_field, tmpdirec):
pf = wrapper.perturb_field(
redshift=perturb_field.redshift,
user_params=perturb_field.user_params,
random_seed=1,
)
# we didn't write it, and this has a different seed
assert not pf.exists(direc=tmpdirec)
assert pf.random_seed != perturb_field.random_seed
assert not np.all(pf.density == perturb_field.density)
def test_ib_new_seed(ionize_box_lowz, perturb_field_lowz, tmpdirec):
# this should fail because perturb_field has a seed set already, which isn't 1.
with pytest.raises(ValueError):
wrapper.ionize_box(
perturbed_field=perturb_field_lowz,
random_seed=1,
)
ib = wrapper.ionize_box(
cosmo_params=perturb_field_lowz.cosmo_params,
redshift=perturb_field_lowz.redshift,
user_params=perturb_field_lowz.user_params,
random_seed=1,
)
# we didn't write it, and this has a different seed
assert not ib.exists(direc=tmpdirec)
assert ib.random_seed != ionize_box_lowz.random_seed
assert not np.all(ib.xH_box == ionize_box_lowz.xH_box)
def test_st_new_seed(spin_temp, perturb_field, tmpdirec):
# this should fail because perturb_field has a seed set already, which isn't 1.
with pytest.raises(ValueError):
wrapper.spin_temperature(
perturbed_field=perturb_field,
random_seed=1,
)
st = wrapper.spin_temperature(
cosmo_params=spin_temp.cosmo_params,
user_params=spin_temp.user_params,
astro_params=spin_temp.astro_params,
flag_options=spin_temp.flag_options,
redshift=spin_temp.redshift,
random_seed=1,
)
# we didn't write it, and this has a different seed
assert not st.exists(direc=tmpdirec)
assert st.random_seed != spin_temp.random_seed
assert not np.all(st.Ts_box == spin_temp.Ts_box)
def test_st_from_z(perturb_field_lowz, spin_temp):
# This one has all the same parameters as the nominal spin_temp, but is evaluated
# with an interpolated perturb_field
st = wrapper.spin_temperature(
perturbed_field=perturb_field_lowz,
astro_params=spin_temp.astro_params,
flag_options=spin_temp.flag_options,
redshift=spin_temp.redshift, # Higher redshift
)
assert st != spin_temp
assert not np.all(st.Ts_box == spin_temp.Ts_box)
def test_ib_from_pf(perturb_field):
ib = wrapper.ionize_box(perturbed_field=perturb_field)
assert ib.redshift == perturb_field.redshift
assert ib.user_params == perturb_field.user_params
assert ib.cosmo_params == perturb_field.cosmo_params
def test_ib_from_z(default_user_params, perturb_field):
ib = wrapper.ionize_box(
redshift=perturb_field.redshift,
user_params=default_user_params,
regenerate=False,
)
assert ib.redshift == perturb_field.redshift
assert ib.user_params == perturb_field.user_params
assert ib.cosmo_params == perturb_field.cosmo_params
assert ib.cosmo_params is not perturb_field.cosmo_params
def test_ib_override_z(perturb_field):
with pytest.raises(ValueError):
wrapper.ionize_box(
redshift=perturb_field.redshift + 1,
perturbed_field=perturb_field,
)
def test_ib_override_z_heat_max(perturb_field):
# save previous z_heat_max
zheatmax = wrapper.global_params.Z_HEAT_MAX
wrapper.ionize_box(
redshift=perturb_field.redshift,
perturbed_field=perturb_field,
z_heat_max=12.0,
)
assert wrapper.global_params.Z_HEAT_MAX == zheatmax
def test_ib_bad_st(ic, redshift):
with pytest.raises(ValueError):
wrapper.ionize_box(redshift=redshift, spin_temp=ic)
def test_bt(ionize_box, spin_temp, perturb_field):
with pytest.raises(TypeError): # have to specify param names
wrapper.brightness_temperature(ionize_box, spin_temp, perturb_field)
# this will fail because ionized_box was not created with spin temperature.
with pytest.raises(ValueError):
wrapper.brightness_temperature(
ionized_box=ionize_box, perturbed_field=perturb_field, spin_temp=spin_temp
)
bt = wrapper.brightness_temperature(
ionized_box=ionize_box, perturbed_field=perturb_field
)
assert bt.cosmo_params == perturb_field.cosmo_params
assert bt.user_params == perturb_field.user_params
assert bt.flag_options == ionize_box.flag_options
assert bt.astro_params == ionize_box.astro_params
def test_coeval_against_direct(ic, perturb_field, ionize_box):
coeval = wrapper.run_coeval(perturb=perturb_field, init_box=ic)
assert coeval.init_struct == ic
assert coeval.perturb_struct == perturb_field
assert coeval.ionization_struct == ionize_box
def test_lightcone(lc, default_user_params, redshift, max_redshift):
assert lc.lightcone_redshifts[-1] >= max_redshift
assert np.isclose(lc.lightcone_redshifts[0], redshift, atol=1e-4)
assert lc.cell_size == default_user_params.BOX_LEN / default_user_params.HII_DIM
def test_lightcone_quantities(ic, max_redshift, perturb_field):
lc = wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=max_redshift,
lightcone_quantities=("dNrec_box", "density", "brightness_temp"),
global_quantities=("density", "Gamma12_box"),
)
assert hasattr(lc, "dNrec_box")
assert hasattr(lc, "density")
assert hasattr(lc, "global_density")
assert hasattr(lc, "global_Gamma12")
# dNrec is not filled because we're not doing INHOMO_RECO
assert lc.dNrec_box.max() == lc.dNrec_box.min() == 0
# density should be filled with not zeros.
assert lc.density.min() != lc.density.max() != 0
# Simply ensure that different quantities are not being confused with or cross-referenced to each other.
assert lc.density.min() != lc.brightness_temp.min() != lc.brightness_temp.max()
# Raise an error since we're not doing spin temp.
with pytest.raises(ValueError):
wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=20.0,
lightcone_quantities=("Ts_box", "density"),
)
# And also raise an error for global quantities.
with pytest.raises(ValueError):
wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=20.0,
global_quantities=("Ts_box",),
)
def test_run_lf():
muv, mhalo, lf = wrapper.compute_luminosity_function(redshifts=[7, 8, 9], nbins=100)
assert np.all(lf[~np.isnan(lf)] > -30)
assert lf.shape == (3, 100)
# Check that memory is intact and a second run also works:
muv, mhalo, lf2 = wrapper.compute_luminosity_function(
redshifts=[7, 8, 9], nbins=100
)
assert lf2.shape == (3, 100)
assert np.allclose(lf2[~np.isnan(lf2)], lf[~np.isnan(lf)])
muv_minih, mhalo_minih, lf_minih = wrapper.compute_luminosity_function(
redshifts=[7, 8, 9],
nbins=100,
component=0,
flag_options={"USE_MINI_HALOS": True},
mturnovers=[7.0, 7.0, 7.0],
mturnovers_mini=[5.0, 5.0, 5.0],
)
assert np.all(lf_minih[~np.isnan(lf_minih)] > -30)
assert lf_minih.shape == (3, 100)
def test_coeval_st(ic, perturb_field):
coeval = wrapper.run_coeval(
init_box=ic,
perturb=perturb_field,
flag_options={"USE_TS_FLUCT": True},
)
assert isinstance(coeval.spin_temp_struct, wrapper.TsBox)
def _global_Tb(coeval_box):
assert isinstance(coeval_box, wrapper.Coeval)
global_Tb = coeval_box.brightness_temp.mean(dtype=np.float128).astype(np.float32)
assert np.isclose(global_Tb, coeval_box.brightness_temp_struct.global_Tb)
return global_Tb
def test_coeval_callback(ic, max_redshift, perturb_field):
lc, coeval_output = wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=max_redshift,
lightcone_quantities=("brightness_temp",),
global_quantities=("brightness_temp",),
coeval_callback=_global_Tb,
)
assert isinstance(lc, wrapper.LightCone)
assert isinstance(coeval_output, list)
assert len(lc.node_redshifts) == len(coeval_output)
assert np.allclose(
lc.global_brightness_temp, np.array(coeval_output, dtype=np.float32)
)
def test_coeval_callback_redshifts(ic, redshift, max_redshift, perturb_field):
coeval_callback_redshifts = np.array(
[max_redshift, max_redshift, (redshift + max_redshift) / 2, redshift],
dtype=np.float32,
)
lc, coeval_output = wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=max_redshift,
coeval_callback=lambda x: x.redshift,
coeval_callback_redshifts=coeval_callback_redshifts,
)
assert len(coeval_callback_redshifts) - 1 == len(coeval_output)
computed_redshifts = [
lc.node_redshifts[np.argmin(np.abs(i - lc.node_redshifts))]
for i in coeval_callback_redshifts[1:]
]
assert np.allclose(coeval_output, computed_redshifts)
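# Helper for the callback-exception test below: returning 0 makes the
# 1 / Heaviside(...) callback raise ZeroDivisionError once the evaluation
# redshift drops to or below the midpoint between redshift and max_redshift.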
def Heaviside(x):
return 1 if x > 0 else 0
def test_coeval_callback_exceptions(ic, redshift, max_redshift, perturb_field):
# should output warning in logs and not raise an error
lc, coeval_output = wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=max_redshift,
coeval_callback=lambda x: 1
/ Heaviside(x.redshift - (redshift + max_redshift) / 2),
coeval_callback_redshifts=[max_redshift, redshift],
)
# should raise an error
with pytest.raises(RuntimeError) as excinfo:
lc, coeval_output = wrapper.run_lightcone(
init_box=ic,
perturb=perturb_field,
max_redshift=max_redshift,
coeval_callback=lambda x: 1 / 0,
coeval_callback_redshifts=[max_redshift, redshift],
)
assert "coeval_callback computation failed on first trial" in str(excinfo.value)
def test_coeval_vs_low_level(ic):
coeval = wrapper.run_coeval(
redshift=20,
init_box=ic,
zprime_step_factor=1.1,
regenerate=True,
flag_options={"USE_TS_FLUCT": True},
write=False,
)
st = wrapper.spin_temperature(
redshift=20,
init_boxes=ic,
zprime_step_factor=1.1,
regenerate=True,
flag_options={"USE_TS_FLUCT": True},
write=False,
)
assert np.allclose(coeval.Tk_box, st.Tk_box)
assert np.allclose(coeval.Ts_box, st.Ts_box)
assert np.allclose(coeval.x_e_box, st.x_e_box)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:00:33 2018
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
from statsmodels.tsa.api import VAR
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
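# The script below fits temperature- and wind-driven regressions of daily peak
# demand for BPA and the four CAISO zones (SDGE, SCE, PG&E Valley, PG&E Bay)
# and of daily flows on several WECC paths (Path 46, the NW paths, other CA
# paths, and Paths 65/66).  The regression residuals are then whitened and fed
# to a first-order vector autoregression to synthesize cross-correlated demand
# and path-flow series for the simulated weather years.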
######################################################################
# LOAD
######################################################################
#import data
df_load = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='hourly_load',header=0)
df_weather = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='weather',header=0)
BPA_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='BPA_location_weights',header=0)
CAISO_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='CAISO_location_weights',header=0)
Name_list=pd.read_csv('Synthetic_demand_pathflows/Covariance_Calculation.csv')
Name_list=list(Name_list.loc['SALEM_T':])
Name_list=Name_list[1:]
df_wind=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_years = int(len(df_wind)/8760) + 3
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0,index_col=0)
sim_weather = sim_weather.iloc[0:365*sim_years,:]
sim_weather = sim_weather.iloc[365:len(sim_weather)-730,:]
sim_weather = sim_weather.reset_index(drop=True)
#weekday designation
dow = df_weather.loc[:,'Weekday']
#generate a simulated day-of-week flag (1 = weekday, 0 = weekend), assuming the series starts on a Monday
count=0
sim_dow= np.zeros(len(sim_weather))
for i in range(0,len(sim_weather)):
count = count +1
if count <=5:
sim_dow[i]=1
elif count > 5:
sim_dow[i]=0
if count ==7:
count =0
#Generate a datelist
datelist=pd.date_range(start='2017-01-01',periods=365).tolist()  # pd.datetime was removed from newer pandas; use a date string
sim_month=np.zeros(len(sim_weather))
sim_day=np.zeros(len(sim_weather))
sim_year=np.zeros(len(sim_weather))
count=0
for i in range(0,len(sim_weather)):
if count <=364:
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
else:
count=0
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
count=count+1
######################################################################
# BPAT
######################################################################
#Find the simulated data at the sites
col_BPA_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T']
col_BPA_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W']
BPA_sim_T=sim_weather[col_BPA_T].values
BPA_sim_W=sim_weather[col_BPA_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
###########################################
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*BPA_weights.loc[0,i]
Wind[:,j] = df_weather.loc[:,n3]
weighted_SimT[:,0] = weighted_SimT[:,0] + BPA_sim_T[:,j]*BPA_weights.loc[0,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
BPA_sim_T_F=(BPA_sim_T * 9/5) +32
#convert to degree days
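# Heating/cooling degree days relative to a 65 deg F balance point:
# HDD = max(0, 65 - T) and CDD = max(0, T - 65).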
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-BPA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,BPA_sim_T_F[i,j] - 65))
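# A minimal vectorized sketch (not used below) of the same degree-day
# conversion; it assumes temperatures are already in deg F, like AvgT and
# BPA_sim_T_F above.
def degree_days(temps_F, base=65.0):
    """Return (HDD, CDD) arrays for an array of daily average temperatures."""
    hdd = np.maximum(0.0, base - temps_F)
    cdd = np.maximum(0.0, temps_F - base)
    return hdd, cdd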
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(BPA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(BPA_sim_W,binary_HDD_sim)
#convert load to array
BPA_load = df_load.loc[:,'BPA'].values
#remove NaNs
a = np.argwhere(np.isnan(BPA_load))
for i in a:
BPA_load[i] = BPA_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(BPA_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X70p = M[(M[:,0] >= 70),2:]
y70p = M[(M[:,0] >= 70),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),2:]
y40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),1]
X30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),2:]
y30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),1]
X25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),2:]
y25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),1]
X25m = M[(M[:,0] < 25),2:]
y25m = M[(M[:,0] < 25),1]
X70p_Sim = M_sim[(M_sim[:,0] >= 70),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X40_50_Sim = M_sim[(M_sim[:,0] >= 40) & (M_sim[:,0] < 50),1:]
X30_40_Sim = M_sim[(M_sim[:,0] >= 30) & (M_sim[:,0] < 40),1:]
X25_30_Sim = M_sim[(M_sim[:,0] >= 25) & (M_sim[:,0] < 30),1:]
X25m_Sim = M_sim[(M_sim[:,0] < 25),1:]
#multivariate regression
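# Piecewise regression: a separate OLS model is fit for each band of weighted
# average temperature.  Predictors are the day-of-week flag, HDD/CDD, and the
# degree-day-weighted wind speeds; the response is the daily peak load.
# Temperature bands with no historical days are skipped (the len(...) > 0
# guards below).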
#Create linear regression object
reg70p = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg40_50 = linear_model.LinearRegression()
reg30_40 = linear_model.LinearRegression()
reg25_30 = linear_model.LinearRegression()
reg25m = linear_model.LinearRegression()
# Train the model using the training sets
if len(y70p) > 0:
reg70p.fit(X70p,y70p)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y40_50) > 0:
reg40_50.fit(X40_50,y40_50)
if len(y30_40) > 0:
reg30_40.fit(X30_40,y30_40)
if len(y25_30) > 0:
reg25_30.fit(X25_30,y25_30)
if len(y25m) > 0:
reg25m.fit(X25m,y25m)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=70:
y_hat = reg70p.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] >= 40 and M[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M[i,0] >= 30 and M[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M[i,0] >= 25 and M[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M[i,0] < 25:
y_hat = reg25m.predict(s)
predicted = np.append(predicted,y_hat)
BPA_p = predicted.reshape((len(predicted),1))
#Simulate using the regression above
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=70:
y_hat = reg70p.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] >= 40 and M_sim[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M_sim[i,0] >= 30 and M_sim[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M_sim[i,0] >= 25 and M_sim[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M_sim[i,0] < 25:
y_hat = reg25m.predict(s)
simulated = np.append(simulated,y_hat)
BPA_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(np.ravel(peaks),np.ravel(BPA_p))  # pearsonr expects 1-D arrays
print(a[0]**2, a[1])
# Residuals
BPAresiduals = BPA_p - peaks
BPA_y = peaks
# RMSE
RMSE = (np.sum((BPAresiduals**2))/len(BPAresiduals))**.5
output = np.column_stack((BPA_p,peaks))
#########################################################################
# CAISO
#########################################################################
#Find the simulated data at the sites
col_CAISO_T = ['FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_CAISO_W = ['FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
CAISO_sim_T=sim_weather[col_CAISO_T].values
CAISO_sim_W=sim_weather[col_CAISO_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
#find average temps
cities = ['Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
Wind[:,j] = df_weather.loc[:,n3]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*CAISO_weights.loc[1,i]
weighted_SimT[:,0] = weighted_SimT[:,0] + CAISO_sim_T[:,j]*CAISO_weights.loc[1,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
CAISO_sim_T_F=(CAISO_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CAISO_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CAISO_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
CDD_wind_sim = np.multiply(CAISO_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CAISO_sim_W,binary_HDD_sim)
###########################
# CAISO - SDGE
###########################
#convert load to array
SDGE_load = df_load.loc[:,'SDGE'].values
#remove NaNs
a = np.argwhere(np.isnan(SDGE_load))
for i in a:
SDGE_load[i] = SDGE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SDGE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SDGE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
#
simulated = np.append(simulated,y_hat)
SDGE_sim = simulated.reshape((len(simulated),1))
# Residuals
SDGEresiduals = SDGE_p - peaks
SDGE_y = peaks
#a=st.pearsonr(peaks,SDGE_p)
#print a[0]**2
# RMSE
RMSE = (np.sum((SDGEresiduals**2))/len(SDGEresiduals))**.5
###########################
# CAISO - SCE
###########################
#convert load to array
SCE_load = df_load.loc[:,'SCE'].values
#remove NaNs
a = np.argwhere(np.isnan(SCE_load))
for i in a:
SCE_load[i] = SCE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SCE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SCE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SCE_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,SCE_p)
#print a[0]**2
# Residuals
SCEresiduals = SCE_p - peaks
SCE_y = peaks
# RMSE
RMSE = (np.sum((SCEresiduals**2))/len(SCEresiduals))**.5
###########################
# CAISO - PG&E Valley
###########################
#convert load to array
PGEV_load = df_load.loc[:,'PGE_V'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEV_load))
for i in a:
PGEV_load[i] = PGEV_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEV_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEV_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEV_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(np.ravel(peaks),np.ravel(PGEV_p))  # pearsonr expects 1-D arrays
print(a[0]**2, a[1])
# Residuals
PGEVresiduals = PGEV_p - peaks
PGEV_y = peaks
# RMSE
RMSE = (np.sum((PGEVresiduals**2))/len(PGEVresiduals))**.5
###########################
# CAISO - PG&E Bay
###########################
#convert load to array
PGEB_load = df_load.loc[:,'PGE_B'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEB_load))
for i in a:
PGEB_load[i] = PGEB_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEB_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEB_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s) #
simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
ResidualsLoad = R[0:3*365,:]
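# Keep three full years (1095 days) of load residuals so that every residual
# series stacked in the Residual Analysis section has the same length.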
###################################
# PATH 46
###################################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path46'}, inplace=True)
df_data.rename(columns={4:'Weekday'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
y = df_data.loc[:,'Path46']
#multivariate regression
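# Path 46 flows are modeled with a separate OLS regression per calendar month,
# using the weekday flag plus the HDD/CDD and wind interaction terms as
# predictors.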
jan_reg_46 = linear_model.LinearRegression()
feb_reg_46 = linear_model.LinearRegression()
mar_reg_46 = linear_model.LinearRegression()
apr_reg_46 = linear_model.LinearRegression()
may_reg_46 = linear_model.LinearRegression()
jun_reg_46 = linear_model.LinearRegression()
jul_reg_46 = linear_model.LinearRegression()
aug_reg_46 = linear_model.LinearRegression()
sep_reg_46 = linear_model.LinearRegression()
oct_reg_46 = linear_model.LinearRegression()
nov_reg_46 = linear_model.LinearRegression()
dec_reg_46 = linear_model.LinearRegression()
# Train the model using the training sets
jan_reg_46.fit(jan.loc[:,'Weekday':],jan.loc[:,'Path46'])
feb_reg_46.fit(feb.loc[:,'Weekday':],feb.loc[:,'Path46'])
mar_reg_46.fit(mar.loc[:,'Weekday':],mar.loc[:,'Path46'])
apr_reg_46.fit(apr.loc[:,'Weekday':],apr.loc[:,'Path46'])
may_reg_46.fit(may.loc[:,'Weekday':],may.loc[:,'Path46'])
jun_reg_46.fit(jun.loc[:,'Weekday':],jun.loc[:,'Path46'])
jul_reg_46.fit(jul.loc[:,'Weekday':],jul.loc[:,'Path46'])
aug_reg_46.fit(aug.loc[:,'Weekday':],aug.loc[:,'Path46'])
sep_reg_46.fit(sep.loc[:,'Weekday':],sep.loc[:,'Path46'])
oct_reg_46.fit(oct.loc[:,'Weekday':],oct.loc[:,'Path46'])
nov_reg_46.fit(nov.loc[:,'Weekday':],nov.loc[:,'Path46'])
dec_reg_46.fit(dec.loc[:,'Weekday':],dec.loc[:,'Path46'])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Weekday':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
Path46_p = predicted
# Residuals
residuals = predicted - y.values
Residuals46 = np.reshape(residuals[730:],(1095,1))
Path46_y = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
##R2
#a=st.pearsonr(y,predicted)
#print a[0]**2
###############################
# NW PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/NW_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
H = df_data
#df_data.to_excel('Synthetic_demand_pathflows/cX.xlsx')
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path8'}, inplace=True)
df_data.rename(columns={4:'Path14'}, inplace=True)
df_data.rename(columns={5:'Path3'}, inplace=True)
df_data.rename(columns={6:'BPA_wind'}, inplace=True)
df_data.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data.rename(columns={8:'Weekday'}, inplace=True)
df_data.rename(columns={9:'Salem_HDD'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
NWPaths_p= np.zeros((len(cX),num_lines))
NWPaths_y = np.zeros((len(cX),num_lines))
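# One regression is fit per month and per path; the model objects are created
# dynamically through locals(), so their names encode both month and path
# (e.g. jan_reg_NWPath8).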
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name='jan_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='feb_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='mar_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='apr_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='may_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jun_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jul_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='aug_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='sep_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='oct_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='nov_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='dec_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
name='jan_reg_NW' + str(line)
locals()[name].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
name='feb_reg_NW' + str(line)
locals()[name].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
name='mar_reg_NW' + str(line)
locals()[name].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
name='apr_reg_NW' + str(line)
locals()[name].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
name='may_reg_NW' + str(line)
locals()[name].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
name='jun_reg_NW' + str(line)
locals()[name].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
name='jul_reg_NW' + str(line)
locals()[name].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
name='aug_reg_NW' + str(line)
locals()[name].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
name='sep_reg_NW' + str(line)
locals()[name].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
name='oct_reg_NW' + str(line)
locals()[name].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
name='nov_reg_NW' + str(line)
locals()[name].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
name='dec_reg_NW' + str(line)
locals()[name].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jan_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='feb_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='mar_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='apr_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='may_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jun_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jul_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='aug_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='sep_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='oct_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='nov_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='dec_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
NWPaths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
NWPaths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsNWPaths = export_residuals
###############################
# Other CA PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/OtherCA_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path61'}, inplace=True)
df_data.rename(columns={4:'Path42'}, inplace=True)
df_data.rename(columns={5:'Path24'}, inplace=True)
df_data.rename(columns={6:'Path45'}, inplace=True)
df_data.rename(columns={7:'BPA_wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path61','Path42','Path24','Path45']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
OtherCA_Paths_p= np.zeros((len(cX),num_lines))
OtherCA_Paths_y = np.zeros((len(cX),num_lines))
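# Same monthly, per-line regression structure as the NW paths above, applied
# to the other California paths.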
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_CA' + str(line)
name_2='feb_reg_CA' + str(line)
name_3='mar_reg_CA' + str(line)
name_4='apr_reg_CA' + str(line)
name_5='may_reg_CA' + str(line)
name_6='jun_reg_CA' + str(line)
name_7='jul_reg_CA' + str(line)
name_8='aug_reg_CA' + str(line)
name_9='sep_reg_CA' + str(line)
name_10='oct_reg_CA' + str(line)
name_11='nov_reg_CA' + str(line)
name_12='dec_reg_CA' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
OtherCA_Paths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
OtherCA_Paths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsOtherCA_Paths = export_residuals
##########################
# PATH 65 & 66
##########################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/Path65_66_regression_data.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path65'}, inplace=True)
df_data.rename(columns={4:'Path66'}, inplace=True)
df_data.rename(columns={5:'Wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path65','Path66']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
Path65_66_p = np.zeros((len(cX),num_lines))
Path65_66_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'Wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'Wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'Wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'Wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'Wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'Wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'Wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'Wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'Wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'Wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'Wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'Wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
Path65_66_p[:,line_index] = predicted
Path65_66_y[:,line_index] = y.values
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
#
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
#R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
Residuals65_66 = export_residuals[730:,:]
#####################################################################
# Residual Analysis
#####################################################################
R = np.column_stack((ResidualsLoad,ResidualsNWPaths,ResidualsOtherCA_Paths,Residuals46,Residuals65_66))
rc = np.shape(R)
cols = rc[1]
mus = np.zeros((cols,1))
stds = np.zeros((cols,1))
R_w = np.zeros(np.shape(R))
sim_days = len(R_w)
#whiten residuals
for i in range(0,cols):
mus[i] = np.mean(R[:,i])
stds[i] = np.std(R[:,i])
R_w[:,i] = (R[:,i] - mus[i])/stds[i]
#Vector autoregressive model on residuals
model = VAR(R_w)
results = model.fit(1)
sim_residuals = np.zeros((sim_days,cols))
errors = np.zeros((sim_days,cols))
p = results.params
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,sim_days)
ys = np.zeros((cols,1))
# Generate cross correlated residuals
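# The loop below implements a first-order vector autoregression on the 15
# whitened residual series: each simulated day is y_t = c + A*y_(t-1) + e_t,
# where the intercept c is row 0 of results.params, the lag coefficients A are
# rows 1-15, and the innovations e_t are the draws from N(0, results.sigma_u)
# stored in E above.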
for i in range(0,sim_days):
for j in range(1,cols+1):
name='y' + str(j)
locals()[name]= p[0,j-1] + p[1,j-1]*y_seeds[0]+ p[2,j-1]*y_seeds[1]+ p[3,j-1]*y_seeds[2]+ p[4,j-1]*y_seeds[3]+ p[5,j-1]*y_seeds[4]+ p[6,j-1]*y_seeds[5]+ p[7,j-1]*y_seeds[6]+ p[8,j-1]*y_seeds[7]+ p[9,j-1]*y_seeds[8]+ p[10,j-1]*y_seeds[9]+ p[11,j-1]*y_seeds[10]+ p[12,j-1]*y_seeds[11]+ p[13,j-1]*y_seeds[12]+ p[14,j-1]*y_seeds[13]+ p[15,j-1]*y_seeds[14]+E[i,j-1]
for j in range(1,cols+1):
name='y' + str(j)
y_seeds[j-1]=locals()[name]
sim_residuals[i,:] = [y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y13,y14,y15]
for i in range(0,cols):
sim_residuals[:,i] = sim_residuals[:,i]*stds[i]*(1/np.std(sim_residuals[:,i])) + mus[i]
#validation
Y = np.column_stack((np.reshape(BPA_y[0:3*365],(1095,1)),np.reshape(SDGE_y[0:3*365],(1095,1)),np.reshape(SCE_y[0:3*365],(1095,1)),np.reshape(PGEV_y[0:3*365],(1095,1)),np.reshape(PGEB_y[0:3*365],(1095,1)),NWPaths_y,OtherCA_Paths_y,np.reshape(Path46_y[730:],(1095,1)),np.reshape(Path65_66_y[730:,:],(1095,2))))
combined_BPA = np.reshape(sim_residuals[:,0],(1095,1)) + np.reshape(BPA_p[0:3*365],(1095,1))
combined_SDGE = np.reshape(sim_residuals[:,1],(1095,1)) + np.reshape(SDGE_p[0:3*365],(1095,1))
combined_SCE = np.reshape(sim_residuals[:,2],(1095,1)) + np.reshape(SCE_p[0:3*365],(1095,1))
combined_PGEV = np.reshape(sim_residuals[:,3],(1095,1)) + np.reshape(PGEV_p[0:3*365],(1095,1))
combined_PGEB = np.reshape(sim_residuals[:,4],(1095,1)) + np.reshape(PGEB_p[0:3*365],(1095,1))
combined_Path8 = np.reshape(sim_residuals[:,5],(1095,1)) + np.reshape(NWPaths_p[:,0],(1095,1))
combined_Path14 = np.reshape(sim_residuals[:,6],(1095,1)) + np.reshape(NWPaths_p[:,1],(1095,1))
combined_Path3 = np.reshape(sim_residuals[:,7],(1095,1)) + np.reshape(NWPaths_p[:,2],(1095,1))
combined_Path61 = np.reshape(sim_residuals[:,8],(1095,1)) + np.reshape(OtherCA_Paths_p[:,0],(1095,1))
combined_Path42 = np.reshape(sim_residuals[:,9],(1095,1)) + np.reshape(OtherCA_Paths_p[:,1],(1095,1))
combined_Path24 = np.reshape(sim_residuals[:,10],(1095,1)) + np.reshape(OtherCA_Paths_p[:,2],(1095,1))
combined_Path45 = np.reshape(sim_residuals[:,11],(1095,1)) + np.reshape(OtherCA_Paths_p[:,3],(1095,1))
combined_Path46 = np.reshape(sim_residuals[:,12],(1095,1)) + np.reshape(Path46_p[730:],(1095,1))
combined_Path65 = np.reshape(sim_residuals[:,13],(1095,1)) + np.reshape(Path65_66_p[730:,0],(1095,1))
combined_Path66 = np.reshape(sim_residuals[:,14],(1095,1)) + np.reshape(Path65_66_p[730:,1],(1095,1))
combined = np.column_stack((combined_BPA,combined_SDGE,combined_SCE,combined_PGEV,combined_PGEB,combined_Path8,combined_Path14,combined_Path3,combined_Path61,combined_Path42,combined_Path24,combined_Path45,combined_Path46,combined_Path65,combined_Path66))
rc = np.shape(Y)
cols = rc[1]
names = ['BPA','SDGE','SCE','PGEV','PGEB','Path8','Path14','Path3','Path61','Path42','Path24','Path45','Path46','Path65','Path66']
#for n in names:
#
# n_index = names.index(n)
#
# plt.figure()
# plt.plot(combined[:,n_index],'r')
# plt.plot(Y[:,n_index],'b')
# plt.title(n)
#
##########################################################################################################################################################
#Simulating demand and path
#########################################################################################################################################################
#Sim Residual
simulation_length=len(sim_weather)
syn_residuals = np.zeros((simulation_length,cols))
errors = np.zeros((simulation_length,cols))
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,simulation_length)
ys = np.zeros((cols,1))
for i in range(0,simulation_length):
for n in range(0,cols):
ys[n] = p[0,n]
for m in range(0,cols):
ys[n] = ys[n] + p[m+1,n]*y_seeds[n]
ys[n] = ys[n] + E[i,n]
for n in range(0,cols):
y_seeds[n] = ys[n]
syn_residuals[i,:] = np.reshape([ys],(1,cols))
for i in range(0,cols):
syn_residuals[:,i] = syn_residuals[:,i]*stds[i]*(1/np.std(syn_residuals[:,i])) + mus[i]
##################################################
# PATH NW
##################################################
#This only uses BPA wind and hydro
col_nw_T =['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_nw_W =['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_nw_T)
NW_sim_T=sim_weather[col_nw_T].values
NW_sim_W=sim_weather[col_nw_W].values
NW_sim_T_F=(NW_sim_T * 9/5) +32
NW_sim_W =NW_sim_W *2.23694
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-NW_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,NW_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(NW_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(NW_sim_W,binary_HDD_sim)
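# Degree-day arithmetic used above, for reference: a 50 F day gives
# HDD = max(0, 65 - 50) = 15 and CDD = 0, while an 80 F day gives HDD = 0 and
# CDD = max(0, 80 - 65) = 15; CDD_wind_sim / HDD_wind_sim keep the wind speed
# only on days where the matching degree-day value is positive.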
#Need Month,Day,Year,8 14 3 BPA_wind,BPA_hydro
sim_BPA_hydro = pd.read_csv('PNW_hydro/FCRPS/Path_dams.csv',header=None)
sim_BPA_hydro=sim_BPA_hydro.values
sim_BPA_hydro=np.sum(sim_BPA_hydro,axis=1)/24
#What is the common length
effect_sim_year=int(len(sim_BPA_hydro)/365)
sim_month=sim_month[:len(sim_BPA_hydro)]
sim_day=sim_day[:len(sim_BPA_hydro)]
sim_year=sim_year[:len(sim_BPA_hydro)]
sim_dow= sim_dow[:len(sim_BPA_hydro)]
sim_wind_power=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_BPA_wind_power= sim_wind_power.loc[:,'BPA']/24
sim_wind_daily = np.zeros((effect_sim_year*365,1))
for i in range(0,effect_sim_year*365):
sim_wind_daily[i] = np.sum((sim_BPA_wind_power.loc[i*24:i*24+24]))
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path8'}, inplace=True)
df_data_sim.rename(columns={4:'Path14'}, inplace=True)
df_data_sim.rename(columns={5:'Path3'}, inplace=True)
df_data_sim.rename(columns={6:'BPA_wind'}, inplace=True)
df_data_sim.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data_sim.rename(columns={8:'Weekday'}, inplace=True)
df_data_sim.rename(columns={9:'Salem_HDD'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
upper = [1900,1500,1900]
lower = [-600,-900,-2200]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'BPA_wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_NW' + str(line)
name_2='feb_reg_NW' + str(line)
name_3='mar_reg_NW' + str(line)
name_4='apr_reg_NW' + str(line)
name_5='may_reg_NW' + str(line)
name_6='jun_reg_NW' + str(line)
name_7='jul_reg_NW' + str(line)
name_8='aug_reg_NW' + str(line)
name_9='sep_reg_NW' + str(line)
name_10='oct_reg_NW' + str(line)
name_11='nov_reg_NW' + str(line)
name_12='dec_reg_NW' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
syn_Path8=predicted_Path8+syn_residuals[:effect_sim_year*365,5]
syn_Path14=predicted_Path14+syn_residuals[:effect_sim_year*365,6]
syn_Path3=predicted_Path3+syn_residuals[:effect_sim_year*365,7]
bias = np.mean(syn_Path8) - np.mean(NWPaths_y[:,0])
syn_Path8 = syn_Path8 - bias
bias = np.mean(syn_Path14) - np.mean(NWPaths_y[:,1])
syn_Path14 = syn_Path14 - bias
bias = np.mean(syn_Path3) - np.mean(NWPaths_y[:,2])
syn_Path3 = syn_Path3 - bias
S = df_data_sim.values
HO = H.values
stats = np.zeros((69,4))
for i in range(0,69):
stats[i,0] = np.mean(S[:,i])
stats[i,1] = np.mean(HO[:,i])
stats[i,2] = np.std(S[:,i])
stats[i,3] = np.std(HO[:,i])
################################################################################
###################################################
## PATH 65 & 66
###################################################
col_6566_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_6566_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_6566_T)
P6566_sim_T=sim_weather[col_6566_T].values
P6566_sim_W=sim_weather[col_6566_W].values
P6566_sim_W =P6566_sim_W*2.23694
sim_days = len(sim_weather)
P6566_sim_T_F=(P6566_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-P6566_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,P6566_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P6566_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P6566_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,syn_Path3,syn_Path8,syn_Path14,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path65'}, inplace=True)
df_data_sim.rename(columns={4:'Path66'}, inplace=True)
df_data_sim.rename(columns={5:'Wind'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path65','Path66']
upper = [3100,4300]
lower = [-2210,-500]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'Wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
import estimators
import matplotlib.pyplot as plt
import numpy as np
import argparse
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from time import time
def annStructureTest():
sample_list = [int(1e3), int(1e4), int(1e5)]
estimator_list, descriptions = estimators.get()
figure, ax_list = plt.subplots(len(sample_list), len(
estimator_list), sharex=True, sharey=True)
figure.suptitle("ANN regression fit of $y=x^2$")
for sample_idx, num_samples in enumerate(sample_list):
X = np.linspace(-10, 10, num_samples).reshape(-1, 1)
y = np.ravel(np.square(X))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,
random_state=0)
ax_list[sample_idx, 0].set_ylabel(
str(len(X_train)) + " training samples")
for estimator_idx, est in enumerate(estimator_list):
print("Training with " + str(len(X_train)) +
" samples and " + descriptions[estimator_idx] + "...")
tic = time()
est.fit(X_train, y_train)
print("done in {:.3f}s".format(time() - tic))
y_est = est.predict(X_test)
err = np.absolute(y_est - y_test)
rel_err = np.absolute(np.divide(y_est - y_test, y_test))
print("Mean error: {:.2f}".format(np.mean(err)))
print("Max error: {:.2f}".format(np.max(err)))
print("Mean relative error: {:.2f}\n".format(np.mean(rel_err)))
ax_list[sample_idx, estimator_idx].scatter(
X_test, y_est, color='r')
ax_list[sample_idx, estimator_idx].plot(X, y)
ax_list[sample_idx, estimator_idx].set_title(
descriptions[estimator_idx])
ax_list[sample_idx, estimator_idx].set_xlabel(
"$\epsilon_\mu=${:.2f} $\epsilon_{{max}}=${:.2f}".format(np.mean(err), np.max(err)))
plt.tight_layout()
def extrapolationTest():
num_samples = int(1e5)
est = MLPRegressor(hidden_layer_sizes=(50, 50),
learning_rate_init=0.01,
early_stopping=True)
plt.figure()
plt.title(
"ANN regression fit of $y=x^2$, with extrapolation outside of [-10 10]")
X_train = np.linspace(-10, 10, num_samples).reshape(-1, 1)
y_train = np.ravel(np.square(X_train))
X_test = np.linspace(-100, 100, int(10e3)).reshape(-1, 1)
y_test = np.ravel(np.square(X_test))
plt.ylabel(str(len(X_train)) + " training samples")
print("Training with " + str(len(X_train)) +
" samples and 50 neurons, 2 layers...")
tic = time()
est.fit(X_train, y_train)
print("done in {:.3f}s".format(time() - tic))
y_est = est.predict(X_test)
err = np.absolute(y_est - y_test)
rel_err = np.absolute(np.divide(y_est - y_test, y_test))
print("Mean error: {:.2f}".format(np.mean(err)))
print("Max error: {:.2f}".format(np.max(err)))
print("Mean relative error: {:.2f}\n".format(np.mean(rel_err)))
plt.scatter(
X_test, y_est, color='r')
plt.plot(X_test, y_test)
plt.xlabel(
"$\epsilon_\mu=${:.2f} $\epsilon_{{max}}=${:.2f}".format(np.mean(err), np.max(err)))
plt.tight_layout()
def interpolationTest():
num_samples = int(1e5)
est = MLPRegressor(hidden_layer_sizes=(50, 50),
learning_rate_init=0.01,
early_stopping=True)
plt.figure()
plt.title(
"ANN regression fit of $y=x^2$, with void interpolation in [-10, 10]")
X_train = np.linspace(-100, 100, num_samples)
X_train = X_train[np.where(np.abs(X_train) > 10)].reshape(-1, 1)
y_train = np.ravel(np.square(X_train))
X_test = np.linspace(-100, 100, int(10e3)).reshape(-1, 1)
y_test = np.ravel(np.square(X_test))
# -*- coding: UTF-8 -*-
from ZUI_MDP_solution import *
from unittest import TestCase
import itertools as it
import numpy as np
import numpy.testing as nptest
# Taken from http://www.neuraldump.net/2017/06/how-to-suppress-python-unittest-warnings/.
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test_func(self, *args, **kwargs)
return do_test
class TestGridWorld2x2(TestCase):
rtol = 1e-4 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('2x2')
def test_is_obstacle_at(self):
self.assertFalse(self.gw._is_obstacle([0, 0]))
self.assertFalse(self.gw._is_obstacle([0, 1]))
self.assertFalse(self.gw._is_obstacle([1, 0]))
self.assertFalse(self.gw._is_obstacle([1, 1]))
def test_is_on_grid_true(self):
self.assertTrue(self.gw._is_on_grid([0, 0]),msg='The point [{},{}] should be on the grid.'.format(0, 0))
self.assertTrue(self.gw._is_on_grid([1, 1]), msg='The point [{},{}] should be on the grid.'.format(1, 1))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [2,0], [0,2], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_2x2_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q_from_V_zeros(self):
V = np.zeros((self.gw.n_states + 1,))
desired_Q = np.array([[-0.04, -0.04, -0.04, -0.04],
[1., 1., 1., 1.],
[-0.04, -0.04, -0.04, -0.04],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q_from_V_ones(self):
V = np.ones((self.gw.n_states + 1,))
desired_Q = np.array([[0.96, 0.96, 0.96, 0.96],
[2., 2., 2., 2.],
[0.96, 0.96, 0.96, 0.96],
[0., 0., 0., 0.],
[1., 1., 1., 1.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q_from_V_init(self):
V = np.max(self.gw.rewards,axis=1)
desired_Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.array([0.752, 1., -0.08, -1., 0.])
Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.array([0.752, 1., -0.08, -1., 0.])
Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.array([0.9178081, 1., 0.66027364, -1., 0., ])
Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
policy = np.array([1, 0, 0, 0, 0], dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q,policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_policy = np.array([1, 0, 0, 0, 0], dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld3x3(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('3x3')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [3,0], [0,3], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_3x3_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_3x3_V_single_iter.npy')
Q = np.load('./test_data/test_gw_3x3_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_3x3_V.npy')
Q = np.load('./test_data/test_gw_3x3_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_3x3_V.npy')
Q = np.load('./test_data/test_gw_3x3_Q.npy')
policy = np.array([1, 1, 0, 0, 3, 0, 0, 3, 2, 0],dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_policy = np.array([1, 1, 0, 0, 3, 0, 0, 3, 2, 0],dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_3x3_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
#actual_policy = self.gw.Q2policy(actual_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld3x4(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('3x4')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if i == 1 and j == 1:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
self.assertTrue(self.gw._is_obstacle([1, 1]), msg='An obstacle should be at [{},{}].'.format(1, 1))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [3,0], [0,4], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_3x4_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_3x4_V_single_iter.npy')
import argparse
from design_search import RobotDesignEnv, make_graph, build_normalized_robot, presimulate, simulate
import mcts
import numpy as np
import os
import pyrobotdesign as rd
import random
import tasks
import time
class CameraTracker(object):
def __init__(self, viewer, sim, robot_idx):
self.viewer = viewer
self.sim = sim
self.robot_idx = robot_idx
self.reset()
def update(self, time_step):
lower = np.zeros(3)
upper = np.zeros(3)
self.sim.get_robot_world_aabb(self.robot_idx, lower, upper)
# Update camera position to track the robot smoothly
target_pos = 0.5 * (lower + upper)
camera_pos = self.viewer.camera_params.position.copy()
camera_pos += 5.0 * time_step * (target_pos - camera_pos)
self.viewer.camera_params.position = camera_pos
def reset(self):
lower = np.zeros(3)
upper = np.zeros(3)
self.sim.get_robot_world_aabb(self.robot_idx, lower, upper)
self.viewer.camera_params.position = 0.5 * (lower + upper)
def run_trajectory(sim, robot_idx, input_sequence, task, step_callback):
step_callback(0)
for j in range(input_sequence.shape[1]):
for k in range(task.interval):
step_idx = j * task.interval + k
sim.set_joint_targets(robot_idx, input_sequence[:,j].reshape(-1, 1))
task.add_noise(sim, step_idx)
sim.step()
step_callback(step_idx + 1)
def view_trajectory(sim, robot_idx, input_sequence, task):
record_step_indices = set()
sim.save_state()
viewer = rd.GLFWViewer()
# Get robot bounds
lower = np.zeros(3)
upper = np.zeros(3)
"""Module containing low-level functions to classify gridded
radar / lidar measurements.
"""
from typing import List
import numpy as np
import skimage
from numpy import ma
from cloudnetpy import utils
from cloudnetpy.categorize import droplet, falling, freezing, insects, melting
from cloudnetpy.categorize.containers import ClassData, ClassificationResult
def classify_measurements(data: dict) -> ClassificationResult:
"""Classifies radar/lidar observations.
This function classifies atmospheric scatterers from the input data.
The input data needs to be averaged or interpolated to the common
time / height grid before calling this function.
Args:
data: Containing :class:`Radar`, :class:`Lidar`, :class:`Model` and :class:`Mwr` instances.
Returns:
A :class:`ClassificationResult` instance.
References:
The Cloudnet classification scheme is based on methodology proposed by
<NAME>. and <NAME>., 2004, https://bit.ly/2Yjz9DZ and its
proprietary Matlab implementation.
Notes:
Some of the individual classification methods are changed in this Python
implementation compared to the original Cloudnet methodology.
Especially methods classifying insects, melting layer and liquid droplets.
"""
obs = ClassData(data)
bits: List[np.ndarray] = [np.array([])] * 6
liquid = droplet.find_liquid(obs)
bits[3] = melting.find_melting_layer(obs)
bits[2] = freezing.find_freezing_region(obs, bits[3])
bits[0] = droplet.correct_liquid_top(obs, liquid, bits[2], limit=500)
bits[5] = insects.find_insects(obs, bits[3], bits[0])
bits[1] = falling.find_falling_hydrometeors(obs, bits[0], bits[5])
bits, filtered_ice = _filter_falling(bits)
for _ in range(5):
bits[3] = _fix_undetected_melting_layer(bits)
bits = _filter_insects(bits)
bits[4] = _find_aerosols(obs, bits[1], bits[0])
bits[4][filtered_ice] = False
return ClassificationResult(
_bits_to_integer(bits), obs.is_rain, obs.is_clutter, liquid["bases"], obs.rain_rate
)
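# Illustrative usage (a sketch, not part of the original module; the "model" and
# "mwr" keys are assumptions based on the docstring, while "radar" and "lidar"
# are used elsewhere in this module):
# classification = classify_measurements(
#     {"radar": radar, "lidar": lidar, "model": model, "mwr": mwr}
# )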
def fetch_quality(data: dict, classification: ClassificationResult, attenuations: dict) -> dict:
"""Returns Cloudnet quality bits.
Args:
data: Containing :class:`Radar` and :class:`Lidar` instances.
classification: A :class:`ClassificationResult` instance.
attenuations: Dictionary containing keys `liquid_corrected`, `liquid_uncorrected`.
Returns:
Dictionary containing `quality_bits`, an integer array with the bits:
- bit 0: Pixel contains radar data
- bit 1: Pixel contains lidar data
- bit 2: Pixel contaminated by radar clutter
- bit 3: Molecular scattering present (currently not implemented!)
- bit 4: Pixel was affected by liquid attenuation
- bit 5: Liquid attenuation was corrected
- bit 6: Data gap in radar or lidar data
"""
bits: List[np.ndarray] = [np.ndarray([])] * 7
radar_echo = data["radar"].data["Z"][:]
bits[0] = ~radar_echo.mask
bits[1] = ~data["lidar"].data["beta"][:].mask
bits[2] = classification.is_clutter
bits[4] = attenuations["liquid_corrected"] | attenuations["liquid_uncorrected"]
bits[5] = attenuations["liquid_corrected"]
qbits = _bits_to_integer(bits)
return {"quality_bits": qbits}
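# Worked example of the bit packing above (illustrative): a pixel seen by both
# radar and lidar whose liquid attenuation was detected and corrected sets
# bits 0, 1, 4 and 5, so its quality value is 2**0 + 2**1 + 2**4 + 2**5 = 51.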
def _find_aerosols(obs: ClassData, is_falling: np.ndarray, is_liquid: np.ndarray) -> np.ndarray:
"""Estimates aerosols from lidar backscattering.
Aerosols are lidar signals that are: a) not falling, b) not liquid droplets.
Args:
obs: A :class:`ClassData` instance.
is_falling: 2-D boolean array of falling hydrometeors.
is_liquid: 2-D boolean array of liquid droplets.
Returns:
2-D boolean array containing aerosols.
"""
is_beta = ~obs.beta.mask
return is_beta & ~is_falling & ~is_liquid
def _fix_undetected_melting_layer(bits: list) -> np.ndarray:
melting_layer = bits[3]
drizzle_and_falling = _find_drizzle_and_falling(*bits[:3])
transition = ma.diff(drizzle_and_falling, axis=1) == -1
melting_layer[:, 1:][transition] = True
return melting_layer
def _find_drizzle_and_falling(
is_liquid: np.ndarray, is_falling: np.ndarray, is_freezing: np.ndarray
) -> np.ndarray:
"""Classifies pixels as falling, drizzle and others.
Args:
is_liquid: 2D boolean array denoting liquid layers.
is_falling: 2D boolean array denoting falling pixels.
is_freezing: 2D boolean array denoting subzero temperatures.
Returns:
2D array where values are 1 (falling, drizzle, supercooled liquids),
2 (drizzle), and masked (all others).
"""
falling_dry = is_falling & ~is_liquid
supercooled_liquids = is_liquid & is_freezing
drizzle = falling_dry & ~is_freezing
drizzle_and_falling = falling_dry.astype(int) + drizzle.astype(int)
drizzle_and_falling = ma.copy(drizzle_and_falling)
drizzle_and_falling[supercooled_liquids] = 1
drizzle_and_falling[drizzle_and_falling == 0] = ma.masked
return drizzle_and_falling
def _bits_to_integer(bits: list) -> np.ndarray:
"""Creates array of integers from individual boolean arrays.
Args:
bits: List of bit fields (of similar sizes) to be saved in the resulting array of integers.
bits[0] is saved as bit 0, bits[1] as bit 1, etc.
Returns:
Array of integers containing the information of the individual boolean arrays.
"""
int_array = np.zeros_like(bits[0], dtype=int)
for n, bit in enumerate(bits):
ind = np.where(bit) # works also if bit is None
int_array[ind] = utils.setbit(int_array[ind].astype(int), n)
return int_array
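# Illustrative example of the packing performed by _bits_to_integer:
# bits = [np.array([True, False]), np.array([False, False]), np.array([True, True])]
# _bits_to_integer(bits) -> array([5, 4])   # 5 = 2**0 + 2**2, 4 = 2**2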
def _filter_insects(bits: list) -> list:
is_melting_layer = bits[3]
is_insects = bits[5]
is_falling = bits[1]
# Remove above melting layer
above_melting = utils.ffill(is_melting_layer)
ind = np.where(is_insects & above_melting)
is_falling[ind] = True
is_insects[ind] = False
# remove around melting layer:
original_insects = np.copy(is_insects)
n_gates = 5
for x, y in zip(*np.where(is_melting_layer)):
try:
# change insects to drizzle below melting layer pixel
ind1 = np.arange(y - n_gates, y)
ind11 = np.where(original_insects[x, ind1])[0]
n_drizzle = sum(is_falling[x, :y])
if n_drizzle > 5:
is_falling[x, ind1[ind11]] = True
is_insects[x, ind1[ind11]] = False
else:
continue
# change insects on the right and left of melting layer pixel to drizzle
ind1 = np.arange(x - n_gates, x + n_gates + 1)
ind11 = np.where(original_insects[ind1, y])[0]
is_falling[ind1[ind11], y - 1 : y + 2] = True
is_insects[ind1[ind11], y - 1 : y + 2] = False
except IndexError:
continue
bits[1] = is_falling
bits[5] = is_insects
return bits
def _filter_falling(bits: list) -> tuple:
# filter falling ice speckle noise
is_freezing = bits[2]
is_falling = bits[1]
is_falling_filtered = skimage.morphology.remove_small_objects(is_falling, 10, connectivity=1)
is_filtered = is_falling & ~np.array(is_falling_filtered)
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Compresses and pads the weights. It also calculates the scales and packs with the biases.
import math
from collections import namedtuple
from typing import Tuple
import numpy as np
from .api import NpuBlockTraversal
from .architecture_features import Accelerator
from .architecture_features import ArchitectureFeatures
from .data_type import DataType
from .errors import UnsupportedFeatureError
from .nn_graph import SchedulingStrategy
from .numeric_util import round_up
from .numeric_util import round_up_divide
from .operation import NpuBlockType
from .operation import Op
from .scaling import quantise_scale
from .scaling import reduced_quantise_scale
from .tensor import create_equivalence_id
from .tensor import TensorBlockTraversal
from .tensor import TensorFormat
from .tensor import TensorPurpose
from .tensor import TensorSubPurpose
from ethosu import mlw_codec
# Contains meta info for a weight compression. If two tensors have identical weight compression config,
# then they also will have identical compressed weights.
WeightCompressionConfig = namedtuple(
"WeightCompressionConfig", ["npu_block_type", "ofm_block_depth", "ofm_depth_step", "dilation", "value_id"]
)
def encode_weights(
accelerator: Accelerator,
weights_volume: np.ndarray,
dilation_xy: Tuple[int, int],
ifm_bitdepth: int,
ofm_block_depth: int,
is_depthwise: bool,
block_traversal: NpuBlockTraversal,
):
"""
Internal implementation of the public facing API to use weight encoding.
:param accelerator: architecture_features.Accelerator enum to pick the correct Ethos-U accelerator
:param weights_volume: numpy.ndarray in OHWI layout with a shape of four
:param dilation_xy: a two element tuple of dilation attributes in x,y dimension
:param ifm_bitdepth: the bitdepth of input feature map
:param ofm_block_depth: the depth of blocks for Ethos-U processing
:param is_depthwise: a boolean indicating these weights are used for a depthwise traversal
:param block_traversal: indicates how these weights are traversed on sub-kernel basis
:return: a bytearray of compressed weights
"""
# Check arg types
assert isinstance(accelerator, Accelerator)
assert isinstance(weights_volume, np.ndarray)
assert isinstance(dilation_xy, tuple)
assert isinstance(ifm_bitdepth, int)
assert isinstance(ofm_block_depth, int)
assert isinstance(is_depthwise, bool)
assert isinstance(block_traversal, NpuBlockTraversal)
# Checks for weight layout
assert len(weights_volume.shape) == 4, "weights ndarray should have a shape of 4"
# It cannot be both partkernel and depthwise
assert not (
is_depthwise and block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST
), "encode_weights :: partkernel and depthwise are mutually exclusive"
# Check valid values for dilation
assert dilation_xy[0] in (1, 2), "encode_weights :: dilation x should be 1 or 2 not {}".format(dilation_xy[0])
assert dilation_xy[1] in (1, 2), "encode_weights :: dilation y should be 1 or 2 not {}".format(dilation_xy[1])
ifm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ifm_ublock
ofm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ofm_ublock
raw_stream = generate_brick(
ifm_ublock=ifm_ublock,
ofm_ublock=ofm_ublock,
brick_weights=weights_volume,
ofm_block_depth=ofm_block_depth,
is_depthwise=is_depthwise,
is_partkernel=block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST,
ifm_bitdepth=ifm_bitdepth,
dilation=dilation_xy,
)
encoded_stream = encode(raw_stream)
return encoded_stream
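# Illustrative call (kept as a comment; the Accelerator member name below is an
# assumption -- check architecture_features.Accelerator for the exact spelling):
# encoded = encode_weights(
#     accelerator=Accelerator.Ethos_U55_128,
#     weights_volume=np.zeros((16, 1, 1, 32), dtype=np.int64),  # OHWI layout
#     dilation_xy=(1, 1),
#     ifm_bitdepth=8,
#     ofm_block_depth=16,
#     is_depthwise=False,
#     block_traversal=NpuBlockTraversal.DEPTH_FIRST,
# )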
def encode_bias(bias: np.int64, scale: int, shift: int):
"""
Internal implementation of public facing API to pack bias and scale values as required by the Ethos-U
:param bias: 64bit signed number that includes 40bit signed bias
:param scale: 32bit scale value
:param shift: 6bit shift value
:return: packed 80bit [0(2-bits),shift(6-bits),scale(32-bits),bias(40-bits)]
"""
# Check arg types
assert isinstance(bias, np.int64)
assert isinstance(scale, int)
assert isinstance(shift, int)
assert -(1 << (40 - 1)) <= bias < (1 << (40 - 1)) # signed 40-bit range
assert 0 <= scale < (1 << 32) # unsigned 32-bit range
assert 0 <= shift < (1 << 6) # unsigned 6-bit range
data = bytearray(10)
data[0] = (bias >> (0 * 8)) & 0xFF
data[1] = (bias >> (1 * 8)) & 0xFF
data[2] = (bias >> (2 * 8)) & 0xFF
data[3] = (bias >> (3 * 8)) & 0xFF
data[4] = (bias >> (4 * 8)) & 0xFF
data[5] = (scale >> (0 * 8)) & 0xFF
data[6] = (scale >> (1 * 8)) & 0xFF
data[7] = (scale >> (2 * 8)) & 0xFF
data[8] = (scale >> (3 * 8)) & 0xFF
data[9] = shift & 0x3F
return data
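# Worked example of the packing above (illustrative): encode_bias(np.int64(256), 1, 4)
# returns the ten bytes 00 01 00 00 00 | 01 00 00 00 | 04, i.e. the 40-bit bias 256
# in little-endian order, then the 32-bit scale 1, then the 6-bit shift 4.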
def create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
# Note: for an ofm block only its depth is used in weight compression.
# And block depth > ofm depth gives same result as block depth == ofm depth
block_depth = min(ofm_block_depth, tens.quant_values.shape[-1])
return WeightCompressionConfig(npu_block_type, block_depth, ofm_depth_step, dilation, tens.value_id)
def set_storage_shape(tens):
# Sets the storage shape depending on the tensor's sub purpose
if tens.sub_purpose == TensorSubPurpose.DoubleBuffer and len(tens.compressed_values) > 2:
offset = 2 * np.amax([len(x) for x in tens.compressed_values])
assert offset % 16 == 0
else:
offset = tens.weight_compressed_offsets[-1]
tens.storage_shape = [1, 1, 1, offset]
class CompressedWeightCache:
# Contains weight compressions for all weight tensors in a graph
def __init__(self):
self.cache = {} # maps from WeightCompressionConfig to a tensor clone containing compressed weights
def get_tensor_with_same_compression(self, wcc):
return self.cache.get(wcc)
def add(self, tens):
# Adds the compressed weights from the tensor to the cache
wcc = tens.weight_compression_config
# Clone the tensor to make sure that nothing related to the weight compression is modified
tens_clone = tens.clone("_weights{}_{}".format(wcc.ofm_block_depth, wcc.ofm_depth_step))
self.cache[wcc] = tens_clone
def encode(weight_stream):
if len(weight_stream) == 0:
return []
assert np.amin(weight_stream) >= -255
assert np.amax(weight_stream) <= 255
# Encode flattened signed weight stream
compressed = mlw_codec.encode(weight_stream)
# pad with 0xFF as needed so the length of the weight stream
# is a multiple of 16
while (len(compressed) % 16) != 0:
compressed.append(0xFF)
return compressed
def generate_brick(
ifm_ublock, ofm_ublock, brick_weights, ofm_block_depth, is_depthwise, is_partkernel, ifm_bitdepth, dilation
):
decomp_h = ArchitectureFeatures.SubKernelMax.height // dilation[0]
decomp_w = ArchitectureFeatures.SubKernelMax.width // dilation[1]
# Expect weights formatted OHWI
ofm_depth = brick_weights.shape[-4]
ifm_depth = brick_weights.shape[-1]
kernel_width = brick_weights.shape[-2]
kernel_height = brick_weights.shape[-3]
# IFM block depth
if is_partkernel or (ifm_bitdepth == 16):
# IFM block depth is always 16 for part-kernel-first
ifm_block_depth = 16
elif ifm_bitdepth == 8:
ifm_block_depth = 32
else:
assert False
stream = []
# Top level striping - OFM blocks in the entire brick's depth
for ofm_block_z in range(0, ofm_depth, ofm_block_depth):
clipped_ofm_block_depth = min(ofm_block_depth, ofm_depth - ofm_block_z)
# IFM blocks required for the brick
for ifm_block_z in range(0, (1 if is_depthwise else ifm_depth), ifm_block_depth):
if is_depthwise:
clipped_ifm_block_depth = ifm_ublock.depth
else:
clipped_ifm_block_depth = (
min(ifm_block_depth, ifm_depth - ifm_block_z) if is_partkernel else ifm_block_depth
)
# Weight decomposition
# Subkernel Splitting (H)
for subkernel_y in range(0, kernel_height, decomp_h):
sub_height = min(kernel_height - subkernel_y, decomp_h)
# Subkernel splitting (W)
for subkernel_x in range(0, kernel_width, decomp_w):
sub_width = min(kernel_width - subkernel_x, decomp_w)
subkernel_elements = sub_width * sub_height
# Part kernel first works across the kernel H/W and needs padding
if is_partkernel:
if ifm_bitdepth == 16 and subkernel_elements % 2 != 0:
subkernel_elements = int(math.ceil(subkernel_elements / 2) * 2)
elif ifm_bitdepth == 8 and subkernel_elements % 4 != 0:
subkernel_elements = int(math.ceil(subkernel_elements / 4) * 4)
# Depthwise Conv requires multiple of 4 kernel elements in its weight block
# this is different from normal conv which is considered "weights depth-first"
elif is_depthwise:
subkernel_elements = int(math.ceil(subkernel_elements / 4.0) * 4)
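# Illustrative padding example: a 3x3 subkernel has 9 elements; part-kernel-first
# pads it to 12 for an 8-bit IFM (multiple of 4) or to 10 for a 16-bit IFM
# (multiple of 2), and a depthwise kernel is likewise padded up to 12.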
ifm_block_depth_outer = clipped_ifm_block_depth if is_partkernel else 1
ifm_block_depth_inner = 1 if is_partkernel else clipped_ifm_block_depth
# IFM Ublocks in IFM-block over depth for part-kernel-first mode
# For depth-first IFM Ublocks are traversed after subkernel elements so this loop is ignored.
for ifm_ublk_outer in range(0, ifm_block_depth_outer, ifm_ublock.depth):
# OFM Ublocks in OFM-block over depth
for ofm_ublk in range(0, clipped_ofm_block_depth, ofm_ublock.depth):
# HW Kernel element traversal - cannot be a H/W loop due to element
# padding requirement on depthwise/part-kernel configurations
for element in range(subkernel_elements):
kx = element % sub_width
ky = element // sub_width
# IFM Ublocks in IFM-block over depth (only 1 ublock if depthwise)
# In case of part-kernel-first IFM Ublock traversal have already been handled
# and this loop is ignored.
for ifm_ublk_inner in range(0, ifm_block_depth_inner, ifm_ublock.depth):
# Feed OFM ublock elements
for ofm_ublock_z in range(ofm_ublock.depth):
# Source IFM ublock elements (only 1 element deep if depthwise)
for ifm_ublock_z in range(1 if is_depthwise else ifm_ublock.depth):
# Source position within the current subkernel
wx = subkernel_x + kx
wy = subkernel_y + ky
# Source IFM/OFM slices
ifm_ublk = ifm_ublk_inner + ifm_ublk_outer
ifm_z = ifm_block_z + ifm_ublk + ifm_ublock_z
ofm_z = ofm_block_z + ofm_ublk + ofm_ublock_z
if (ifm_z >= ifm_depth) or (ofm_z >= ofm_depth) or (ky >= sub_height):
stream.append(0)
else:
stream.append(brick_weights[ofm_z][wy][wx][ifm_z])
return stream
def core_deinterleave(hwio, core, ncores):
# Put weights back into OHWI
ohwi = np.transpose(hwio, (3, 0, 1, 2))
return ohwi[core : ohwi.shape[0] : ncores]
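# Illustrative example: with ncores=2, core 0 receives output channels 0, 2, 4, ...
# and core 1 receives output channels 1, 3, 5, ... of the OHWI-transposed volume.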
# Compress the weights
def compress_weights(arch, nng, tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
assert tens.purpose == TensorPurpose.Weights
# Check the weight cache
if nng.weight_cache is None:
nng.weight_cache = CompressedWeightCache()
wcc = create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation)
tens.weight_compression_config = wcc
# Reassign equivalence id such that tensors with same weight compression get identical equivalence ids,
# but tensors with the same values but different compression get different equivalence ids
tens.equivalence_id = create_equivalence_id(wcc)
tens_cached = nng.weight_cache.get_tensor_with_same_compression(wcc)
if tens_cached is not None:
# Cache hit, copy weights from the cache
tens.copy_compressed_weight_info(tens_cached)
set_storage_shape(tens)
return
# No cache hit, perform the compression
assert tens.quantization is not None
assert tens.quantization.scale_f32 is not None
assert tens.quantization.zero_point is not None
zero_point = tens.quantization.zero_point
quant_buf = tens.quant_values.astype(np.int64)
# Early zero-point correction
weights = quant_buf - zero_point
if len(weights.shape) == 2:
weights = np.expand_dims(np.expand_dims(weights, axis=0), axis=0)
compression_scales = []
compressed_offsets = []
encoded_streams = []
encoded_streams_substream_offsets = []
offset = 0
max_single_buffer_len = 0
ifm_bitdepth = tens.consumer_list[0].inputs[0].dtype.size_in_bits()
ifm_depth = weights.shape[-2]
if npu_block_type == NpuBlockType.ConvolutionDepthWise:
tens.block_traversal = TensorBlockTraversal.DepthWise
if npu_block_type == NpuBlockType.ConvolutionMxN:
# Determine which block traversal strategy has better DPU utilization
kernel_size = weights.shape[0] * weights.shape[1]
depth_utilization = weights.shape[2] / round_up(weights.shape[2], 32 if ifm_bitdepth == 8 else 16)
part_kernel_utilization = (weights.shape[2] / round_up(weights.shape[2], 8)) * (
kernel_size / round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
)
if part_kernel_utilization >= depth_utilization or ifm_depth <= 8:
# Part-kernel first is always better for ifm depths <= 8
tens.block_traversal = TensorBlockTraversal.PartKernelFirst
else:
tens.block_traversal = TensorBlockTraversal.DepthFirst
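# Worked example of the selection above (illustrative): a 3x3 kernel with 16 input
# channels and an 8-bit IFM gives depth_utilization = 16/32 = 0.5 and
# part_kernel_utilization = (16/16) * (9/12) = 0.75, so part-kernel-first is chosen.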
is_depthwise = tens.block_traversal == TensorBlockTraversal.DepthWise
if tens.block_traversal == TensorBlockTraversal.PartKernelFirst:
block_traversal = NpuBlockTraversal.PART_KERNEL_FIRST
else:
block_traversal = NpuBlockTraversal.DEPTH_FIRST
if tens.consumer_list[0].type == Op.Conv2DBackpropInputSwitchedBias:
# Transpose Convolution, reverse weights in H and W axes
weights = np.flip(weights, axis=(0, 1))
# Calculate brick size
brick_size = (weights.shape[0], weights.shape[1], weights.shape[2], min(tens.shape[-1], ofm_depth_step))
elements_in_brick = np.prod(brick_size)
# Slice weight stream up depth-ways into bricks and compress
full_ofm_depth = quant_buf.shape[-1]
for idx in range(0, full_ofm_depth, ofm_depth_step):
# Get the weights necessary for this brick
count = min(full_ofm_depth - idx, ofm_depth_step)
brick_weights = weights[:, :, :, idx : idx + count]
substream_offsets = [0]
encoded_stream = []
# For each core, deinterleave weights from the larger volume
# and generate separate compressed streams.
for core in range(0, min(arch.ncores, full_ofm_depth)):
core_weights = core_deinterleave(brick_weights, core, arch.ncores)
block_depth = (ofm_block_depth + arch.ncores - 1 - core) // arch.ncores
encoded_substream = []
if block_depth != 0:
encoded_substream = encode_weights(
accelerator=arch.accelerator_config,
weights_volume=core_weights,
dilation_xy=dilation,
ifm_bitdepth=ifm_bitdepth,
ofm_block_depth=block_depth,
is_depthwise=is_depthwise,
block_traversal=block_traversal,
)
encoded_stream.extend(encoded_substream)
substream_offsets.append(len(encoded_stream))
encoded_streams.append(encoded_stream)
encoded_streams_substream_offsets.append(substream_offsets)
# Remember maximum encoded length for DoubleBuffering
max_single_buffer_len = max(max_single_buffer_len, len(encoded_stream))
# Remember where we put it for linear addressing
compressed_offsets.append(offset)
offset += len(encoded_stream)
assert offset % 16 == 0
# Compression scale tracking
compression_scales.append(len(encoded_stream) / elements_in_brick)
# Track total length as last element of the offsets array
compressed_offsets.append(offset)
tens.weight_compression_scales = compression_scales
tens.weight_compressed_offsets = compressed_offsets
tens.compression_scale_for_worst_weight_stream = np.amax(compression_scales)
tens.storage_compression_scale = tens.bandwidth_compression_scale = np.average(compression_scales)
tens.compressed_values = encoded_streams
tens.compressed_values_substream_offsets = encoded_streams_substream_offsets
tens.brick_size = brick_size
set_storage_shape(tens)
nng.weight_cache.add(tens)
def calc_scales_and_pack_biases(tens, arch, ofm_depth_step, rescale_for_faf=False):
assert tens.purpose in [TensorPurpose.FeatureMap, TensorPurpose.FSBias]
assert tens.format == TensorFormat.NHWC
# the connected operator should expect a bias input unless it is a FullyConnected
assert tens.consumer_list[0].type.needs_bias()
# the input bias tensor is the same as that connected to the operator
bias_tens = tens.consumer_list[0].bias
assert tens is bias_tens
# the operator should only have a single output
assert len(tens.consumer_list[0].outputs) == 1
biases = tens.quant_values
first_consumer_op = tens.consumer_list[0]
ifm_dtype = first_consumer_op.inputs[0].dtype
ifm_scale = first_consumer_op.inputs[0].quantization.scale_f32
ofm_scale = first_consumer_op.get_output_quantization().scale_f32
weight_scales = first_consumer_op.inputs[1].quantization.scale_f32
# biases can have multiple consumers for rnn cells. if so, then check that they are all the same
for op in tens.consumer_list[1:]:
assert ifm_scale == op.inputs[0].quantization.scale_f32
assert ofm_scale == op.get_output_quantization().scale_f32
assert weight_scales == op.inputs[1].quantization.scale_f32
if not hasattr(weight_scales, "__iter__"):
# If weight_scales is not already an iterable make it into a list
weight_scales = [weight_scales]
# Convert scales to np.double (from np.float32) to conform to TensorFlow Lite which
# uses double during scaling calculations
# TensorFlow Lite casts the scales slightly differently for uint8 and int8
if not rescale_for_faf:
if ifm_dtype == DataType.uint8:
scales = [np.double(ifm_scale * weight_scale) / np.double(ofm_scale) for weight_scale in weight_scales]
elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
scales = [
(np.double(ifm_scale) * np.double(weight_scale)) / np.double(ofm_scale)
for weight_scale in weight_scales
]
else:
raise UnsupportedFeatureError(
"Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
)
else:
if ifm_dtype == DataType.uint8:
scales = [np.double(ifm_scale * weight_scale * 0x3000) for weight_scale in weight_scales]
elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
scales = [(np.double(ifm_scale * 0x3000) * np.double(weight_scale)) for weight_scale in weight_scales]
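# Illustrative arithmetic (assumed values, not taken from any real network):
# for uint8 with ifm_scale = 0.5, weight_scale = 0.25 and ofm_scale = 0.125,
# the non-faf path above gives np.double(0.5 * 0.25) / np.double(0.125) = 1.0,
# while the rescale_for_faf path multiplies by the fixed 0x3000 factor instead
# of dividing by ofm_scale, e.g. np.double(0.5 * 0.25 * 0x3000) = 1536.0.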
#!/usr/bin/env python
# Copyright (C) 2009-2010 <NAME> (<EMAIL>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Music structure segmentation using SI-PLCA
This module contains an implementation of the algorithm for music
structure segmentation described in [1]. It is based on
Shift-invariant Probabilistic Latent Component Analysis, a variant of
convolutive non-negative matrix factorization (NMF). See plca.py for
more details.
Examples
--------
>>> import segmenter
>>> wavfile = '/path/to/come_together.wav'
>>> rank = 4 # rank corresponds to the number of segments
>>> win = 60 # win controls the length of each chroma pattern
>>> niter = 200 # number of iterations to perform
>>> np.random.seed(123) # Make this reproducible
>>> labels = segmenter.segment_wavfile(wavfile, win=win, rank=rank,
... niter=niter, plotiter=10)
INFO:plca:Iteration 0: divergence = 10.065992
INFO:plca:Iteration 50: divergence = 9.468196
INFO:plca:Iteration 100: divergence = 9.421632
INFO:plca:Iteration 150: divergence = 9.409279
INFO:root:Iteration 199: final divergence = 9.404961
INFO:segmenter:Removing 2 segments shorter than 32 frames
.. image:: come_together-segmentation.png
>>> print labels
0.0000 21.7480 segment0
21.7480 37.7640 segment1
37.7640 55.1000 segment0
55.1000 76.1440 segment1
76.1440 95.1640 segment0
95.1640 121.2360 segment1
121.2360 158.5360 segment2
158.5360 180.8520 segment1
180.8520 196.5840 segment0
196.5840 255.8160 segment3
See Also
--------
segmenter.extract_features : Beat-synchronous chroma feature extraction
segmenter.segment_song : Performs segmentation
segmenter.evaluate_segmentation : Evaluate frame-wise segmentation
segmenter.convert_labels_to_segments : Generate HTK formatted list of segments
from frame-wise labels
plca.SIPLCA : Implementation of Shift-invariant PLCA
References
----------
[1] <NAME> and <NAME>. "Identifying Repeated Patterns in
Music Using Sparse Convolutive Non-Negative Matrix
Factorization". In Proc. International Conference on Music
Information Retrieval (ISMIR), 2010.
Copyright (C) 2009-2010 <NAME> <<EMAIL>>
LICENSE: This module is licensed under the GNU GPL. See COPYING for details.
"""
import logging
import numpy as np
import sys, os
from os.path import join, basename, splitext
import msaf
import msaf.input_output as io
from msaf.algorithms.interface import SegmenterInterface
# Local stuff
import plca
SAVE = False
saveto = '/Users/mitian/Documents/hg/phd-docs/thesis/analysis/plca'
logging.basicConfig(level=logging.INFO,
format='%(levelname)s %(name)s %(asctime)s '
'%(filename)s:%(lineno)d %(message)s')
logger = logging.getLogger('segmenter')
def segment_song(seq, rank=4, win=32, seed=None,
nrep=1, minsegments=3, maxlowen=10, maxretries=5,
uninformativeWinit=False, uninformativeHinit=True,
viterbi_segmenter=False, align_downbeats=False, **kwargs):
"""Segment the given feature sequence using SI-PLCA
Parameters
----------
seq : array, shape (F, T)
Feature sequence to segment.
rank : int
Number of patterns (unique segments) to search for.
win : int
Length of patterns in frames.
seed : int
Random number generator seed. Defaults to None.
nrep : int
Number of times to repeat the analysis. The repetition with
the lowest reconstruction error is returned. Defaults to 1.
minsegments : int
Minimum number of segments in the output. The analysis is
repeated until the output contains at least `minsegments`
segments or `maxretries` is reached. Defaults to 3.
maxlowen : int
Maximum number of low energy frames in the SIPLCA
reconstruction. The analysis is repeated if it contains too
many gaps. Defaults to 10.
maxretries : int
Maximum number of retries to perform if `minsegments` or
`maxlowen` are not satisfied. Defaults to 5.
uninformativeWinit : boolean
If True, `W` is initialized to have a flat distribution.
Defaults to False.
uninformativeHinit : boolean
If True, `H` is initialized to have a flat distribution.
Defaults to True.
viterbi_segmenter : boolean
If True, uses the Viterbi algorithm to convert the SIPLCA
decomposition into a segmentation, otherwise uses the process
described in [1]. Defaults to False.
align_downbeats : boolean
If True, postprocess the SIPLCA analysis to find the optimal
alignments of the components of W with V. I.e. try to align
the first column of W to the downbeats in the song. Defaults
to False.
kwargs : dict
Keyword arguments passed to plca.SIPLCA.analyze. See
plca.SIPLCA for more details.
Returns
-------
labels : array, length `T`
Segment label for each frame of `seq`.
W : array, shape (`F`, `rank`, `win`)
Set of `F` x `win` shift-invariant basis functions found in `seq`.
Z : array, length `rank`
Set of mixing weights for each basis.
H : array, shape (`rank`, `T`)
Activations of each basis in time.
segfun : array, shape (`rank`, `T`)
Raw segmentation function used to generate segment labels from
SI-PLCA decomposition. Corresponds to $\ell_k(t)$ in [1].
norm : float
Normalization constant to make `seq` sum to 1.
Notes
-----
The experimental results reported in [1] were found using the
default values for all keyword arguments while varying kwargs.
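Examples
--------
A minimal sketch (assuming `feats` is an F x T beat-synchronous chroma matrix,
e.g. as produced by extract_features):
>>> labels, W, Z, H, segfun, norm = segment_song(feats, rank=4, win=32)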
"""
seq = seq.copy()
#logger.debug('Using random seed %s.', seed)
np.random.seed(seed)
if 'alphaWcutoff' in kwargs and 'alphaWslope' in kwargs:
kwargs['alphaW'] = create_sparse_W_prior((seq.shape[0], win),
kwargs['alphaWcutoff'],
kwargs['alphaWslope'])
del kwargs['alphaWcutoff']
del kwargs['alphaWslope']
F, T = seq.shape
if uninformativeWinit:
kwargs['initW'] = np.ones((F, rank, win)) / (F*win)
if uninformativeHinit:
kwargs['initH'] = np.ones((rank, T)) / T
outputs = []
for n in xrange(nrep):
outputs.append(plca.SIPLCA.analyze(seq, rank=rank, win=win, **kwargs))
div = [x[-1] for x in outputs]
W, Z, H, norm, recon, div = outputs[np.argmin(div)]
# Need to rerun segmentation if there are too few segments or
# if there are too many gaps in recon (i.e. H)
lowen = seq.shape[0] * np.finfo(float).eps
nlowen_seq = np.sum(seq.sum(0) <= lowen)
if nlowen_seq > maxlowen:
maxlowen = nlowen_seq
nlowen_recon = np.sum(recon.sum(0) <= lowen)
nretries = maxretries
while (len(Z) < minsegments or nlowen_recon > maxlowen) and nretries > 0:
nretries -= 1
#logger.debug('Redoing SIPLCA analysis (len(Z) = %d, number of '
#'low energy frames = %d).', len(Z), nlowen_recon)
outputs = []
for n in xrange(nrep):
outputs.append(plca.SIPLCA.analyze(seq, rank=rank, win=win,
**kwargs))
div = [x[-1] for x in outputs]
W, Z, H, norm, recon, div = outputs[np.argmin(div)]
nlowen_recon = np.sum(recon.sum(0) <= lowen)
if viterbi_segmenter:
segmentation_function = nmf_analysis_to_segmentation_using_viterbi_path
else:
segmentation_function = nmf_analysis_to_segmentation
labels, segfun = segmentation_function(seq, win, W, Z, H, **kwargs)
return labels, W, Z, H, segfun, norm
def create_sparse_W_prior(shape, cutoff, slope):
"""Constructs sparsity parameters for W (alphaW) to learn pattern length
Follows equation (6) in the ISMIR paper referenced in this
module's docstring.
"""
# W.shape is (ndim, nseg, nwin)
prior = np.zeros(shape[-1])
prior[cutoff:] = prior[0] + slope * np.arange(shape[-1] - cutoff)
alphaW = np.zeros((shape[0], 1, shape[-1]))
alphaW[:,:] = prior
return alphaW
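# Illustrative sketch (assumed parameters): for shape=(12, 40), cutoff=30 and
# slope=0.5 the returned alphaW has shape (12, 1, 40); the prior is zero over the
# first 30 columns of each pattern and then ramps linearly as 0.0, 0.5, ..., 4.5
# over the last 10 columns, so the sparsity pressure on the tail of each pattern
# grows with distance past `cutoff`, following equation (6) of the ISMIR paper.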
def nmf_analysis_to_segmentation(seq, win, W, Z, H, min_segment_length=32,
use_Z_for_segmentation=True, **ignored_kwargs):
if not use_Z_for_segmentation:
Z = np.ones(Z.shape)
segfun = []
for n, (w,z,h) in enumerate(zip(np.transpose(W, (1, 0, 2)), Z, H)):
reconz = plca.SIPLCA.reconstruct(w, z, h)
score = np.sum(reconz, 0)
# Smooth it out
score = np.convolve(score, np.ones(min_segment_length), 'same')
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 13 13:59:45 2016
@author: sksuram
"""
import os,sys,numpy as np,matplotlib.pyplot as plt,sys
sys.path.append(r'E:\GitHub\JCAPDataProcess\AuxPrograms')
from fcns_io import *
from DBPaths import *
sys.path.append(r'E:\GitHub\JCAPDataProcess\AnalysisFunctions')
sys.path.append(r'E:\python-codes\CALTECH\datahandling')
from Analysis_Master import *
from datamanipulation import *
xyfolder=r'K:\experiments\xrds\Lan\Oxynitride\30801_TaLaON_postRTP\pmpy24p3_line\integration\bkg_subtraction'
plateidstr=xyfolder.split(os.sep)[-4].split('_')[0][0:-1]
Normalize=0.
write_udi=1
CompType='random' #options are 'random','calibrated','raw'
replace_xynan=False
replace_Int_lessthanzeros_val=1E-05
replace_Int_lessthanzeros=True
add_fn_str='_normalized' if Normalize else ''
udifl=os.path.join(xyfolder,r'udifl'+add_fn_str+'.txt')
fn=lambda x: os.path.join(xyfolder,x)
xyd={}
wl_CuKa=1.5406*0.1
replace_X_nan=50.; replace_Y_nan=47.23
xyfiles=filter(lambda x: os.path.splitext(x)[-1]=='.xy',map(fn,os.listdir(xyfolder)))
specinds=[]
xarr=[];yarr=[]
for xyind,xyfile in enumerate(xyfiles):
bxyf=os.path.basename(xyfile).split('.xy')[0]
xyfileinfo,xyd[xyind]=text2dict(xyfile,header_row=1,delimiter=' ',skip_footer=0,obscolnum=None,headerfromdata=False,header=['twoth','Inte'])
xarr+=[float(bxyf.split('pmpx')[-1].split('_')[0])]
yarr+=[float(bxyf.split('pmpy')[-1].split('_')[0])]
if 'specind' in bxyf:
specinds+=[int(bxyf.split('specind')[-1].split('_')[0])]
else:
specinds+=[int(xyind)]
xarr=np.array(xarr);yarr=np.array(yarr)
naninds_xarr=list(np.where(np.isnan(xarr))[0])
naninds_yarr=list(np.where(np.isnan(yarr))[0])
if replace_xynan:
xarr[naninds_xarr]=replace_X_nan
yarr[naninds_yarr]=replace_Y_nan
elif len(naninds_xarr)>0 or len(naninds_yarr)>0:
raise ValueError('NaNs exist in the position arrays and no replacement values were provided')
infofiled=importinfo(plateidstr)
udi_dict={}
udi_dict['ellabels']=[x for x in getelements_plateidstr(plateidstr) if x not in ['','O','Ar','N']]#,print_id=2 removed on 20161019 because no longer valid
udi_dict['X']=xarr
udi_dict['Y']=yarr
udi_dict['specind']=specinds
udi_dict['sample_no']=[]
udi_dict['plate_id']=[int(plateidstr)]
udi_dict['mX']=np.ones(np.shape(udi_dict['X']))*np.nan
udi_dict['mY']=np.ones(np.shape(udi_dict['Y']))*np.nan
getplatemappath_plateid(plateidstr)
pmap_path=getplatemappath_plateid(plateidstr)
pmpdl=readsingleplatemaptxt(pmap_path)
pmpx,pmpy,smplist=[[iter_d[fom] for iter_d in pmpdl] for fom in ['x','y','sample_no']]
udi_dict['sample_no']=[smplist[np.argmin((np.array(pmpx)-udi_dict['X'][row])**2+(np.array(pmpy)-udi_dict['Y'][row])**2)] for row in xrange(np.shape(udi_dict['X'])[0])]
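# Each measured (X, Y) position is assigned the sample_no of the nearest platemap
# point, found by minimizing the squared Euclidean distance in plate coordinates.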
intsn_twoth_range=np.arange(np.max([x['twoth'][0] for x in xyd.values()]),np.min([x['twoth'][-1] for x in xyd.values()]),0.005)
#intsn_twoth_range=np.arange(60.,np.min([x['twoth'][-1] for x in xyd.values()]),0.005)
for key in xyd.keys():
xyd[key]['interp_Inte']=np.interp(intsn_twoth_range,xyd[key]['twoth'],xyd[key]['Inte'])
Qarr=(4*np.pi*np.sin(np.radians(intsn_twoth_range/2.)))/(wl_CuKa)
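# Q is the scattering-vector magnitude Q = 4*pi*sin(theta)/lambda, with 2*theta in
# degrees and wl_CuKa = 0.15406 nm. Illustrative check with an assumed 2*theta of
# 30 deg: Q = 4*pi*sin(15 deg)/0.15406 ~ 21.1 nm^-1.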
Iarr=np.zeros([len(xyd.keys()),len(intsn_twoth_range)])
for idx,key in enumerate(sorted(xyd.keys())):
Iarr[idx,:]=xyd[key]['interp_Inte']
if replace_Int_lessthanzeros:
Iarr[idx,np.where(Iarr[idx,:]<0)[0]]=replace_Int_lessthanzeros_val
if Normalize:
for idx in xrange(np.shape(Iarr)[0]):
Iarr[idx,:]=Iarr[idx,:]/np.max(Iarr[idx,:])
udi_dict['Iarr']=Iarr
udi_dict['Q']=Qarr
udi_dict['Normalize']=False
if CompType=='random':
udi_dict['CompType']=CompType
num_samples=len(xarr)
comps=np.r_[[np.random.random(num_samples)
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
<NAME> and <NAME>. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
<NAME> and <NAME>. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
<NAME> and <NAME> (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
<NAME>, <NAME> (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from __future__ import division
from statsmodels.compat.python import range, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
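# Illustrative sketch (assumed values): with p = 2 parameters and the single
# constraint beta_1 = beta_2, lhs = [[1, -1]] and rhs = [0]. Then lhs1 spans the
# direction (1, -1)/sqrt(2), lhs0 spans its orthogonal complement (1, 1)/sqrt(2),
# and param0 = [0, 0] is one particular solution of lhs * param = rhs; the reduced
# model then estimates a single coefficient along the lhs0 direction.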
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array-like
2d array of exogenous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array-like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
Poisson | x x x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank; see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array-like
The data for the model.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array-like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array-like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : string or array-like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = np.dot(qc.T, score) / scale
amat = np.linalg.inv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
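# The accumulation above implements
#   Cov(S_2) = B_22 - A_12' A_11^{-1} B_12 - B_12' A_11^{-1} A_12
#              + A_12' A_11^{-1} B_11 A_11^{-1} A_12
# where A = inv(cov_naive) is the "bread" matrix, B is cmat (rescaled by
# 1/scale**2 above), and the blocks are partitioned over the submodel (1)
# and the remaining (2) parameters via qm and qc.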
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
The scale parameter for binomial, Poisson, and multinomial
families is fixed at 1, otherwise it is estimated from
the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
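# With unit weights (f = 1) this reduces to the usual Pearson-residual estimate:
# scale_hat = sum_ij resid_ij**2 / (nobs - ddof_scale).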
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogeneous data at which the derivative is computed.
lin_pred : array-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array-like
Values of the independent variables at which the derivative
is calculated.
params : array-like
Parameter values at which the derivative is calculated.
offset_exposure : array-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
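# Gauss-Seidel step for the mean structure: with D_i = d mu_i / d beta and V_i
# the working covariance of group i, the update below solves
#   (sum_i w_i D_i' V_i^{-1} D_i) delta = sum_i w_i D_i' V_i^{-1} (y_i - mu_i),
# i.e. one step of the GEE estimating equations.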
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array-like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array-like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
return None, None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
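# bmat is the "bread" sum_i w_i D_i' V_i^{-1} D_i and cmat is the "meat" built
# from the outer products of w_i D_i' V_i^{-1} (y_i - mu_i); below,
# cov_naive = scale * inv(bmat) and cov_robust = inv(bmat) cmat inv(bmat).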
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array-like
Parameters / coefficients of a marginal regression model.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
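Examples
--------
A minimal sketch (assuming `result` is a fitted GEE results instance and `xnew`
is a design matrix with the same columns as the model's exog):
>>> result.model.predict(result.params, exog=xnew)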
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
def _starting_params(self):
model = GLM(self.endog, self.exog, family=self.family,
offset=self._offset_exposure,
freq_weights=self.weights)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if x is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
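# `en` is the SCAD penalty derivative divided by (eps + |beta_j|):
#   q(|b|) = pen_wt * 1{|b| <= pen_wt}
#            + clip(scad_param*pen_wt - |b|, 0, inf) / (scad_param - 1) * 1{|b| > pen_wt}
# following the penalized GEE of Wang et al. referenced in fit_regularized.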
hm.flat[::hm.shape[0] + 1] += self.num_group * en
hm *= self.estimate_scale()
sn -= self.num_group * en * params
return np.linalg.solve(hm, sn), hm
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : integer
The maximum number of iterations.
ddof_scale : integer
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : integer
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
<NAME>, <NAME>, <NAME>. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
r"""
Special Functions
.................
This following standard C99 math functions are available:
M_PI, M_PI_2, M_PI_4, M_SQRT1_2, M_E:
$\pi$, $\pi/2$, $\pi/4$, $1/\sqrt{2}$ and Euler's constant $e$
exp, log, pow(x,y), expm1, log1p, sqrt, cbrt:
Power functions $e^x$, $\ln x$, $x^y$, $e^x - 1$, $\ln 1 + x$,
$\sqrt{x}$, $\sqrt[3]{x}$. The functions expm1(x) and log1p(x)
are accurate across all $x$, including $x$ very close to zero.
sin, cos, tan, asin, acos, atan:
Trigonometry functions and inverses, operating on radians.
sinh, cosh, tanh, asinh, acosh, atanh:
Hyperbolic trigonometry functions.
atan2(y,x):
Angle from the $x$\ -axis to the point $(x,y)$, which is equal to
$\tan^{-1}(y/x)$ corrected for quadrant. That is, if $x$ and $y$ are
both negative, then atan2(y,x) returns a value in quadrant III where
atan(y/x) would return a value in quadrant I. Similarly for
quadrants II and IV when $x$ and $y$ have opposite sign.
fabs(x), fmin(x,y), fmax(x,y), trunc, rint:
Floating point functions. rint(x) returns the nearest integer.
NAN:
NaN, Not a Number, $0/0$. Use isnan(x) to test for NaN. Note that
you cannot use :code:`x == NAN` to test for NaN values since that
will always return false. NAN does not equal NAN! The alternative,
:code:`x != x` may fail if the compiler optimizes the test away.
INFINITY:
$\infty, 1/0$. Use isinf(x) to test for infinity, or isfinite(x)
to test for finite and not NaN.
erf, erfc, tgamma, lgamma: **do not use**
Special functions that should be part of the standard, but are missing
or inaccurate on some platforms. Use sas_erf, sas_erfc and sas_gamma
instead (see below). Note: lgamma(x) has not yet been tested.
Some non-standard constants and functions are also provided:
M_PI_180, M_4PI_3:
$\frac{\pi}{180}$, $\frac{4\pi}{3}$
SINCOS(x, s, c):
Macro which sets s=sin(x) and c=cos(x). The variables *c* and *s*
must be declared first.
square(x):
$x^2$
cube(x):
$x^3$
sas_sinx_x(x):
$\sin(x)/x$, with limit $\sin(0)/0 = 1$.
powr(x, y):
$x^y$ for $x \ge 0$; this is faster than general $x^y$ on some GPUs.
pown(x, n):
$x^n$ for $n$ integer; this is faster than general $x^n$ on some GPUs.
FLOAT_SIZE:
The number of bytes in a floating point value. Even though all
variables are declared double, they may be converted to single
precision float before running. If your algorithm depends on
precision (which is not uncommon for numerical algorithms), use
the following::
#if FLOAT_SIZE>4
... code for double precision ...
#else
... code for single precision ...
#endif
SAS_DOUBLE:
A replacement for :code:`double` so that the declared variable will
stay double precision; this should generally not be used since some
graphics cards do not support double precision. There is no provision
for forcing a constant to stay double precision.
The following special functions and scattering calculations are defined.
These functions have been tuned to be fast and numerically stable down
to $q=0$ even in single precision. In some cases they work around bugs
which appear on some platforms but not others, so use them where needed.
Add the files listed in :code:`source = ["lib/file.c", ...]` to your *model.py*
file in the order given, otherwise these functions will not be available.
polevl(x, c, n):
Polynomial evaluation $p(x) = \sum_{i=0}^n c_i x^i$ using Horner's
method so it is faster and more accurate.
$c = \{c_n, c_{n-1}, \ldots, c_0 \}$ is the table of coefficients,
sorted from highest to lowest.
p1evl(x, c, n):
Evaluate normalized polynomial $p(x) = x^n + \sum_{i=0}^{n-1} c_i x^i$
using Horner's method so it is faster and more accurate.
$c = \{c_{n-1}, c_{n-2} \ldots, c_0 \}$ is the table of coefficients,
sorted from highest to lowest.
sas_gamma(x):
Gamma function $\text{sas_gamma}(x) = \Gamma(x)$.
The standard math function, tgamma(x) is unstable for $x < 1$
on some platforms.
sas_gammaln(x):
log gamma function sas_gammaln\ $(x) = \log \Gamma(|x|)$.
The standard math function, lgamma(x), is incorrect for single
precision on some platforms.
sas_gammainc(a, x), sas_gammaincc(a, x):
Incomplete gamma function
sas_gammainc\ $(a, x) = \int_0^x t^{a-1}e^{-t}\,dt / \Gamma(a)$
and complementary incomplete gamma function
sas_gammaincc\ $(a, x) = \int_x^\infty t^{a-1}e^{-t}\,dt / \Gamma(a)$
sas_erf(x), sas_erfc(x):
Error function
$\text{sas_erf}(x) = \frac{2}{\sqrt\pi}\int_0^x e^{-t^2}\,dt$
and complementary error function
$\text{sas_erfc}(x) = \frac{2}{\sqrt\pi}\int_x^{\infty} e^{-t^2}\,dt$.
The standard math functions erf(x) and erfc(x) are slower and broken
on some platforms.
sas_J0(x):
Bessel function of the first kind $\text{sas_J0}(x)=J_0(x)$ where
$J_0(x) = \frac{1}{\pi}\int_0^\pi \cos(x\sin(\tau))\,d\tau$.
The standard math function j0(x) is not available on all platforms.
sas_J1(x):
Bessel function of the first kind $\text{sas_J1}(x)=J_1(x)$ where
$J_1(x) = \frac{1}{\pi}\int_0^\pi \cos(\tau - x\sin(\tau))\,d\tau$.
The standard math function j1(x) is not available on all platforms.
sas_JN(n, x):
Bessel function of the first kind and integer order $n$:
$\text{sas_JN}(n, x)=J_n(x)$ where
$J_n(x) = \frac{1}{\pi}\int_0^\pi \cos(n\tau - x\sin(\tau))\,d\tau$.
If $n$ = 0 or 1, it uses sas_J0(x) or sas_J1(x), respectively.
The standard math function jn(n, x) is not available on all platforms.
sas_Si(x):
Sine integral $\text{Si}(x) = \int_0^x \tfrac{\sin t}{t}\,dt$.
This function uses a Taylor series for small arguments and an asymptotic expansion for large arguments:
For large arguments,
.. math::
\text{Si}(x) \sim \frac{\pi}{2}
- \frac{\cos(x)}{x}
\left(1 - \frac{2!}{x^2} + \frac{4!}{x^4} - \frac{6!}{x^6} \right)
- \frac{\sin(x)}{x}
\left(\frac{1}{x} - \frac{3!}{x^3} + \frac{5!}{x^5} - \frac{7!}{x^7}\right)
For small arguments,
.. math::
\text{Si}(x) \sim x
- \frac{x^3}{3\times 3!} + \frac{x^5}{5 \times 5!} - \frac{x^7}{7 \times 7!}
+ \frac{x^9}{9\times 9!} - \frac{x^{11}}{11\times 11!}
sas_3j1x_x(x):
Spherical Bessel form
$\text{sph_j1c}(x) = 3 j_1(x)/x = 3 (\sin(x) - x \cos(x))/x^3$,
with a limiting value of 1 at $x=0$, where $j_1(x)$ is the spherical
Bessel function of the first kind and first order.
This function uses a Taylor series for small $x$ for numerical accuracy.
sas_2J1x_x(x):
Bessel form $\text{sas_J1c}(x) = 2 J_1(x)/x$, with a limiting value
of 1 at $x=0$, where $J_1(x)$ is the Bessel function of first kind
and first order.
gauss76.n, gauss76.z[i], gauss76.w[i]:
Points $z_i$ and weights $w_i$ for 76-point Gaussian quadrature, respectively,
computing $\int_{-1}^1 f(z)\,dz \approx \sum_{i=1}^{76} w_i\,f(z_i)$.
When translating the model to C, include 'lib/gauss76.c' in the source
and use :code:`GAUSS_N`, :code:`GAUSS_Z`, and :code:`GAUSS_W`.
Similar arrays are available in :code:`gauss20` for 20-point quadrature
and :code:`gauss150.c` for 150-point quadrature. By using
:code:`import gauss76 as gauss` it is easy to change the number of
points in the integration.
"""
# pylint: disable=unused-import
import numpy as np
# Functions to add to our standard set
from numpy import degrees, radians
# C99 standard math library functions
from numpy import exp, log, power as pow, expm1, log1p, sqrt, cbrt
from numpy import sin, cos, tan, arcsin as asin, arccos as acos, arctan as atan
from numpy import sinh, cosh, tanh, arcsinh as asinh, arccosh as acosh, arctanh as atanh
from numpy import arctan2 as atan2
from numpy import fabs, fmin, fmax, trunc, rint
from numpy import pi, nan, inf
from scipy.special import gamma as sas_gamma
from scipy.special import gammaln as sas_gammaln
from scipy.special import gammainc as sas_gammainc
from scipy.special import gammaincc as sas_gammaincc
from scipy.special import erf as sas_erf
from scipy.special import erfc as sas_erfc
from scipy.special import j0 as sas_J0
from scipy.special import j1 as sas_J1
from scipy.special import jn as sas_JN
# erf, erfc, tgamma, lgamma **do not use**
# C99 standard math constants
M_PI, M_PI_2, M_PI_4, M_SQRT1_2, M_E = np.pi, np.pi/2, np.pi/4, np.sqrt(0.5), np.e
NAN = nan
INFINITY = inf
# non-standard constants
M_PI_180, M_4PI_3 = M_PI/180, 4*M_PI/3
# can't do SINCOS in python; use "s, c = SINCOS(x)" instead
def SINCOS(x):
"""return sin(x), cos(x)"""
return sin(x), cos(x)
sincos = SINCOS
def square(x):
"""return x^2"""
return x*x
def cube(x):
"""return x^3"""
return x*x*x
def sas_sinx_x(x):
"""return sin(x)/x"""
from numpy import sinc as _sinc
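# np.sinc(t) = sin(pi*t)/(pi*t), so _sinc(x/pi) gives sin(x)/x with the correct limit of 1 at x = 0.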
return _sinc(x/M_PI)
def powr(x, y):
"""return x^y for x>0"""
return x**y
def pown(x, n):
"""return x^n for n integer"""
return x**n
FLOAT_SIZE = 8
def polevl(x, c, n):
"""return p(x) for polynomial p of degree n-1 with coefficients c"""
return np.polyval(c[:n], x)
def p1evl(x, c, n):
"""return x^n + p(x) for polynomial p of degree n-1 with coefficients c"""
return np.polyval(np.hstack(([1.], c))[:n], x)
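# Illustrative check (not part of the original module): with coefficients
# sorted highest to lowest, polevl(1.5, [2., 3., 4.], 3) evaluates
# p(x) = 2x^2 + 3x + 4 at x = 1.5, i.e. 13.0.
def _polevl_example():
    coeffs = np.array([2., 3., 4.])  # highest power first
    return polevl(1.5, coeffs, 3)    # == 13.0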
def sas_Si(x):
"""return Si(x)"""
from scipy.special import sici
return sici(x)[0]
def sas_j1(x):
"""return j1(x)"""
if np.isscalar(x):
retvalue = (sin(x) - x*cos(x))/x**2
import os
import torch
import cv2
import json
import time
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
from copy import deepcopy
from tqdm import tqdm
from config import system_configs
from PIL import Image, ImageGrab
while(True):
image = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width = image.shape[0:2]
# gps_track = np.array([[[168,84,243]]]).repeat(600, axis=0).repeat(800, axis=1)
height, width, _ = image.shape
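# Build one binary mask per colour channel matching the GPS-track overlay colour (168, 84, 243).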
select_r = np.array(image[:,:,2]==168, dtype=np.uint8)
select_g = np.array(image[:,:,1]==84, dtype=np.uint8)
"""
climt/LICENSE
@mcgibbon
BSD License
Copyright (c) 2016, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import cftime
import datetime
import numpy as np
from typing import Union, TypeVar
import xarray as xr
RAD_PER_DEG = np.pi / 180.0
T = TypeVar("T", xr.DataArray, np.ndarray, float)
def _ensure_units_of_degrees(da):
units = da.attrs.get("units", "").lower()
if "rad" in units:
return np.rad2deg(da).assign_attrs(units="degrees")
else:
return da
def cos_zenith_angle(
time: Union[T, datetime.datetime, cftime.DatetimeJulian], lon: T, lat: T,
) -> T:
"""
Cosine of sun-zenith angle for lon, lat at time (UTC).
If DataArrays are provided for the lat and lon arguments, their units will
be assumed to be in degrees, unless they have a units attribute that
contains "rad"; in that case they will automatically be converted to having
units of degrees.
Args:
time: time in UTC
lon: float or np.ndarray in degrees (E/W), or xr.DataArray
lat: float or np.ndarray in degrees (N/S), or xr.DataArray
Returns:
float, np.ndarray, or xr.DataArray
"""
if isinstance(lon, xr.DataArray):
lon = _ensure_units_of_degrees(lon)
lat = _ensure_units_of_degrees(lat)
return (
xr.apply_ufunc(
cos_zenith_angle,
time,
lon,
lat,
dask="parallelized",
output_dtypes=[np.float64],
)
.rename("cos_zenith_angle")
.assign_attrs(units="")
)
else:
lon_rad, lat_rad = lon * RAD_PER_DEG, lat * RAD_PER_DEG
return _star_cos_zenith(time, lon_rad, lat_rad)
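# Illustrative usage sketch (not part of the original module); the date and
# coordinates below are arbitrary examples.
def _example_cos_zenith_angle():
    t = datetime.datetime(2000, 6, 21, 18, 0)   # UTC
    lon = np.array([-105.0, 0.0])               # degrees E/W
    lat = np.array([40.0, 51.5])                # degrees N/S
    return cos_zenith_angle(t, lon, lat)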
def _days_from_2000(model_time):
"""Get the days since year 2000.
"""
date_type = type(np.asarray(model_time).ravel()[0])
if date_type not in [datetime.datetime, cftime.DatetimeJulian]:
raise ValueError(
f"model_time has an invalid date type. It must be either "
f"datetime.datetime or cftime.DatetimeJulian. Got {date_type}."
)
return _total_days(model_time - date_type(2000, 1, 1, 12, 0))
def _total_days(time_diff):
"""
Total time in units of days
"""
return np.asarray(time_diff).astype("timedelta64[s]").astype(np.float64) / 86400.0  # seconds -> days
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import os, sys
import pandas
from Bio import Phylo
import utility_functions_beast as beast_utils
import utility_functions_simulated_data as sim_utils
from plot_defaults import *
def read_treetime_results_dataset(fname):
"""
Read results of the TreeTime simulations
Args:
- fname: path to the input file
Returns:
- df: Table of results as pandas data-frame
"""
columns = ['File', 'Sim_Tmrca', 'Tmrca', 'mu', 'R', 'R2_int']
df = pandas.read_csv(fname, names=columns)
#filter obviously failed simulations
df = df[[len(str(k)) > 10 for k in df.File]]
df = df[df.R > 0.1]
# some very basic preprocessing
df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
df['Sim_mu'] = list(map(lambda x: float(x.split("/")[-1].split('_')[6][2:]), df.File))
df['Ns'] = list(map(lambda x: int(x.split("/")[-1].split('_')[3][2:]), df.File))
df['Ts'] = list(map(lambda x: int(x.split("/")[-1].split('_')[4][2:]), df.File))
df['N'] = list(map(lambda x: int(x.split("/")[-1].split('_')[2][1:]), df.File))
df['T'] = df['Ns']*df['Ts']
df['Nmu'] = (df['N']*df['Sim_mu'])
return df
def read_lsd_results_dataset(fname):
"""
Read results of the LSd simulations
Args:
- fname: path to the input file
Returns:
- df: Table of results as pandas data-frame
"""
columns = ['File', 'Sim_Tmrca', 'Tmrca', 'mu', 'obj']
df = pandas.read_csv(fname, names=columns)
# Filter out obviously wrong data
df = df[[len(k) > 10 for k in df.File]]
#Some basic preprocessing
df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
df['Sim_mu'] = list(map(lambda x: float(x.split("/")[-1].split('_')[6][2:]), df.File))
df['Ns'] = list(map(lambda x: int(x.split("/")[-1].split('_')[3][2:]), df.File))
df['Ts'] = list(map(lambda x: int(x.split("/")[-1].split('_')[4][2:]), df.File))
df['N'] = list(map(lambda x: int(x.split("/")[-1].split('_')[2][1:]), df.File))
df['T'] = df['Ns']*df['Ts']
df['Nmu'] = (df['N']*df['Sim_mu'])
return df
def create_lsd_tt_pivot(df, T_over_N=None, mean_or_median='median'):
if T_over_N is not None:
DF = df[ df["T"] / df["N"] == T_over_N ]
else:
DF = df
N_MUS = np.unique(DF.Nmu)
# - * - coding: utf-8 - * -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal
from ..signal import signal_smooth
from ..signal import signal_zerocrossings
def ecg_findpeaks(ecg_cleaned, sampling_rate=1000, method="neurokit", show=False):
"""Find R-peaks in an ECG signal.
Low-level function used by `ecg_peaks()` to identify R-peaks in an ECG signal using one of several algorithms. See `ecg_peaks()` for details.
Parameters
----------
ecg_cleaned : list, array or Series
The cleaned ECG channel as returned by `ecg_clean()`.
sampling_rate : int
The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
Defaults to 1000.
method : string
The algorithm to be used for R-peak detection. Can be one of 'neurokit' (default),
'pantompkins1985', 'hamilton2002', 'christov2004', 'gamboa2008', 'elgendi2010', 'engzeemod2012', 'kalidas2017', 'martinez2003' or 'rodrigues2020'.
show : bool
If True, will return a plot to visualizing the thresholds used in the
algorithm. Useful for debugging.
Returns
-------
info : dict
A dictionary containing additional information, in this case the
samples at which R-peaks occur, accessible with the key "ECG_R_Peaks".
See Also
--------
ecg_clean, signal_fixpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
>>> cleaned = nk.ecg_clean(ecg, sampling_rate=1000)
>>> info = nk.ecg_findpeaks(cleaned)
>>> nk.events_plot(info["ECG_R_Peaks"], cleaned)
>>>
>>> # Different methods
>>> neurokit = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="neurokit"), method="neurokit")
>>> pantompkins1985 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="pantompkins1985"), method="pantompkins1985")
>>> hamilton2002 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="hamilton2002"), method="hamilton2002")
>>> christov2004 = nk.ecg_findpeaks(cleaned, method="christov2004")
>>> gamboa2008 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="gamboa2008"), method="gamboa2008")
>>> elgendi2010 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="elgendi2010"), method="elgendi2010")
>>> engzeemod2012 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="engzeemod2012"), method="engzeemod2012")
>>> kalidas2017 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="kalidas2017"), method="kalidas2017")
>>> martinez2003 = nk.ecg_findpeaks(cleaned, method="martinez2003")
>>>
>>> # Visualize
>>> nk.events_plot([neurokit["ECG_R_Peaks"],
pantompkins1985["ECG_R_Peaks"],
hamilton2002["ECG_R_Peaks"],
christov2004["ECG_R_Peaks"],
gamboa2008["ECG_R_Peaks"],
elgendi2010["ECG_R_Peaks"],
engzeemod2012["ECG_R_Peaks"],
kalidas2017["ECG_R_Peaks"]],
martinez2003["ECG_R_Peaks"]], cleaned)
References
--------------
- <NAME>. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
- <NAME>, <NAME>, <NAME>, and <NAME>. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in Cardiology, 2003, pages 259–262, 2003.
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
- <NAME> and <NAME>. A Real-Time QRS Detection Algorithm. In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
- <NAME>, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
# Try retrieving right column
if isinstance(ecg_cleaned, pd.DataFrame):
try:
ecg_cleaned = ecg_cleaned["ECG_Clean"]
except (KeyError, NameError):
try:
ecg_cleaned = ecg_cleaned["ECG_Raw"]
except (KeyError, NameError):
ecg_cleaned = ecg_cleaned["ECG"]
method = method.lower() # remove capitalised letters
# Run peak detection algorithm
if method in ["nk", "nk2", "neurokit", "neurokit2"]:
rpeaks = _ecg_findpeaks_neurokit(ecg_cleaned, sampling_rate,
show=show)
elif method in ["pantompkins", "pantompkins1985"]:
rpeaks = _ecg_findpeaks_pantompkins(ecg_cleaned, sampling_rate)
elif method in ["gamboa2008", "gamboa"]:
rpeaks = _ecg_findpeaks_gamboa(ecg_cleaned, sampling_rate)
elif method in ["ssf", "slopesumfunction", "zong", "zong2003"]:
rpeaks = _ecg_findpeaks_ssf(ecg_cleaned, sampling_rate)
elif method in ["hamilton", "hamilton2002"]:
rpeaks = _ecg_findpeaks_hamilton(ecg_cleaned, sampling_rate)
elif method in ["christov", "christov2004"]:
rpeaks = _ecg_findpeaks_christov(ecg_cleaned, sampling_rate)
elif method in ["engzee", "engzee2012", "engzeemod", "engzeemod2012"]:
rpeaks = _ecg_findpeaks_engzee(ecg_cleaned, sampling_rate)
elif method in ["elgendi", "elgendi2010"]:
rpeaks = _ecg_findpeaks_elgendi(ecg_cleaned, sampling_rate)
elif method in ["kalidas2017", "swt", "kalidas", "kalidastamil", "kalidastamil2017"]:
rpeaks = _ecg_findpeaks_kalidas(ecg_cleaned, sampling_rate)
elif method in ["martinez2003", "martinez"]:
rpeaks = _ecg_findpeaks_WT(ecg_cleaned, sampling_rate)
elif method in ["rodrigues2020", "rodrigues", "asi"]:
rpeaks = _ecg_findpeaks_rodrigues(ecg_cleaned, sampling_rate)
else:
raise ValueError("NeuroKit error: ecg_findpeaks(): 'method' should be "
"one of 'neurokit' or 'pamtompkins'.")
# Prepare output.
info = {"ECG_R_Peaks": rpeaks}
return info
# =============================================================================
# NeuroKit
# =============================================================================
def _ecg_findpeaks_neurokit(signal, sampling_rate=1000, smoothwindow=.1, avgwindow=.75,
gradthreshweight=1.5, minlenweight=0.4, mindelay=0.3,
show=False):
"""
All tune-able parameters are specified as keyword arguments. The `signal`
must be the highpass-filtered raw ECG with a lowcut of .5 Hz.
"""
if show is True:
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
# Compute the ECG's gradient as well as the gradient threshold. Run with
# show=True in order to get an idea of the threshold.
grad = np.gradient(signal)
absgrad = np.abs(grad)
smooth_kernel = int(np.rint(smoothwindow * sampling_rate))
avg_kernel = int(np.rint(avgwindow * sampling_rate))
smoothgrad = signal_smooth(absgrad, kernel="boxcar", size=smooth_kernel)
avggrad = signal_smooth(smoothgrad, kernel="boxcar", size=avg_kernel)
gradthreshold = gradthreshweight * avggrad
mindelay = int(np.rint(sampling_rate * mindelay))
if show is True:
ax1.plot(signal)
ax2.plot(smoothgrad)
ax2.plot(gradthreshold)
# Identify start and end of QRS complexes.
qrs = smoothgrad > gradthreshold
beg_qrs = np.where(np.logical_and(np.logical_not(qrs[0:-1]), qrs[1:]))[0]
end_qrs = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0]
# Throw out QRS-ends that precede first QRS-start.
end_qrs = end_qrs[end_qrs > beg_qrs[0]]
# Identify R-peaks within QRS (ignore QRS that are too short).
num_qrs = min(beg_qrs.size, end_qrs.size)
min_len = np.mean(end_qrs[:num_qrs] - beg_qrs[:num_qrs]) * minlenweight
#!/usr/bin/env python3
# extract srt form of subtitles from dji movie (caption setting needs
# to be turned on when movie is recorded)
#
# ffmpeg -txt_format text -i input_file.MOV output_file.srt
import argparse
import cv2
import datetime
import skvideo.io # pip3 install scikit-video
import math
import fractions
import json
from matplotlib import pyplot as plt
import numpy as np
import os
import pyexiv2
import re
import sys
from scipy import interpolate # strait up linear interpolation, nothing fancy
from auracore import wgs84
from aurauas_flightdata import flight_loader, flight_interp
from props import PropertyNode
import props_json
import djilog
parser = argparse.ArgumentParser(description='extract and geotag dji movie frames.')
parser.add_argument('--video', required=True, help='input video')
parser.add_argument('--camera', help='select camera calibration file')
parser.add_argument('--cam-mount', choices=['forward', 'down', 'rear'],
default='down',
help='approximate camera mounting orientation')
parser.add_argument('--interval', type=float, default=1.0, help='extraction interval')
parser.add_argument('--distance', type=float, help='max extraction distance interval')
parser.add_argument('--start-time', type=float, help='begin frame grabbing at this time.')
parser.add_argument('--end-time', type=float, help='end frame grabbing at this time.')
parser.add_argument('--start-counter', type=int, default=1, help='first image counter')
parser.add_argument('--ground', type=float, help='ground altitude in meters')
parser.add_argument('--djicsv', help='name of dji exported csv log file from the flight, see https://www.phantomhelp.com/logviewer/upload/')
args = parser.parse_args()
r2d = 180.0 / math.pi
match_ratio = 0.75
scale = 0.4
filter_method = 'homography'
tol = 3.0
overlap = 0.25
djicsv = djilog.djicsv()
djicsv.load(args.djicsv)
class Fraction(fractions.Fraction):
"""Only create Fractions from floats.
>>> Fraction(0.3)
Fraction(3, 10)
>>> Fraction(1.1)
Fraction(11, 10)
"""
def __new__(cls, value, ignore=None):
"""Should be compatible with Python 2.6, though untested."""
return fractions.Fraction.from_float(value).limit_denominator(99999)
def dms_to_decimal(degrees, minutes, seconds, sign=' '):
"""Convert degrees, minutes, seconds into decimal degrees.
>>> dms_to_decimal(10, 10, 10)
10.169444444444444
>>> dms_to_decimal(8, 9, 10, 'S')
-8.152777777777779
"""
return (-1 if sign[0] in 'SWsw' else 1) * (
float(degrees) +
float(minutes) / 60 +
float(seconds) / 3600
)
def decimal_to_dms(decimal):
"""Convert decimal degrees into degrees, minutes, seconds.
>>> decimal_to_dms(50.445891)
[Fraction(50, 1), Fraction(26, 1), Fraction(113019, 2500)]
>>> decimal_to_dms(-125.976893)
[Fraction(125, 1), Fraction(58, 1), Fraction(92037, 2500)]
"""
remainder, degrees = math.modf(abs(decimal))
remainder, minutes = math.modf(remainder * 60)
return [Fraction(n) for n in (degrees, minutes, remainder * 60)]
# find affine transform between matching keypoints in pixel
# coordinate space. fullAffine=True means unconstrained to
# include best warp/shear. fullAffine=False means limit the
# matrix to only best rotation, translation, and scale.
def findAffine(src, dst, fullAffine=False):
affine_minpts = 7
#print("src:", src)
#print("dst:", dst)
if len(src) >= affine_minpts:
# affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
affine, status = \
cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32))
else:
affine = None
#print str(affine)
return affine
def decomposeAffine(affine):
if affine is None:
return (0.0, 0.0, 0.0, 1.0, 1.0)
tx = affine[0][2]
ty = affine[1][2]
a = affine[0][0]
b = affine[0][1]
c = affine[1][0]
d = affine[1][1]
sx = math.sqrt( a*a + b*b )
if a < 0.0:
sx = -sx
sy = math.sqrt( c*c + d*d )
if d < 0.0:
sy = -sy
rotate_deg = math.atan2(-b,a) * 180.0/math.pi
if rotate_deg < -180.0:
rotate_deg += 360.0
if rotate_deg > 180.0:
rotate_deg -= 360.0
return (rotate_deg, tx, ty, sx, sy)
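# Illustrative sketch (not part of the original script): recovering a pure
# translation between two synthetic point sets with findAffine/decomposeAffine.
def _example_affine():
    src = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.],
                    [2., 3.], [4., 1.], [5., 5.]])
    dst = src + np.array([10., 5.])    # translate by (10, 5)
    affine = findAffine(src, dst)
    return decomposeAffine(affine)     # approx (0.0, 10.0, 5.0, 1.0, 1.0)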
def filterMatches(kp1, kp2, matches):
mkp1, mkp2 = [], []
idx_pairs = []
used = np.zeros(len(kp2), np.bool_)
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * match_ratio:
#print " dist[0] = %d dist[1] = %d" % (m[0].distance, m[1].distance)
m = m[0]
# FIXME: ignore the bottom section of movie for feature detection
#if kp1[m.queryIdx].pt[1] > h*0.75:
# continue
if not used[m.trainIdx]:
used[m.trainIdx] = True
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
idx_pairs.append( (m.queryIdx, m.trainIdx) )
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, kp_pairs, idx_pairs, mkp1
def filterFeatures(p1, p2, K, method):
inliers = 0
total = len(p1)
space = ""
status = []
M = None
if len(p1) < 7:
# not enough points
return None, np.zeros(total), [], []
if method == 'homography':
M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
elif method == 'fundamental':
M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
elif method == 'essential':
M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
elif method == 'none':
M = None
status = np.ones(total)
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from kartothek.core.common_metadata import make_meta, read_schema_metadata
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import (
create_empty_dataset_header,
store_dataframes_as_dataset,
write_single_partition,
)
from kartothek.io.testing.write import * # noqa: F40
from kartothek.io_components.metapartition import MetaPartition
def _store_dataframes(dfs, **kwargs):
# Positional arguments in function but `None` is acceptable input
for kw in ("dataset_uuid", "store"):
if kw not in kwargs:
kwargs[kw] = None
return store_dataframes_as_dataset(dfs=dfs, **kwargs)
@pytest.fixture()
def bound_store_dataframes():
return _store_dataframes
def test_write_single_partition(store_factory, mock_uuid, metadata_version):
create_empty_dataset_header(
store=store_factory(),
schema=pd.DataFrame({"col": [1]}),
dataset_uuid="some_dataset",
metadata_version=metadata_version,
table_name="table1",
)
new_data = pd.DataFrame({"col": [1, 2]})
keys_in_store = set(store_factory().keys())
new_mp = write_single_partition(
store=store_factory,
dataset_uuid="some_dataset",
data=new_data,
table_name="table1",
)
keys_in_store.add("some_dataset/table1/auto_dataset_uuid.parquet")
assert set(store_factory().keys()) == keys_in_store
expected_mp = MetaPartition(
label="auto_dataset_uuid", # this will be a hash of the input
file="some_dataset/table1/auto_dataset_uuid.parquet",
metadata_version=4,
schema=make_meta(pd.DataFrame({"col": [1, 2]}), origin="table1"),
)
assert new_mp == expected_mp
with pytest.raises(ValueError):
# col is an integer column so this is incompatible.
new_data = pd.DataFrame({"col": [datetime.date(2010, 1, 1)]})
write_single_partition(
store=store_factory,
dataset_uuid="some_dataset",
data=new_data,
table_name="table1",
)
def test_create_dataset_header_minimal_version(store, metadata_storage_format):
with pytest.raises(NotImplementedError):
create_empty_dataset_header(
store=store,
schema=pd.DataFrame({"col": [1]}),
dataset_uuid="new_dataset_uuid",
metadata_storage_format=metadata_storage_format,
metadata_version=3,
)
create_empty_dataset_header(
store=store,
schema=pd.DataFrame({"col": [1]}),
dataset_uuid="new_dataset_uuid",
metadata_storage_format=metadata_storage_format,
metadata_version=4,
)
def test_create_dataset_header(store, metadata_storage_format, frozen_time):
schema = make_meta(pd.DataFrame({"col": [1]}), origin="1")
new_dataset = create_empty_dataset_header(
store=store,
schema=schema,
dataset_uuid="new_dataset_uuid",
metadata_storage_format=metadata_storage_format,
metadata_version=4,
)
expected_dataset = DatasetMetadata(
uuid="new_dataset_uuid",
metadata_version=4,
explicit_partitions=False,
schema=schema,
)
assert new_dataset == expected_dataset
storage_keys = list(store.keys())
assert len(storage_keys) == 2
loaded = DatasetMetadata.load_from_store(store=store, uuid="new_dataset_uuid")
assert loaded == expected_dataset
# If the read succeeds, the schema is written
read_schema_metadata(dataset_uuid=new_dataset.uuid, store=store)
# TODO: move `store_dataframes_as_dataset` tests to generic tests or remove if redundant
def test_store_dataframes_as_dataset_no_pipeline_partition_on(store):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
dataset = store_dataframes_as_dataset(
store=store,
dataset_uuid="dataset_uuid",
dfs=[df],
partition_on="P",
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 10
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_partition_on_inconsistent(store):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": | np.arange(0, 10) | numpy.arange |
import os
import sys
import copy
import time
import textwrap as tw
import itertools as it
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import parser
import filehandle as fh
def compress_size (size):
""" Compress binary array into integers comprising bits """
sim_t = np.bool8
sim_bits = 1
# Computes the best type to compress boolean data with `size`
for i in it.count():
nbytes = 1 << i
nbits = nbytes << 3
# If size cannot fit into bits
if size & (nbits - 1) != 0:
break
try:
# Create numpy dtype
sim_t = np.dtype("u{}".format(nbytes))
# Save new bits per type element, e.g., 4 for an integer
sim_bits = nbits
except TypeError:
break
return sim_t, sim_bits
def binary_combinations (size):
""" Build all binary combinations of `size` bits """
exp_size = 1 << size
dtype, dbits = compress_size(exp_size)
numbers = (exp_size + dbits - 1) // dbits
result = np.empty(( size, numbers ), dtype)
vals = np.arange(exp_size)
temp = np.empty_like(vals)
for i in range(size - 1, -1, -1):
np.bitwise_and(1 << i, vals, temp)
compress(temp, dtype, dbits, result[i])
return result
def compress (data, dtype, dbits, out):
# Convert only if type is not boolean
if dtype is not np.bool8:
numbers = (data.shape[0] + dbits - 1) // dbits
data = data.reshape(numbers, dbits)
bits_shift = np.arange(dbits, dtype = dtype)
np.left_shift(data > 0, bits_shift, out = data)
out = np.bitwise_or.reduce(data, axis = 1, out = out)
else:
out[:] = data
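# Illustrative sketch (not part of the original module): for 16 circuit inputs
# the truth table has 2**16 rows, which compress_size packs into 64-bit words.
def _example_compress_size():
    sim_t, sim_bits = compress_size(1 << 16)
    return sim_t, sim_bits   # (dtype('uint64'), 64)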
dgen = np.int64
class CGPException (Exception):
""" Class to represent a CGP Exception """
pass
class CGP (object):
""" Class to represent the Cartesian Genetic Programming """
__slots__ = [
"__p_find_active_loop", "__p_simulate_loop", "__p_energy_loop",
"__p_arr_nodes", "__p_arr_cols", "__p_arr_size",
"__p_funcs", "__p_arities", "__p_ffmts", "__p_famap", "__p_feval",
"__p_integer_mul", "__p_integer_add", "__p___sims_data",
"__seed", "__random", "__popsize", "__fnames", "__arity",
"__ni", "__no", "__nc", "__gen", "__act", "__fit", "__generation",
"__inp_data", "__exp_data", "__best_curve", "__best_gener",
"__better", "__worse", "__equal", "__same", "__times", "__time",
"__module", "neutral_drift", "mutate_gene"
]
def init_properties (self):
""" Initialize CGP's properties """
self.__p_find_active_loop = self.__p_simulate_loop = None
self.__p_energy_loop = self.__p_arr_nodes = self.__p_arr_cols = None
self.__p_arr_size = self.__p_funcs = self.__p_arities = None
self.__p_ffmts = self.__p_famap = self.__p_feval = None
self.__p_integer_mul = self.__p_integer_add = None
self.__p___sims_data = None
def __init__ (self, popsize, gates = (), neutral_drift = True, seed = 0):
""" Constructs CGP """
self.init_properties()
self.__seed = seed
self.__popsize = popsize
self.__fnames = np.sort(gates)
self.__fnames.setflags(write = False)
self.__arity = self.arities.max()
self.__module = "circuit"
self.neutral_drift = neutral_drift
def random_genotype (self, amount = 1):
""" Generate a random genotype """
return self.to_integer(self.__random.random_sample(
( amount, self.nodes_size + self.no )
))
def setup_one_lambda (self, c_seed, kappa, mutation):
""" Setup 1+lambda ES """
self.__module, cinps, chromo, couts = parser.convert(c_seed)
self.__ni = len(cinps)
self.__no = len(couts)
self.__nc = int(np.ceil(len(chromo) * kappa))
self.mutate_gene = mutation / (self.nc + self.no)
# Generate one base random genotype
self.__gen = self.random_genotype(1)
id_map = np.arange(self.ni + len(chromo))
funs, args, outs = self.break_genotypes(self.__gen)
gates = { name : i for i, name in enumerate(self.fnames) }
# Randomly distributes the original nodes into the random genotype
pieces = np.zeros(self.nc, np.bool8)
pieces[ : len(chromo) ] = True
self.__random.shuffle(pieces)
cpos = 0
for pos in np.flatnonzero(pieces):
cfun, *cargs = chromo[cpos]
if cfun not in gates:
raise CGPException("Seed circuit has unselected "
"function `{}`.".format(cfun))
funs[ ... , pos ] = gates[cfun]
args[ ... , pos , : len(cargs)] = id_map[cargs]
id_map[cpos + self.ni] = pos + self.ni
cpos += 1
outs[:] = id_map[couts]
self.__act = self.find_active(self.__gen)
# Generate combinations
self.__inp_data = binary_combinations(self.ni)
self.__inp_data.setflags(write = False)
self.simulate(self.__gen, self.__act, self.__sims_data[ : 1 ])
self.__exp_data = self.__sims_data[ 0 , -self.no : ].copy()
self.__exp_data.setflags(write = False)
self.__fit = self.get_fitness(self.__gen, self.__act, self.__sims_data[ : 1 ])
def setup (self, c_seed, kappa = 1.0, mutation = 1.0, log = fh.log):
""" Setup CGP """
start_time = time.time()
self.__random = np.random.RandomState(self.__seed)
self.__generation = 0
self.setup_one_lambda(c_seed, kappa, mutation)
self.__best_gener = []
self.__best_curve = []
self.__better = []
self.__equal = []
self.__worse = []
self.__same = []
self.__times = []
self.__time = 0.0
rtime = time.time() - start_time
self.store_data(0, 0, 0, 0, rtime)
self.print_debug(log)
def store_data (self, bet, equ, wor, same, rtime):
""" Stores newly generated data into the class """
best_fit = self.best_fit
if not self.__best_curve or best_fit != self.__best_curve[-1]:
# Add to curve iff changed
self.__best_curve.append(best_fit)
self.__best_gener.append(self.generation)
self.__better.append(bet)
self.__equal.append(equ)
self.__worse.append(wor)
self.__same.append(same)
self.__times.append(rtime)
self.__time += rtime
def one_lambda (self):
""" Run 1+lambda ES """
# Get current best
best_gen, best_act, best_fit = self.best
# Storage for new individuals
off_gen = np.empty(( self.popsize, self.genotype_size ), best_gen.dtype)
off_act = np.empty(self.popsize, np.object)
off_fit = np.empty(self.popsize, np.object)
same = 0
sames = np.zeros(self.popsize, np.bool8)
diff = []
# Generate individuals
for pos in range(self.popsize):
gen = self.mutation(best_gen)
off_gen[pos] = gen
if self.same_critical(best_gen, best_act, gen):
off_act[pos] = best_act
off_fit[pos] = best_fit
sames[pos] = True
same += 1
else:
diff.append(pos)
# Evaluate different individuals
if diff:
diff = np.array(diff)
gen = off_gen[diff]
act = self.find_active(gen)
sim = self.simulate(gen, act, self.__sims_data[ : len(diff) ])
fit = self.get_fitness(gen, act, sim)
for i, pos in enumerate(diff):
off_act[pos] = act[i]
off_fit[pos] = fit[i]
pos = None
better = equal = worse = 0
new_fit = best_fit
# Compare individuals to parent
for i, fit in enumerate(off_fit):
if fit < best_fit:
better += 1
elif best_fit < fit:
worse += 1
else:
equal += 1
change = fit <= new_fit if self.neutral_drift else fit < new_fit
if change:
pos = i
new_fit = fit
# Update best individual
if pos is not None:
self.__gen = off_gen[pos][np.newaxis]
if not sames[pos]:
self.__act = off_act[pos][np.newaxis]
self.__fit = off_fit[pos][np.newaxis]
return better, equal, worse, same
def run (self, merge, on_change, every, log = fh.log):
""" Run 1 generation """
self.__generation += 1
start_time = time.time()
bet, equ, wor, same = self.one_lambda()
rtime = time.time() - start_time
# Save generation data
self.store_data(bet, equ, wor, same, rtime)
debug = True
if every != 0:
# Test if it requires printing
changed = self.__best_gener[-1] == self.generation
debug = (on_change and changed) or (self.generation % every == 0)
else:
# Test if it requires to go back to previous line
back = merge == 0 or (self.generation - 1) % merge != 0
if on_change and back:
back = self.__best_gener[-1] != (self.generation - 1)
if back:
print("\033[F\033[K", end = "", file = log)
if debug:
# Print debug message
self.print_debug(log)
def run_until (self, stop, merge, on_change, every, log = fh.log):
""" Run until stop criteria is met """
merge = max(merge, 0)
while not stop(self):
self.run(merge, on_change, every, log = log)
def to_integer (self, rea, mul = None, add = None):
""" Convert a floating point individual to integer """
mul = self.integer_mul if mul is None else mul
add = self.integer_add if add is None else add
gen = np.empty(rea.shape, dgen)
np.multiply(rea, mul, casting = "unsafe", out = gen)
return np.add(gen, add, out = gen)
def break_genotypes (self, gens):
""" Break genotypes into functions, arguments, and outputs """
shape = gens.shape[ : -1 ]
nod = gens[ ... , : -self.no ].reshape(*shape, -1, self.arity + 1)
out = gens[ ... , -self.no : ]
fun = nod[ ... , 0 ]
arg = nod[ ... , 1 : ]
return fun, arg, out
@property
def find_active_loop (self):
""" Build a numba compiled loop to find active nodes """
if self.__p_find_active_loop is None:
def __find_active_loop (active, active_nod, nod):
""" Compiled loop to find active nodes """
for i in range(active.shape[0]):
old_found = 0
nodi = nod[i]
activei = active[i]
active_nodi = active_nod[i]
# Iterate while there are changes
while True:
new_active = nodi[active_nodi].reshape(-1)
found = new_active.shape[0]
if found == old_found:
break
activei[new_active] = True
old_found = found
# Compile
self.__p_find_active_loop = nb.njit([
nb.void(nb.b1[ :, : ], nb.b1[ :, : ], nb.i8[ :, :, : ])
], nogil = True)(__find_active_loop)
return self.__p_find_active_loop
def find_active (self, gens):
""" Find active nodes on an individual """
active = np.zeros(( gens.shape[0], self.full_size ), np.bool8)
active[ ... , -self.no : ] = True
active_nod = active[ ... , self.ni : -self.no ]
# Set outputs as active
lines = np.arange(gens.shape[0]).reshape(-1, 1)
active[ lines , gens[ ... , -self.no : ]] = True
shape = gens.shape[0]
nod = gens[ ... , : -self.no ].reshape(shape, -1, self.arity + 1)
fun = nod[ ... , 0 ]
nod = nod[ ... , 1 : ].copy()
nod[self.famap[fun]] = self.full_size - 1
# Propagate active status to output's parents
self.find_active_loop(active, active_nod, nod)
return active
@property
def simulate_loop (self):
""" Build a numba compiled loop to simulate circuits """
if self.__p_simulate_loop is None:
arities = self.arities
feval = self.feval
arr_nodes = self.arr_nodes
rel_nodes = self.arr_size
no = self.no
def __simulate_loop (acts_nod, fun, arg, out, sim):
""" Compiled loop to simulate circuits """
for i in range(arg.shape[0]):
acts_nodi = acts_nod[i]
funi = fun[i]
argi = arg[i]
simi = sim[i]
outi = out[i]
for npos in rel_nodes[acts_nodi]:
fpos = funi[npos]
fari = arities[fpos]
farg = argi[npos][ : fari]
feval(fpos, simi, farg, arr_nodes[npos])
for j in range(-1, -no - 1, -1):
simi[j] = simi[outi[j]]
# Compile
self.__p_simulate_loop = nb.njit(nogil = True)(__simulate_loop)
return self.__p_simulate_loop
def simulate (self, gens, acts, sim):
""" Simulate circuits into `sim` """
fun, arg, out = self.break_genotypes(gens)
acts_nod = acts[ ... , self.ni : -self.no ]
self.simulate_loop(acts_nod, fun, arg, out, sim)
return sim
@property
def energy_loop (self):
""" Build a numba compiled loop to evaluate the Landauer's Limit """
if self.__p_energy_loop is None:
rel_nodes = self.arr_size
arities = self.arities
fnames = self.fnames
try:
sim_bits = np.iinfo(self.inp_data.dtype).bits
except ValueError:
sim_bits = 1
nums = self.inp_data.shape[1]
to_prob = 1.0 / (nums * sim_bits)
def __energy_loop (fun, arg, sims, act_nod, sim_nod, ene):
""" Compiled loop to evaluate the Landauer's Limit """
# Iterate over circuits
for i in range(len(ene)):
simi = sims[i]
nods = rel_nodes[act_nod[i]]
# Iterate over active nodes
for j in range(len(nods)):
npos = nods[j]
narg = arg[i][npos]
fari = arities[fun[i][npos]]
c_inp = np.zeros(1 << fari, np.uint64)
c_out = np.zeros(2, np.uint64)
# Iterate over positions on simulation
for k in range(nums):
# Iterate over bits on postion
for l in range(sim_bits):
comb = 0
# Fetch output bit
opos = (sim_nod[ i, npos, k ] >> l) & 1
# Fetch inputs' bits' combinations
for m in range(fari):
comb |= ((simi[narg[m], k] >> l) & 1) << m
# Save combinations
c_inp[comb] += 1
c_out[opos] += 1
# Calculate energy using Shannon's Entropy
energy = 0.0
for v in c_out:
if v > 0:
prob = v * to_prob
energy += prob * np.log2(prob)
for v in c_inp:
if v > 0:
prob = v * to_prob
energy -= prob * np.log2(prob)
ene[i] += energy
# Compile
self.__p_energy_loop = nb.njit(nogil = True)(__energy_loop)
return self.__p_energy_loop
def energy (self, gens, acts, sims):
""" Evaluate the energy from circuits """
fun, arg, _ = self.break_genotypes(gens)
ene = np.zeros(gens.shape[ : -1 ] or 1, np.double)
act_nod = acts[ ... , self.ni : -self.no ]
sim_nod = sims[ ... , self.ni : -self.no , : ]
self.energy_loop(fun, arg, sims, act_nod, sim_nod, ene)
return ene
def get_fitness (self, gens, acts, sims):
""" Get invidual's fitness """
# Find matching outputs
eqs = sims[ ... , -self.no : , : ] == self.exp_data
eqs = eqs.all(axis = ( 1 , 2 ))
# Fill results with infinity
result = np.full(gens.shape[0], np.inf)
for i, ( gen, act, sim, eq ) in enumerate(zip(gens, acts, sims, eqs)):
if eq:
gen = gen[ np.newaxis ]
act = act[ np.newaxis ]
sim = sim[ np.newaxis ]
# Evaluate energy iff all outputs matches
result[i] = self.energy(gen, act, sim)
return result
def print_debug (self, file = fh.log):
""" Print debug data """
best_v = self.__best_curve[-1]
best = ""
if np.isscalar(best_v):
best = "{:.5f}".format(best_v)
else:
best = " ".join(map("{:.5f}".format, best_v))
print("Gen", self.generation, end = ": ", file = file)
print("Best = {}".format(best), end = ", ", file = file)
print("Same = {}".format(self.__same[-1]), end = ", ", file = file)
print("Time = {:.5f}s".format(self.__time), end = ", ", file = file)
print("Diff = {:.5f}s".format(self.__times[-1]), file = file)
def mutation (self, gen):
""" Mutate an indivdual """
randoms = self.__random.random_sample(self.genotype_size)
selected = randoms < self.mutate_gene
child = gen.copy()
changed = self.__random.random_sample(np.count_nonzero(selected))
child[selected] = self.to_integer(
changed, self.integer_mul[selected], self.integer_add[selected]
)
return child
def same_critical (self, gen1, act1, gen2):
""" Test if two genotypes share same critical section """
# If outputs are different, they may be different
if not np.array_equal(gen1[ -self.no : ], gen2[ -self.no : ]):
return False
inactive = ~act1[ self.ni : -self.no ]
nod1 = gen1[ : -self.no ].reshape(-1, self.arity + 1).copy()
nod2 = gen2[ : -self.no ].reshape(-1, self.arity + 1).copy()
nod1[inactive] = 0
nod2[inactive] = 0
fun1 = nod1[ : , 0 ]
fun2 = nod2[ : , 0 ]
# If active functions are different, they may be different
if not np.array_equal(fun1, fun2):
return False
nod1 = nod1[ : , 1 : ]
nod2 = nod2[ : , 1 : ]
# Test remaining nodes
return np.logical_or(nod1 == nod2, self.famap[fun1]).all()
def plot (self, file):
""" Plot energy convergence curve """
plt.figure()
plt.plot(
[ *self.__best_gener, self.generation ],
[ *self.__best_curve, self.best_fit ],
color = "#222222", label = "Energy", lw = 0.5
)
sciargs = {
"style" : "sci",
"scilimits" : ( 0 , 0 ),
"useMathText" : True
}
ax = plt.gca()
gargs = { "axis": "y", "ls": ":" }
# Add minor locators
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
# Add grids
plt.grid(True, which = "minor", lw = 0.1, color = "#CCCCCC", **gargs)
plt.grid(True, which = "major", lw = 0.5, color = "#AAAAAA", **gargs)
# Use scientific notation on x-axis
plt.ticklabel_format(axis = "x", **sciargs)
# Use scientific notation on y-axis
plt.ticklabel_format(axis = "y", **sciargs)
plt.xlabel("Generation")
plt.ylabel("Fitness")
plt.legend()
plt.savefig(file, dpi = 300, transparent = True)
plt.close()
def verilog (self, gen, act, module = None):
""" Convert individual to verilog """
# Max string size is the maximum possible index,
# + the type identifier 'i' for inputs, 'n' for nodes, 'o' for outputs,
# + the '~' for possible inverted values
max_size = len(str(max(( self.ni, self.nc, self.no )))) + 2
names = np.zeros(self.full_size, "|U{}".format(max_size))
exprs = []
inputs = []
outputs = []
wires = []
fun, nod, out = self.break_genotypes(gen)
# Create inputs' nodes
for node in range(0, self.ni):
names[node] = "i{}".format(node)
inputs.append(node)
# Slice active nodes
act_nod = act[ self.ni : -self.no ]
# Create logical nodes
for node in self.arr_nodes[act_nod]:
npos = node - self.ni
fpos = fun[npos]
ffmt = self.ffmts[fpos]
# NOT gate case
if self.fnames[fpos] == "NOT":
names[node] = parser.invert(names[nod[ npos, 0 ]])
continue
# Create name
names[node] = "n{}".format(len(wires))
wires.append(node)
# Parse args
args = nod[ npos , : self.arities[fpos] ]
exprs.append(( node, ffmt.format(*names[args]) ))
# Create outputs' nodes
for node, inp in enumerate(out, self.nout_size):
names[node] = "o{}".format(node - self.nout_size)
exprs.append(( node, names[inp] ))
outputs.append(node)
names = np.array(names)
# Parse headers
ionames = "module {} ({});".format(
self.__module if not module else module,
", ".join(it.chain(names[inputs], names[outputs]))
)
inames = " input {};".format(", ".join(names[inputs]))
onames = " output {};".format(", ".join(names[outputs]))
# Create wrapper
wrap = tw.TextWrapper(
subsequent_indent = " ",
break_long_words = False,
break_on_hyphens = False,
width = 80
)
# Yield wrapped file
yield from wrap.wrap(ionames)
yield ""
yield from wrap.wrap(inames)
yield from wrap.wrap(onames)
if wires:
wnames = " wire {};".format(", ".join(names[wires]))
yield from wrap.wrap(wnames)
yield ""
for node, expr in exprs:
assign = " assign {} = {};".format(names[node], expr)
yield from wrap.wrap(assign)
yield "endmodule"
def save_curve (self, fname):
""" Save convergence curve to file """
with open(fname, "wb") as file:
np.savez_compressed(file,
best = self.__best_curve,
gene = self.__best_gener,
final = self.generation
)
def save_stats (self, fname):
""" Save evolution stats to file """
with open(fname, "wb") as file:
np.savez_compressed(file,
same = self.same,
better = self.better,
equal = self.equal,
worse = self.worse,
times = self.times
)
def save_best (self, fname):
""" Save best individual and its data to file """
os.makedirs(os.path.dirname(fname), exist_ok = True)
with open(fname, "wb") as file:
np.savez_compressed(file,
best = self.best_gen,
act = self.best_act,
fit = self.best_fit
)
@property
def popsize (self):
""" CGP's lambda """
return self.__popsize
@property
def arr_nodes (self):
""" Array of nodes' indices """
if self.__p_arr_nodes is None:
self.__p_arr_nodes = np.arange(self.ni, self.nout_size)
self.__p_arr_nodes.setflags(write = False)
return self.__p_arr_nodes
@property
def arr_cols (self):
""" Array of CGP's columns' indices """
if self.__p_arr_cols is None:
self.__p_arr_cols = np.arange(self.nc)
self.__p_arr_cols.setflags(write = False)
return self.__p_arr_cols
@property
def arr_size (self):
""" Array of CGP's nodes' indices """
if self.__p_arr_size is None:
self.__p_arr_size = np.arange(self.nc)
self.__p_arr_size.setflags(write = False)
return self.__p_arr_size
@property
def inp_data (self):
""" CGP's input data """
return self.__inp_data
@property
def exp_data (self):
""" CGP's expected data, given its input data """
return self.__exp_data
@property
def generation (self):
""" CGP's generation """
return self.__generation
@property
def best_gen (self):
""" CGP's fittest genotype """
return self.__gen[0]
@property
def best_act (self):
""" CGP's fittest genotype's active nodes """
return self.__act[0]
@property
def best_fit (self):
""" CGP's best fitness """
return self.__fit[0]
@property
def best (self):
""" CGP's fittest genotype's data """
return self.best_gen, self.best_act, self.best_fit
@property
def same (self):
""" Same individual, i.e., the ones with same critical section \
as the parent, per generation """
return self.__same
@property
def better (self):
""" Individuals fitter than the parent per generation """
return self.__better
@property
def equal (self):
""" Individuals with same fitness as the parent per generation """
return self.__equal
@property
def worse (self):
""" Individuals less fitter than the parent per generation """
return self.__worse
@property
def times (self):
""" Generation times """
return self.__times
@property
def time (self):
""" Total time """
return self.__time
@property
def arity (self):
""" Maximum arity """
return self.__arity
@property
def ni (self):
""" Input count """
return self.__ni
@property
def no (self):
""" Output count """
return self.__no
@property
def nc (self):
""" Columns count """
return self.__nc
@property
def nout_size (self):
""" Size of non-outputs """
return self.ni + self.nc
@property
def full_size (self):
""" Full circuit size """
return self.nout_size + self.no
@property
def gene_size (self):
""" Gene size """
return 1 + self.arity
@property
def nodes_size (self):
""" Genotype size of nodes section """
return self.nc * self.gene_size
@property
def genotype_size (self):
""" Full genotype size """
return self.nodes_size + self.no
@property
def fnames (self):
""" Gates' functions' names """
return self.__fnames
@property
def funcs (self):
""" Gates' functions """
if self.__p_funcs is None:
funcs = np.array([ parser.gates[name][0] for name in self.fnames ])
funcs.setflags(write = False)
self.__p_funcs = funcs
return self.__p_funcs
@property
def arities (self):
""" Gates' functions' arities """
if self.__p_arities is None:
arits = np.array([ parser.gates[name][1] for name in self.fnames ])
arits.setflags(write = False)
self.__p_arities = arits
return self.__p_arities
@property
def ffmts (self):
""" Gates' functions' formatting strings """
if self.__p_ffmts is None:
ffmts = np.array([ parser.gates[name][2] for name in self.fnames ])
ffmts.setflags(write = False)
self.__p_ffmts = ffmts
return self.__p_ffmts
@property
def famap (self):
""" Gates' functions' map to disabled inputs """
if self.__p_famap is None:
famap = np.zeros(( len(self.fnames), self.arity ), np.bool8)
for i, arity in enumerate(self.arities):
famap[ i, arity : ] = True
famap.setflags(write = False)
self.__p_famap = famap
return self.__p_famap
@property
def feval (self):
""" Compiled function to evaluate a gate's function """
if self.__p_feval is None:
# Build function's body
func_txt = "def __eval_func (fpos, values, args, out):\n"
fmt = "f{}".format
for i, ( func, fari ) in enumerate(zip(self.funcs, self.arities)):
# Conditions and calls
func_txt += (
" {}if fpos == {}:\n"
" {}({}, values[out])\n"
).format(
("el" if i else ""), i, fmt(i), ", ".join(
"values[args[{}]]".format(j) for j in range(fari)
))
# 'Compile' the function's code to python
scope = { fmt(i): func for i, func in enumerate(self.funcs) }
local = {}
exec(func_txt, scope, local)
# Compile the python's function
self.__p_feval = nb.njit(nogil = True)(local["__eval_func"])
return self.__p_feval
@property
def integer_mul (self):
""" Multiplier to convert genotype from real to integer """
if self.__p_integer_mul is None:
i_mul = np.empty(self.genotype_size, dgen)
i_mul[ -self.no : ].fill(self.nout_size)
# Its values represent the difference between the maximum
# and the minimum possible values for each position on the
# genotype; hence, when multiplied by the real-valued genotype,
# it yields a new genotype in the range [ 0, max - min ]
delta_con = self.ni + self.arr_cols
delta_con[self.arr_cols >= self.nc] = self.nc
delta_con = np.repeat(delta_con, self.arity)
nod = i_mul[ : -self.no ].reshape(-1, self.arity + 1)
nod[ : , 0 ] = len(self.fnames)
nod[ : , 1 : ] = delta_con.reshape(-1, self.arity)
i_mul.setflags(write = False)
self.__p_integer_mul = i_mul
return self.__p_integer_mul
@property
def integer_add (self):
""" Adder to convert genotype from real to integer, \
applied after multiplier """
if self.__p_integer_add is None:
i_add = | np.zeros(self.genotype_size, dgen) | numpy.zeros |
# Training code for the CVAE driver sensor model. Code is adapted from: https://github.com/sisl/EvidentialSparsification and
# https://github.com/StanfordASL/Trajectron-plus-plus.
import os
import time
seed = 123
import numpy as np
np.random.seed(seed)
from matplotlib import pyplot as plt
import torch
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
torch.autograd.set_detect_anomaly(True)
from copy import deepcopy
import pdb
import io
import PIL.Image
from tqdm import tqdm
import argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ExponentialLR
from collections import OrderedDict, defaultdict
os.chdir("../..")
from src.utils.utils_model import to_var
from src.driver_sensor_model.models_cvae import VAE
from src.utils.data_generator import *
import time
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
def plot_bar(alpha_p,alpha_q,args):
figure = plt.figure()
plt.bar(np.arange(args.latent_size)-0.1, alpha_p.cpu().data.numpy(), width=0.2, align='center', color='g', label='alpha_p')
plt.bar(np.arange(args.latent_size)+0.1, alpha_q.cpu().data.numpy(), width=0.2, align='center', color='b', label='alpha_q')
from __future__ import division, absolute_import, print_function
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
import sys
from numpy.testing import TestCase, run_module_suite, assert_, dec
from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
arange, arccos, arcsin, arctan, arctan2, array, average, choose,
concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,
getmask, greater, greater_equal, inner, isMaskedArray, less,
less_equal, log, log10, make_mask, masked, masked_array, masked_equal,
masked_greater, masked_greater_equal, masked_inside, masked_less,
masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, maximum, minimum,
multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,
repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
def eq(v, w, msg=''):
result = allclose(v, w)
if not result:
print("Not eq:%s\n%s\n----%s" % (msg, str(v), str(w)))
return result
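# For example, eq(array([1., 2.], mask=[0, 1]), [1., 99.]) is True: np.ma.allclose
# treats masked entries as equal by default (masked_equal=True).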
class TestMa(TestCase):
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = array(x, mask=m1)
ym = array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
def test_testBasic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.dtype, x.dtype)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(filled(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
def test_testBasic2d(self):
# Test of basic array creation and properties in 2 dimensions.
for s in [(4, 3), (6, 2)]:
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm),
len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(filled(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
self.setUp()
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
self.assertTrue(eq(a2d * a2d, a2d * a2dm))
self.assertTrue(eq(a2d + a2d, a2d + a2dm))
self.assertTrue(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
self.assertTrue(eq(-x, -xm))
self.assertTrue(eq(x + y, xm + ym))
self.assertTrue(eq(x - y, xm - ym))
self.assertTrue(eq(x * y, xm * ym))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(x / y, xm / ym))
self.assertTrue(eq(a10 + y, a10 + ym))
self.assertTrue(eq(a10 - y, a10 - ym))
self.assertTrue(eq(a10 * y, a10 * ym))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(a10 / y, a10 / ym))
self.assertTrue(eq(x + a10, xm + a10))
self.assertTrue(eq(x - a10, xm - a10))
self.assertTrue(eq(x * a10, xm * a10))
self.assertTrue(eq(x / a10, xm / a10))
self.assertTrue(eq(x ** 2, xm ** 2))
self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
self.assertTrue(eq(x ** y, xm ** ym))
self.assertTrue(eq(np.add(x, y), add(xm, ym)))
self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.cos(x), cos(xm)))
self.assertTrue(eq(np.cosh(x), cosh(xm)))
self.assertTrue(eq(np.sin(x), sin(xm)))
self.assertTrue(eq(np.sinh(x), sinh(xm)))
self.assertTrue(eq(np.tan(x), tan(xm)))
self.assertTrue(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
self.assertTrue(eq(np.log(abs(x)), log(xm)))
self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
self.assertTrue(eq(np.exp(x), exp(xm)))
self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
self.assertTrue(eq(np.arccos(z), arccos(zm)))
self.assertTrue(eq(np.arctan(z), arctan(zm)))
self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
self.assertTrue(eq(np.absolute(x), absolute(xm)))
self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
self.assertTrue(eq(np.less(x, y), less(xm, ym)))
self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
@dec.skipif('__pypy__' in sys.builtin_module_names)
def test_xtestCount(self):
# Test count
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assertTrue(count(ott).dtype.type is np.intp)
self.assertEqual(3, count(ott))
self.assertEqual(1, count(1))
self.assertTrue(eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
self.assertTrue(count(ott).dtype.type is np.intp)
assert_(isinstance(count(ott, 0), np.ndarray))
self.assertTrue(count(ott).dtype.type is np.intp)
self.assertTrue(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
self.assertTrue(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
xr = np.ravel(x) # max doesn't work if shaped
xmr = ravel(xm)
# true because of careful selection of data
self.assertTrue(eq(max(xr), maximum(xmr)))
self.assertTrue(eq(min(xr), minimum(xmr)))
def test_testAddSumProd(self):
# Test add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
self.assertTrue(eq(4, sum(array(4), axis=0)))
self.assertTrue(eq(4, sum(array(4), axis=0)))
self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
self.assertTrue(eq(np.product(x, 0), product(x, 0)))
self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
self.assertTrue(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
self.assertTrue(eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_(eq(x1[2], x2[2]))
assert_(eq(x1[2:5], x2[2:5]))
assert_(eq(x1[:], x2[:]))
assert_(eq(x1[1:], x3[1:]))
x1[2] = 9
x2[2] = 9
assert_(eq(x1, x2))
x1[1:3] = 99
x2[1:3] = 99
assert_(eq(x1, x2))
x2[1] = masked
assert_(eq(x1, x2))
x2[1:3] = masked
assert_(eq(x1, x2))
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_(eq(x1, x2))
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_(eq(3.0, x2.fill_value))
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
self.assertEqual(type(s2), str)
self.assertEqual(type(s1), str)
self.assertEqual(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_testCopySize(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
self.assertTrue(y1._data is not x1)
self.assertTrue(allequal(x1, y1._data))
self.assertTrue(y1.mask is m)
y1a = array(y1, copy=0)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m, copy=0)
self.assertTrue(y2.mask is m)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
self.assertTrue(y2.mask is not m)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
self.assertTrue(eq(concatenate([x4, x4]), y4))
self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
y6 = repeat(x4, 2, axis=0)
self.assertTrue(eq(y5, y6))
def test_testPut(self):
# Test of put
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
self.assertTrue(eq(x, [0, 10, 2, -1, 40]))
x = array(d, mask=m)
x.put([0, 1, 2], [-1, 100, 200])
self.assertTrue(eq(x, [-1, 100, 200, 0, 0]))
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
i = np.nonzero(m)[0]
put(ym, i, zm)
assert_(all(take(ym, i, axis=0) == zm))
def test_testOddFeatures(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_(eq(z.real, x))
assert_(eq(z.imag, 10 * x))
assert_(eq((z * conjugate(z)).real, 101 * x * x))
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_(eq(x, z))
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_(eq(x, z))
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
c[0] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
assert_(eq(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2)))
assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
assert_(eq(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0]))
assert_(eq(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1]))
assert_(eq(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0]))
assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1]))
assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5]))
atest = ones((10, 10, 10), dtype=np.float32)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_(eq(atest, ctest))
z = choose(c, (-x, x))
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(6)
x[5] = masked
y = arange(6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_(eq(z, zm))
assert_(getmask(zm) is nomask)
assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
z = where(c, masked, 1)
assert_(eq(z, [99, 99, 99, 1, 1, 1]))
z = where(c, 1, masked)
assert_(eq(z, [99, 1, 1, 99, 99, 99]))
def test_testMinMax2(self):
# Test of minumum, maximum.
assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_(eq(minimum(x, y), where(less(x, y), x, y)))
assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
def test_testTakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y)))
assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y)))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_testInplace(self):
# Test of inplace operations and rich comparisons
y = arange(10)
x = arange(10)
xm = arange(10)
xm[2] = masked
x += 1
assert_(eq(x, y + 1))
xm += 1
assert_(eq(x, y + 1))
x = arange(10)
xm = arange(10)
xm[2] = masked
x -= 1
assert_(eq(x, y - 1))
xm -= 1
assert_(eq(xm, y - 1))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x *= 2.0
assert_(eq(x, y * 2))
xm *= 2.0
assert_(eq(xm, y * 2))
x = arange(10) * 2
xm = arange(10)
xm[2] = masked
x //= 2
assert_(eq(x, y))
xm //= 2
assert_(eq(x, y))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x /= 2.0
assert_(eq(x, y / 2.0))
xm /= arange(10)
assert_(eq(xm, ones((10,))))
x = arange(10).astype(np.float32)
xm = arange(10)
xm[2] = masked
x += 1.
assert_(eq(x, y + 1.))
def test_testPickle(self):
# Test of pickling
import pickle
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4, 3)
s = pickle.dumps(x)
y = pickle.loads(s)
assert_(eq(x, y))
def test_testMasked(self):
# Test of masked element
xx = arange(6)
xx[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(xx[1] is masked)
self.assertEqual(filled(xx[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assertTrue(eq(2.0, average(ott, axis=0)))
self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
self.assertTrue(eq(2.0, result))
self.assertTrue(wts == 4.0)
ott[:] = masked
self.assertTrue(average(ott, axis=0) is masked)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0]))
self.assertTrue(average(ott, axis=1)[0] is masked)
self.assertTrue(eq([2., 0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=1)
self.assertTrue(eq(wts, [1., 0.]))
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6)
self.assertTrue(allclose(average(x, axis=0), 2.5))
self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
self.assertTrue(allclose(average(y, None),
np.add.reduce(np.arange(6)) * 3. / 12.))
self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.))
self.assertTrue(allclose(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.]))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5))
self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5))
self.assertTrue(average(masked_array(x, m4), axis=0) is masked)
self.assertEqual(average(masked_array(x, m5), axis=0), 0.0)
self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
self.assertTrue(allclose(average(z, None), 20. / 6.))
self.assertTrue(allclose(average(z, axis=0),
[0., 1., 99., 99., 4.0, 7.5]))
self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0]))
self.assertTrue(allclose(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
self.assertEqual(shape(r1), shape(w1))
self.assertEqual(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
self.assertEqual(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=1)
self.assertEqual(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
self.assertTrue(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
a2da = average(a2d, axis=0)
self.assertTrue(eq(a2da, [0.5, 3.0]))
a2dma = average(a2dm, axis=0)
self.assertTrue(eq(a2dma, [1.0, 3.0]))
a2dma = average(a2dm, axis=None)
self.assertTrue(eq(a2dma, 7. / 3.))
a2dma = average(a2dm, axis=1)
self.assertTrue(eq(a2dma, [1.5, 4.0]))
def test_testToPython(self):
self.assertEqual(1, int(array(1)))
self.assertEqual(1.0, float(array(1)))
        self.assertEqual(1, int(array([[[1]]])))
# HIV-1 protease Markov State Model Conformational Gating Analysis
#Author: <NAME>
#Correspondence: <EMAIL>, Affiliation: 1. Heidelberg Institute for Theoretical Studies, HITS gGmbH 2. European Molecular Biology Laboratory
#This module contains core functions for molecular dynamics (MD) simulation and Markov state model analyses of apo HIV-1 protease conformational gating for the manuscript:
#<NAME>‡, <NAME>, <NAME>, <NAME> (2021) A multiscale approach for computing gated ligand binding from molecular dynamics and Brownian dynamics simulations
########################################################################################################################################
from __future__ import print_function
import warnings
import pyemma
import os
#%pylab inline
import pyemma.coordinates as coor
import pyemma.msm as msm
import pyemma.plots as mplt
from pyemma import config
config.show_progress_bars = False
#print(config.show_progress_bars)
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.image as mpimg
from collections import OrderedDict
import math
import numpy as np
import sys
import os.path
import random
import errno
from shutil import copyfile
import operator
import re
from glob import glob
#from kmodes.kmodes import KModes
import random
import MDAnalysis
from MDAnalysis.analysis import dihedrals
from MDAnalysis.analysis import align, rms, distances, contacts
from MDAnalysis.analysis.base import AnalysisFromFunction
from MDAnalysis.coordinates.memory import MemoryReader
from MDAnalysis.analysis import density
#import MDAnalysis.analysis.hbonds
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import HydrogenBondAnalysis as HBA
mpl.rcParams.update({'font.size': 12})
print('pyEMMA version: '+ pyemma.__version__)
print('MDAnalysis version: ' + MDAnalysis.version.__version__)
from sklearn.neighbors import KernelDensity
from matplotlib import gridspec
from scipy.stats import norm
########################################################################################################################################
##########################################################################################################
#
# FUNCTIONS
#
##########################################################################################################
##########################################################################################################
#################
#pyEMMA standard Functions
#################
#################
def save_figure(name):
# change these if wanted
do_save = True
fig_dir = './figs/'
if do_save:
        plt.savefig(fig_dir + name, bbox_inches='tight')
#################
def plot_sampled_function(ax_num, xall, yall, zall, dim, msm_dims, ticks_set, labels, ax=None, nbins=100, nlevels=20, cmap=cm.bwr, cbar=True, cbar_label=None):
# histogram data
xmin = np.min(xall)
xmax = np.max(xall)
dx = (xmax - xmin) / float(nbins)
ymin = np.min(yall)
ymax = np.max(yall)
dy = (ymax - ymin) / float(nbins)
# bin data
#eps = x
xbins = np.linspace(xmin - 0.5*dx, xmax + 0.5*dx, num=nbins)
ybins = np.linspace(ymin - 0.5*dy, ymax + 0.5*dy, num=nbins)
xI = np.digitize(xall, xbins)
yI = np.digitize(yall, ybins)
# result
z = np.zeros((nbins, nbins))
N = np.zeros((nbins, nbins))
# average over bins
for t in range(len(xall)):
z[xI[t], yI[t]] += zall[t]
N[xI[t], yI[t]] += 1.0
with warnings.catch_warnings() as cm:
warnings.simplefilter('ignore')
z /= N
# do a contour plot
extent = [xmin, xmax, ymin, ymax]
if ax is None:
        ax = plt.gca()
s = ax.contourf(z.T, 100, extent=extent, cmap=cmap)
if cbar:
        cbar = plt.colorbar(s)
if cbar_label is not None:
cbar.ax.set_ylabel(cbar_label)
ax.set_xlim(xbins.min()-5,xbins.max()+5)
ax.set_xticks(ticks_set[np.where(msm_dims==dim[0])[0][0]])
ax.set_xlabel(labels[dim[0]],fontsize=10)
ax.set_ylim(ybins.min()-5,ybins.max()+5)
ax.set_yticks(ticks_set[np.where(msm_dims==dim[1])[0][0]])
if ax_num==0:
ax.set_ylabel(labels[dim[1]],fontsize=10)
return ax
#################
def plot_sampled_density(ax_num, xall, yall, zall, dim, msm_dims, ticks_set, labels, ax=None, nbins=100, cmap=cm.Blues, cbar=True, cbar_label=None):
return plot_sampled_function(ax_num, xall, yall, zall, dim, msm_dims, ticks_set, labels, ax=ax, nbins=nbins, cmap=cmap, cbar=cbar, cbar_label=cbar_label)
##########################################################################################################
#################
#pyEMMA MSM functions
#################
#################
def eval_transformer(trans_obj):
# Effective dimension (Really? If we just underestimate the Eigenvalues this value also shrinks...))
print('Evaluating transformer: ', str(trans_obj.__class__))
print('effective dimension', np.sum(1.0 - trans_obj.cumvar))
print('eigenvalues', trans_obj.eigenvalues[:5])
print('partial eigensum', np.sum(trans_obj.eigenvalues[:10]))
print('total variance', np.sum(trans_obj.eigenvalues ** 2))
print()
#################
def project_and_cluster(trajfiles, featurizer, sparsify=False, tica=True, lag=100, scale=True, var_cutoff=0.95, ncluster=100):
"""
Returns
-------
trans_obj, Y, clustering
"""
X = coor.load(trajfiles, featurizer)
if sparsify:
        X = remove_constant(X)  # NOTE: assumes a remove_constant() helper (drops constant features) is defined elsewhere
if tica:
trans_obj = coor.tica(X, lag=lag, var_cutoff=var_cutoff)
else:
trans_obj = coor.pca(X, dim=-1, var_cutoff=var_cutoff)
Y = trans_obj.get_output()
if scale:
for y in Y:
y *= trans_obj.eigenvalues[:trans_obj.dimension()]
cl_obj = coor.cluster_kmeans(Y, k=ncluster, max_iter=3, fixed_seed=True)
return trans_obj, Y, cl_obj
##########################################################################################################
#################
#File reading functions
#################
def read_int_matrix(fname):
"""
reads a file containing a matrix of integer numbers
"""
a = []
with open(fname) as f:
for line in f:
row = line.rstrip().split()
a.append(row)
foo = np.array(a)
    bar = foo.astype(int)
return bar
#################
#Read in matrix of floats from file
def read_float_matrix(fname):
"""
reads a file containing a matrix of floating point numbers
"""
a = []
with open(fname) as f:
for line in f:
row = line.rstrip().split()
a.append(row)
foo = np.array(a)
    bar = foo.astype(float)
return bar
def READ_INITIAL_FILE ( filename ):
# read in group data into lists of lists
file = open(filename,'r')
coords=[]
for line in file:
vals=line.split()
vals2 = [float(numeric_string) for numeric_string in vals[3:6]]
coords.append(vals2)
return coords;
##########################################################################################################
#################
#Trajectory Processing Functions
#################
# This sorts the list of trajectories in double numerical order e.g. 1-1.dcd
def sorted_traj_list(traj_list):
s=[]
for i in range(len(traj_list)):
string = traj_list[i]
s.append([int(n) for n in re.findall(r'\d+\d*', string)])
s = sorted(s, key = operator.itemgetter(0, 1))
sorted_traj_list = []
for i in range(len(s)):
sorted_traj_list.append(indir+'/'+str(s[i][0])+'-'+str(s[i][1])+'.dcd')
return(sorted_traj_list)
#################
#Creates a trajectory list from an array that contains the format: batch sims frames
def traj_list_from_sims_array(sims_array, indir):
traj_list = []
for i in range(len(sims_array)):
traj_list.append(indir+'/'+str(sims_array[i][0])+'-'+str(sims_array[i][1])+'.dcd')
return traj_list
#################
#Creates a trajectory list from an array that contains the format: batch sims frames
def traj_list_from_sims_array_xtc(sims_array, indir):
traj_list = []
for i in range(len(sims_array)):
traj_list.append(indir+'/'+str(sims_array[i][1])+'-filtered.xtc')
return traj_list
#################
#Select only those trajectories from an trajlist/array that have >= than a certain threshold of frames
def thresh_subset(sims_array,thresh):
frames_thresh=np.empty((0,3))
for i in range(len(sims_array)):
if sims_array[i][2]>=thresh:
frames_thresh=np.vstack((frames_thresh,sims_array[i]))
    f=frames_thresh.astype(int)
return f
def predefined_simsarray(full_sims_array):
"""
#creates a subarray from a predefined sim list of batch and sim numbers and a complete sims array
# this is for testing a limited number of sims e.g. if copied to local resources
"""
simlist=[[1,1],[2,1],[3,1],[4,1],[5,1],[6,1]]
sublist = []
for i in range(len(simlist)):
sublist = sublist + [x for x in full_sims_array.tolist() if x[0]==simlist[i][0] and x[1]==simlist[i][1]]
subarray=np.array(sublist)
return subarray
##########################################################################################################
#################
#Functions for calculating continuous minimum nearest neighbour contact
#################
#################
#Minimum Mean Continuous minimum distance across sliding window tau
def cmindist(data, tau):
"""
computes continuous minimum distance of data array as the minimum of the mean sliding window of length tau
"""
tscan=np.shape(data)[0]-tau+1
num_feat=np.shape(data)[1]
cmd=np.empty((0,num_feat))
for i in range(tscan):
cmd=np.vstack((cmd,np.mean(data[i:i+tau,:],axis=0)))
return np.min(cmd,axis=0)
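# Example (hypothetical input): for an (n_frames, n_pairs) mindist array `md`,
# cmindist(md, tau=10) first averages every 10-frame sliding window per column and
# then returns the per-pair minimum over all windows (one value per res-res pair).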
#################
#Mean Continuous minimum distance across sliding window tau
def taumean_mindist(data, tau):
"""
computes continuous minimum distance of data array as the mean sliding window of length tau
"""
tscan=np.shape(data)[0]-tau+1
num_feat=np.shape(data)[1]
cmd=np.empty((0,num_feat))
for i in range(tscan):
cmd=np.vstack((cmd,np.mean(data[i:i+tau,:],axis=0)))
return cmd
#################
#Longest continuous time of minimum distance
def long_mindist(data, thresh):
"""
computes the longest time the minimum distance stays within a threshhold of thresh
"""
tscan=np.shape(data)[0]
num_feat=np.shape(data)[1]
    count=np.zeros(num_feat)
    lmd=np.zeros(num_feat)
for i in range(tscan):
for j in range(num_feat):
if data[i,j] < thresh:
count[j] += 1
else:
if count[j] > lmd[j]:
lmd[j] = count[j]
count[j] = 0
    return lmd.astype(int)
#################
#Determine res-res pairs included for which to calculate minimum distance features
def res_pairs(num_res, nearn):
"""
computes res-res pairs included for which to calculate minimum distance features
state num of residues, and nearest neighbour skip e.g. i+3 is nearn=3
"""
res=[]
for i in range(num_res-nearn):
for j in range(i+nearn,num_res):
res.append([i+1,j+1])
return res
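# Worked example: res_pairs(5, 3) -> [[1, 4], [1, 5], [2, 5]], i.e. all 1-indexed
# residue pairs of a 5-residue chain that are separated by at least 3 positions.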
#################
#Calculate longest duration of minimum distance below a threshold of each res-res pair across traj ensemble
def ensemble_maxdur(traj_arr, col_exc, res, tau, thresh):
"""
computes longest duration of minimum distance below a threshold of each res-res pair across traj ensemble
using: list of traj nums -traj_array, res-pair list - res, sliding mean smoothing - tau, mindist threshold - thresh
col_exc is the number of colums in data file specified by traj_arr to exclude - normally col_exc=3
"""
lmd=np.empty((0,len(res)))
for i in range(len(traj_arr)):
fname = './analysis/resres_mindist/'+str(traj_arr[i,0])+'-'+str(traj_arr[i,1])+'.dat'
mindist = read_float_matrix(fname)
mindist=mindist[:,col_exc:]
#cmd=cmindist(mindist,tau)
if tau>1:
taumd=taumean_mindist(mindist,tau)
else:
taumd=mindist
lmd=np.vstack((lmd,long_mindist(taumd, thresh)))
print("Batch: "+str(traj_arr[i,0])+", Sim: "+str(traj_arr[i,1]))
#return np.max(lmd.astype(np.int),axis=0)
    return lmd.astype(int)
#################
#Continuous minimum nearest neighbour contact calculation
def mindist_contacts(res_start, res_end, tau_c):
#Number of residues
num_res=23
#Next nearest neighbour - e.g. i+3
nearn=3
#List of i!=j res-res number pairs with i:i+3
res=res_pairs(num_res,nearn)
#Maximum duration each above res-res contact is formed in each traj
#In reality this is done once on the server and saved as a file as time consuming
#Number of columns to exclude in data files
#col_exc=3
#window length for calculating sliding mean minimum distance
#tau=10
#Threshold distance in Angstrom
#thresh=4.0
#ens_max_dur=ensemble_maxdur(sims_array, col_exc, res, tau, thresh)
#np.savetxt('ens_max_dur.dat', ens_max_dur, fmt='%1d',delimiter=' ')
fname = './ens_max_dur.dat'
    ens_max_dur = read_int_matrix(fname)
    #Collapse all trajectories into 1 row showing maximum of each res-res pair
max_dur=np.max(ens_max_dur,axis=0)
#List of res-res contacts that fulfil tau_c - res labelling starting from 1
contacts_list=[res[x] for x in range(len(res)) if max_dur[x]>=tau_c]
contacts=np.array(contacts_list)
contacts=contacts[contacts[:,0]>=res_start]
contacts=contacts[contacts[:,1]<=res_end]
#Con0 is relabeling residue pairs starting from 0
con0_list=[[x[0]-1, x[1]-1] for x in contacts.tolist()]
con0=np.array(con0_list)
#Theoretical maximum size of res list for given residue range
num_res_select = res_end - res_start + 1
res_select=res_pairs(num_res_select,nearn)
max_res_select = len(res_select)
return con0, res, max_res_select, max_dur
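# Typical call (argument values are illustrative): con0, res, max_res_select, max_dur =
# mindist_contacts(res_start=1, res_end=23, tau_c=100) keeps only those res-res
# pairs whose contact persists for at least tau_c frames somewhere in the ensemble,
# using the precomputed durations stored in ./ens_max_dur.dat.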
##########################################################################################################
#################
#Feature Data Loading Functions
#################
#################
#Lambda coordinate space
def lambda_obj(lamdir,sims_array,num_frames=None):
"""
# loads values from lambda space for HIV-1 PR into lambda_obj
"""
coords=[]
for i in range(len(sims_array)):
filename=lamdir + '/' + str(sims_array[i][0])+'-'+str(sims_array[i][1]) + '.dat'
if os.path.isfile(filename):
tmpcoords=read_float_matrix(filename)
if num_frames==None:
coords.append(tmpcoords[:,3:6])
else:
coords.append(tmpcoords[0:num_frames,3:6])
return coords
#################
#Multidimenstional metric files coordinate space
def multidir_obj(dir_array,sims_array,num_frames=None):
"""
# loads values from lambda space and other metrics for HIV-1 PR into lambda_obj
"""
coords=[]
for i in range(len(sims_array)):
#Make a list for each correspoinding file in different directories
filename=[dir_array[x] + '/' + str(sims_array[i][0])+'-'+str(sims_array[i][1]) + '.dat' for x in range(len(dir_array))]
#Check that same files exist across all deisgnated directories
if np.sum([os.path.isfile(filename[x]) for x in range(len(dir_array))])==len(dir_array):
tmpcoords=read_float_matrix(filename[0])
tmpcoords=tmpcoords[:,3:]
for i in range(1,len(dir_array)):
tmpcoords_i=read_float_matrix(filename[i])
tmpcoords_i=tmpcoords_i[:,3:]
tmpcoords=np.hstack((tmpcoords,tmpcoords_i))
if num_frames==None:
coords.append(tmpcoords)
else:
coords.append(tmpcoords[0:num_frames,:])
return coords
##########################################################################################################
#################
#Coordinate Transformation Functions
#################
#################
def xyz_to_cyl_coords(data,th_offset=0):
x,y = data[:,0], data[:,1]
rho = np.sqrt(x**2+y**2)
theta = 180*np.arctan2(y,x)/np.pi - th_offset
theta %= 360
z=data[:,2]
return np.transpose(np.vstack((rho,theta,z)))
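# Worked example: the point (x, y, z) = (1, 1, 2) with th_offset=0 maps to
# (rho, theta, z) = (sqrt(2) ~ 1.414, 45.0, 2); theta is always wrapped into [0, 360).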
##########################################################################################################
#################
#Plotting functions
#################
#################
def plot_frames(plt,num_frames):
"""
# Plot number of frames for each sim
"""
fig, axs = plt.subplots(1, 1, figsize=(12, 6), constrained_layout=True)
ax=plt.axes()
plt.xticks(np.arange(0, len(num_frames), 100))
plt.yticks(np.arange(0, 2000, 100))
ax.set_xlim(0,len(num_frames))
ax.set_ylim(0,2000)
x=np.array(range(len(num_frames)))
y=num_frames
p1 = ax.plot(x, y,'k-o')
plt.show()
return
#################
def plot_dropoff(plt,sorted_frames):
"""
#Plot Drop off of trajectories with increasing number of frames
"""
fig, axs = plt.subplots(1, 1, figsize=(12, 6), constrained_layout=True)
ax=plt.axes()
plt.xticks(np.arange(0, 9000, 1000))
plt.yticks(np.arange(0, 700, 100))
ax.set_xlim(0,9000)
ax.set_ylim(0,600)
plt.xlabel('Number of frames')
plt.ylabel('Number of trajectories')
x = sorted_frames
y = np.arange(sorted_frames.size)
#p1 = ax.step(x, y,'k-')
p2 = ax.step(x[::-1], y,'r-')
plt.show()
#################
def plot_minmax_coverage(plt,min_req_frames,sorted_frames,min_coverage,max_coverage):
"""
#Plot minimum and maximum coverage based on a minimum required number of frames/traj
"""
fig, axs = plt.subplots(1, 1, figsize=(12, 6), constrained_layout=True)
ax=plt.axes()
plt.xticks(np.arange(0, 9000, 1000))
plt.yticks(np.arange(0, 1.1, 0.1))
ax.set_xlim(0,9000)
ax.set_ylim(0,1)
plt.xlabel('Used traj length (frames)')
    plt.ylabel('Fraction of frames used')
x = min_req_frames
y = min_coverage/sum(sorted_frames)
y2 = max_coverage/sum(sorted_frames)
p1 = ax.step(x, y,'k-')
p2 = ax.step(x, y2,'b-')
plt.show()
return
#################
def trajplot_format(plt,tlim,ylims,ylab=r"Distance (\AA)"):
plt.rc('text', usetex=True)
plt.xlim(0,(tlim+1)/10)
if ylims[0]>=0:
plt.ylim(0,ylims[1]+1)
else:
plt.ylim(ylims[0],ylims[1]+1)
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel(r"Time (ns)", fontsize=30)
plt.ylabel(ylab, fontsize=30)
return
#################
def plot_traj(Y,sims_array,traj_no,dims,colors=['b','k','r','y'],md_numbering=True):
tlim=np.shape(Y)[1]
if md_numbering is True:
traj_id=np.where(sims_array[:,1]==traj_no)[0][0]
else:
traj_id=traj_no
ydat=np.array([j for m in [Y[traj_id][:tlim,dims[i]] for i in range(len(dims))] for j in m])
ylims=[ydat.min(),ydat.max()]
for i in range(len(dims)):
plt.plot([x/10 for x in range(1,tlim+1)], Y[traj_id][:tlim,dims[i]], '-', color=colors[i])
trajplot_format(plt,tlim,ylims)
return
#################
def plot_traj_from_Z(Z,nsnaps,sims_array,traj_no,dims,colors=['b','k','r','y'],md_numbering=True):
tlim=nsnaps
if md_numbering is True:
traj_id=np.where(sims_array[:,1]==traj_no)[0][0]
else:
traj_id=traj_no
traj_start_ind=traj_id*nsnaps
ydat=np.array([j for m in [Z[traj_start_ind:traj_start_ind+tlim,dims[i]] for i in range(len(dims))] for j in m])
ylims=[ydat.min(),ydat.max()]
for i in range(len(dims)):
plt.plot([x/10 for x in range(1,tlim+1)], Z[traj_start_ind:traj_start_ind+tlim,dims[i]], '-', color=colors[i])
trajplot_format(plt,tlim,ylims)
return
#################
def plot_traj_from_MD(data,nsnaps,dims,colors=['b','k','r','y']):
tlim=nsnaps
ydat=np.array([j for m in [data[:tlim,dims[i]] for i in range(len(dims))] for j in m])
ylims=[ydat.min(),ydat.max()]
for i in range(len(dims)):
plt.plot([x/10 for x in range(1,tlim+1)], data[:tlim,dims[i]], '-', color=colors[i])
trajplot_format(plt,tlim,ylims)
return
#################
def plot_free_energy_landscape(Z,plt,xdim,ydim,labels,cmap="jet",fill=True, contour_label=True,contour_color='k',wg=None):
#x=np.vstack(Y)[:,0]
#y=np.vstack(Y)[:,2]
x=Z[:,xdim]
y=Z[:,ydim]
rho,xbins,ybins = np.histogram2d(x,y,bins=[100,100],weights=wg)
kBT=0.596
G=-kBT*np.log(rho+0.1)
Gzero=G-np.min(G)
fig, ax = plt.subplots(figsize=(12,9))
ex=[xbins.min(),xbins.max(),ybins.min(),ybins.max()]
lev=[x /10.0 for x in range(int(5*round(np.min(G)*2))-10,0,5)]
contours=plt.contour(np.transpose(G), extent=ex, levels = lev, colors=contour_color,linestyles= '-' )
if fill is True:
plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
if contour_label is True:
plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
#plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
#plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
cbar = plt.colorbar()
#plt.clim(np.min(G)-0.5,np.max(G)+0.5)
plt.clim(-10,0)
cbar.set_label(r"G (kcal/mol)", rotation=90, fontsize=30)
cbar.ax.tick_params(labelsize=30)
plt.rc('text', usetex=True)
plt.xlim(xbins.min()-5,xbins.max())
plt.ylim(ybins.min()-5,ybins.max())
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel(labels[xdim], fontsize=30)
plt.ylabel(labels[ydim], fontsize=30)
return plt
#################
def plot_free_energy_landscape_nocbar(Z,plt,xdim,ydim,labels,cmap="jet",fill=True, contour_label=True,contour_color='k',wg=None):
#x=np.vstack(Y)[:,0]
#y=np.vstack(Y)[:,2]
x=Z[:,xdim]
y=Z[:,ydim]
rho,xbins,ybins = np.histogram2d(x,y,bins=[100,100],weights=wg)
kBT=0.596
G=-kBT*np.log(rho+0.1)
Gzero=G-np.min(G)
#fig, ax = plt.subplots(figsize=(9,9))
ex=[xbins.min(),xbins.max(),ybins.min(),ybins.max()]
lev=[x /10.0 for x in range(int(5*round(np.min(G)*2))-10,0,5)]
contours=plt.contour(np.transpose(G), extent=ex, levels = lev, colors=contour_color,linestyles= '-' )
if fill is True:
plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
if contour_label is True:
plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
#plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
#plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
#cbar = plt.colorbar()
#plt.clim(np.min(G)-0.5,np.max(G)+0.5)
plt.clim(-10,30)
#cbar.set_label(r"G (kcal/mol)", rotation=90, fontsize=30)
#cbar.ax.tick_params(labelsize=30)
plt.rc('text', usetex=True)
plt.xlim(xbins.min()-5,xbins.max())
plt.ylim(ybins.min()-5,ybins.max())
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel(labels[xdim], fontsize=30)
plt.ylabel(labels[ydim], fontsize=30)
return plt
#################
def plot_free_energy_landscape_nocbar_array(Z,plt,xdim,ydim,labels,cmap="jet",
fill=False, contour_label=False,contour_color='k',
wg=None,show_ticks=False,show_labels=False):
#x=np.vstack(Y)[:,0]
#y=np.vstack(Y)[:,2]
x=Z[:,xdim]
y=Z[:,ydim]
rho,xbins,ybins = np.histogram2d(x,y,bins=[100,100],weights=wg)
kBT=0.596
G=-kBT*np.log(rho+0.1)
Gzero=G-np.min(G)
#fig, ax = plt.subplots(figsize=(9,9))
ex=[xbins.min(),xbins.max(),ybins.min(),ybins.max()]
lev=[x /10.0 for x in range(int(5*round(np.min(G)*2))-10,0,5)]
contours=plt.contour(np.transpose(G), extent=ex, levels = lev, colors=contour_color,linestyles= '-' )
if fill is True:
plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
if contour_label is True:
plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
#plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
#plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
#cbar = plt.colorbar()
#plt.clim(np.min(G)-0.5,np.max(G)+0.5)
plt.clim(-10,30)
#cbar.set_label(r"G (kcal/mol)", rotation=90, fontsize=30)
#cbar.ax.tick_params(labelsize=30)
plt.rc('text', usetex=True)
plt.xlim(xbins.min()-5,xbins.max())
plt.ylim(ybins.min()-5,ybins.max())
if show_ticks:
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
else:
plt.xticks([])
plt.yticks([])
if show_labels:
plt.xlabel(labels[xdim], fontsize=30)
plt.ylabel(labels[ydim], fontsize=30)
return plt
#################
def plot_weighted_free_energy_landscape(Z,plt,xdim,ydim,labels, cmap="jet", fill=True, contour_label=True, contour_color='k', clim=[-10,0],cbar=False, cbar_label="G (kcal/mol)",lev_max=-1,shallow=False,wg=None,fsize_cbar=(12,9),fsize=(9,9),standalone=True):
if standalone:
if cbar:
fig, ax = plt.subplots(figsize=fsize_cbar)
else:
fig, ax = plt.subplots(figsize=fsize)
#x=np.vstack(Y)[:,0]
#y=np.vstack(Y)[:,2]
x=Z[:,xdim]
y=Z[:,ydim]
rho,xbins,ybins = np.histogram2d(x,y,bins=[100,100],weights=wg)
rho += 0.1
kBT=0.596
G=-kBT*np.log(rho/np.sum(rho))
G=G-np.max(G)
ex=[xbins.min(),xbins.max(),ybins.min(),ybins.max()]
lev=[x /10.0 for x in range(int(5*round(np.min(G)*2))-10,int(lev_max*10),5)]
if shallow is True:
lev_shallow=[-0.4,-0.3,-0.2,-0.1]
lev+=lev_shallow
contours=plt.contour(np.transpose(G), extent=ex, levels = lev, colors=contour_color, linestyles= '-' )
if fill is True:
plt.contourf(np.transpose(G), extent=ex,cmap = cmap, levels = lev)
if contour_label is True:
plt.clabel(contours, inline=True, fmt='%1.1f', fontsize=20)
plt.clim(clim[0],clim[1])
plt.rc('text', usetex=True)
if cbar:
cbar = plt.colorbar()
if cbar_label is not None:
cbar.ax.set_ylabel(cbar_label, rotation=90, fontsize=30)
cbar.ax.tick_params(labelsize=30)
plt.xlim(xbins.min()-5,xbins.max()+5)
plt.ylim(ybins.min()-5,ybins.max()+5)
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel(labels[xdim], fontsize=30)
plt.ylabel(labels[ydim], fontsize=30)
return plt
#################
def annotate_microstates(plt,sets,cl_x,cl_y,tsize=12):
i=0
for xy in zip(cl_x,cl_y):
plt.annotate(' %s' % sets[i], xy=xy, textcoords='data',
size=tsize,weight='bold',color='black', fontname='Courier'
)
#arrowprops=dict(edgecolor='red',facecolor='red', shrink=0.02,width=1,headwidth=5)
#,edgecolor='red',facecolor='red', shrink=0.05,width=2
#arrowstyle="->",edgecolor='white',facecolor='white'
i+=1
return plt
#################
def plot_metastable_sets(plt,cl_obj,meta_sets,MSM_dims,dim,mstate_color,msize=10,annotate=False,textsize=18):
for k in range(len(meta_sets)):
cl_x=cl_obj.clustercenters[meta_sets[k],np.where(MSM_dims==dim[0])[0][0]]
cl_y=cl_obj.clustercenters[meta_sets[k],np.where(MSM_dims==dim[1])[0][0]]
plt.plot(cl_x,cl_y, linewidth=0, marker='o', markersize=msize, markeredgecolor=mstate_color[k],markerfacecolor=mstate_color[k], markeredgewidth=2)
#plt.plot(cl_obj.clustercenters[meta_sets[k],np.where(MSM_dims==dim[0])[0][0]],cl_obj.clustercenters[meta_sets[k],np.where(MSM_dims==dim[1])[0][0]], linewidth=0, marker='o', markersize=msize, markeredgecolor=mstate_color[k],markerfacecolor=mstate_color[k], markeredgewidth=2)
if annotate is True:
plt=annotate_microstates(plt,meta_sets[k],cl_x,cl_y,tsize=textsize)
return
#################
def plot_projected_density(Z, zall, plt, xdim, ydim, labels, nbins=100, nlevels=20, cmap=cm.bwr, cbar=False, cbar_label=None):
if cbar:
fig, ax = plt.subplots(figsize=(12,9))
else:
fig, ax = plt.subplots(figsize=(9,9))
xall=Z[:,xdim]
yall=Z[:,ydim]
# histogram data
xmin = np.min(xall)
xmax = np.max(xall)
dx = (xmax - xmin) / float(nbins)
ymin = np.min(yall)
ymax = np.max(yall)
dy = (ymax - ymin) / float(nbins)
# bin data
#eps = x
xbins = np.linspace(xmin - 0.5*dx, xmax + 0.5*dx, num=nbins)
ybins = np.linspace(ymin - 0.5*dy, ymax + 0.5*dy, num=nbins)
xI = np.digitize(xall, xbins)
yI = np.digitize(yall, ybins)
# result
z = np.zeros((nbins, nbins))
N = np.zeros((nbins, nbins))
# average over bins
for t in range(len(xall)):
z[xI[t], yI[t]] += zall[t]
N[xI[t], yI[t]] += 1.0
#with warnings.catch_warnings() as cm:
#warnings.simplefilter('ignore')
#z /= N
# do a contour plot
extent = [xmin, xmax, ymin, ymax]
lev_step=0.0001
lev=[x*lev_step for x in range(400)]
plt.contourf(z.T, 100, extent=extent, cmap=cmap, levels = lev)
plt.clim(0,0.05)
plt.rc('text', usetex=True)
if cbar:
cbar = plt.colorbar()
if cbar_label is not None:
cbar.ax.set_ylabel(cbar_label, rotation=90, fontsize=30)
cbar.ax.tick_params(labelsize=30)
plt.xlim(xbins.min()-5,xbins.max()+5)
plt.ylim(ybins.min()-5,ybins.max()+5)
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel(labels[xdim], fontsize=30)
plt.ylabel(labels[ydim], fontsize=30)
return plt
#################
#Plot Timescale curves
def plot_its(mplt,its_dim_type,x_lim,y_lim):
#Plot relaxation timescales
mpl.rcParams.update({'font.size': 20})
mplt.plot_implied_timescales(its_dim_type, ylog=True, dt=0.1, units='ns', linewidth=2)
plt.xlim(0, x_lim); plt.ylim(0, y_lim);
#save_figure('its.png')
return
#################
def plot_timescale_ratios(its,ntims=5,ylim=4):
tim=np.transpose(its.timescales)
lags=its.lags
fig, ax = plt.subplots(figsize=(6,4))
for i in range(ntims):
plt.plot(lags/10,tim[i]/tim[i+1],'-o',label="$t_{"+str(i+1)+"}$/$t_{"+str(i+2)+"}$")
plt.rc('text', usetex=True)
plt.xlim(0,30+np.max(lags)/10)
plt.ylim(0,ylim)
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel("Time (ns)", fontsize=30)
plt.ylabel(r"$t_{i}/t_{i+1}$", fontsize=30)
legend = plt.legend(loc='upper right', shadow=False, fontsize='small')
return
#################
def plot_kinetic_variance(its,ylim=20):
lags=its.lags
fig, ax = plt.subplots(figsize=(6,4))
kinvar=[(M.eigenvalues()**2).sum() for M in its.models]
plt.plot(0.1*lags, kinvar, linewidth=2)
plt.rc('text', usetex=True)
plt.xlim(0,np.max(lags)/10)
plt.ylim(0,ylim)
plt.xticks(fontsize=30, rotation=0)
plt.yticks(fontsize=30, rotation=0)
plt.xlabel("Time (ns)", fontsize=30)
plt.ylabel(r"$\sigma^{2}$", fontsize=30)
return
##########################################################################################################
#################
#File Writing Functions
#################
#################
def write_list_to_file(fname,lname):
"""
#Writes a list to a filename: fname is filename, lname is list name e.g. traj_list
"""
with open(fname,'w') as f:
for item in lname:
f.write("%s\n" % item)
return
#################
def save_current_fig(plt, figname):
fig = plt.gcf()
fig.set_size_inches(12, 9)
    plt.savefig(figname, dpi=600, facecolor='w', edgecolor='w',
        orientation='portrait', format=None,
        transparent=False, bbox_inches=None, pad_inches=0.1,
        metadata=None)
return
#################
def save_current_fig2(plt, figname,pad=0.1):
fig = plt.gcf()
#fig.set_size_inches(12, 9)
plt.savefig(figname, dpi=600, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches='tight', pad_inches=pad,
metadata=None)
return
##########################################################################################################
#################
#Pre-Optimized Coarse-Grained Metastable State Kinetic Rate Calculation and Transition Path Theory Functions
#################
#################
def tpt_rate_matrix(M,pcca_sets,tfac):
n_sets=len(pcca_sets)
rate=np.zeros((n_sets,n_sets))
for i in range(n_sets):
for j in range(n_sets):
if i != j:
rate[i,j]=tfac*(msm.tpt(M,pcca_sets[i],pcca_sets[j]).rate)
return rate
#################
def gamma_factor(kon,init,fin):
#gam=np.sum(kon[init,fin])/(np.sum(kon[init,fin]) + np.sum(kon[fin,init]))
gam=np.sum(kon[np.ix_(init,fin)])/(np.sum(kon[np.ix_(init,fin)]) + np.sum(kon[np.ix_(fin,init)]))
return gam
def tau_c(kon,init,fin):
#gam=np.sum(kon[init,fin])/(np.sum(kon[init,fin]) + np.sum(kon[fin,init]))
tau_c=1/(np.sum(kon[np.ix_(init,fin)]) + np.sum(kon[np.ix_(fin,init)]))
return tau_c
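# In both helpers kon is a macrostate rate matrix and init/fin are lists of
# macrostate indices:
#   gamma = sum_{i in init, j in fin} k[i,j] / (sum forward k[i,j] + sum backward k[j,i])
#   tau_c = 1 / (sum forward k[i,j] + sum backward k[j,i])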
#################
def metastable_kinetics_calc(M,tau,n_sets,init,fin):
#Create the PCCA sets, distributions, memberships
M.pcca(n_sets)
pccaX = M.metastable_distributions
pccaM = M.metastable_memberships # get PCCA memberships
pcca_sets = M.metastable_sets
pcca_assign = M.metastable_assignments
#Calculate Free energy based on the raw prob of discretized snapshots of microstates belonging to metastable state
all_disc_snaps=np.hstack([dtraj for dtraj in M.discrete_trajectories_full])
    mstate_rho=np.array([len(np.where(all_disc_snaps==i)[0]) for i in range(n_clusters)])/len(all_disc_snaps)  # NOTE: n_clusters (number of k-means microstates) must be defined in the calling scope
meta_rho=np.array([np.sum(mstate_rho[pcca_sets[i]]) for i in range(len(pcca_sets))])
# Calculate Free Energy based on sum of stationary distribution (as calculated by transition matrix) of microstates belonging to each metastable state
P_msm=M.transition_matrix
meta_pi=np.array([np.sum(M.stationary_distribution[pcca_sets[i]]) for i in range(len(pcca_sets))])
#Manually Calculate the HMM free energy from the X and M matrices
NORM_M=np.linalg.inv(np.dot(np.transpose(pccaM),pccaM))
NORM_X=np.linalg.inv(np.dot(pccaX,np.transpose(pccaX)))
I=np.identity(len(pccaM))
PI=np.transpose(M.pi)*I
cg_pi=np.dot(NORM_X,np.dot(pccaX,np.dot(PI,pccaM)))
cg_pi=np.sum(np.identity(len(pccaX))*cg_pi,axis=0)
#Calculate CG transition matrix from manually constructed HMM (prior to Baum-Welch optimisation)
P_tilda=np.dot(NORM_M,np.dot(np.transpose(pccaM), np.dot(P_msm,pccaM)))
#Calculate k_on matrix from CG transition matrix
#1000 factor is to convert to microseconds^-1
kon_tilda=1000.*P_tilda/tau
#Non-diagonal rate matrix with from/to state labelling
kon_tilda_nd=nondiag_rates(kon_tilda)
#Calculate k_on from TPT rate matrix
tfac=10000
kon_tpt=tpt_rate_matrix(M,pcca_sets,tfac)
kon_tpt_nd=nondiag_rates(kon_tpt)
#Calculate gating factor for various kon_matrices
gam_kon_tilda= gamma_factor(kon_tilda,init,fin)
gam_kon_tpt= gamma_factor(kon_tpt,init,fin)
return meta_rho, meta_pi, cg_pi, kon_tilda_nd, kon_tpt_nd, gam_kon_tilda, gam_kon_tpt
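# Hypothetical usage, assuming M is an estimated pyEMMA MSM at lag time tau (in frames)
# and the open/closed macrostates end up as PCCA+ sets 0 and 1:
#   rho, pi_cg, hmm_pi, k_nd, k_tpt_nd, gam, gam_tpt = \
#       metastable_kinetics_calc(M, tau=100, n_sets=4, init=[0], fin=[1])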
#################
def nondiag_rates(kon):
nd_rates=np.zeros((0,4))
for i in range(len(kon)):
for j in range(i+1,len(kon)):
nd_rates=np.vstack((nd_rates, [int(i), int(j), kon[i,j], kon[j,i]]))
return nd_rates
##########################################################################################################
#################
#Functions to Identify and Extract Representative Conformations of Metastable States
# Using both MSM approach and also from subselection of PMF landscapes
#################
#################
def micro_order(Xsets, Xdist,macro_state):
kBT=0.596
a=np.transpose(np.vstack((Xsets[macro_state],Xdist[macro_state,Xsets[macro_state]])))
b = a[a[:,1].argsort()[::-1]]
c = (-kBT*np.log(b[:,1])-np.min(-kBT*np.log(b[:,1]))).reshape(-1,1)
micro_state_order=np.hstack((b,c))
return micro_state_order
# Microstate and snapshot extractor
def top_microstates(macro_state, Xdist, Xsets, energy_factor):
"""
a: Creates a Boltz-weighted list of fuzzy microstates of a given macrostate
b: Creates a Boltz-weighted probability-sorted list of fuzzy microstates of a given macrostate
from the pccaX (Xdist) distribution. Most prob state is set to 0, other states are ranked relative to the most prob state
energy factor - define a cut off Boltz-weighted energy difference to select only most probable states = 0.5*kBT
chi_conf_sets: list of chosen microstates
lam - normalized lambda metric for clustercenters correponding to microstates
"""
kBT=0.596
energy_thresh=energy_factor*kBT
mic_states = np.array([x for x in range(np.shape(Xdist)[1])])
    a=np.transpose(np.vstack((mic_states,-kBT*np.log(Xdist[macro_state]))))
import numpy as np
import math
import torch
import random
import copy
class Gameboard:
def __init__(self):
self.boardsize = 4 # The size of one side of the board.
self.score = 0
self.board = np.zeros((self.boardsize, self.boardsize), dtype=np.int32)
self.board_tensor = torch.zeros((self.boardsize, self.boardsize), dtype=torch.int32)
self.place_random()
self.place_random()
def copy(self):
return copy.deepcopy(self)
def print(self, show_score=False):
print(self.board)
if show_score:
print('Score: {}'.format(self.score))
print()
def np_board(self):
return self.board
# Place a number in a random place on the board. If that random position is already filled, it chooses a different
# position. The default number is 2.
def place_random(self, number=2):
(x_coordinate, y_coordinate) = np.random.randint(0, self.boardsize, 2)
# print(x_coordinate, y_coordinate)
if self.board[x_coordinate, y_coordinate] == 0:
self.board[x_coordinate, y_coordinate] = number
else:
self.place_random(number)
def collapse_right(self):
# Loop over it three times so it moves blocks as far as possible
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Row {}:{}'.format(i, self.board[i]))
for j in reversed(range(0, self.boardsize)):
# print('Checking if position {} is in range'.format(j+1))
if j+1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if self.board[i, j+1] == 0:
# print('Next position is 0')
self.board[i, j+1] = self.board[i, j]
self.board[i, j] = 0
elif self.board[i, j+1] == self.board[i, j]:
# print('Next position is identical')
self.score += self.board[i, j]
self.board[i, j+1] *= 2
self.board[i, j] = 0
def collapse_left(self):
# Loop over it three times so it moves blocks as far as possible
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Row {}:{}'.format(i, self.board[i]))
for j in range(0, self.boardsize):
# print('Checking if position {} is in range'.format(j+1))
if j-1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if self.board[i, j-1] == 0:
# print('Next position is 0')
self.board[i, j-1] = self.board[i, j]
self.board[i, j] = 0
elif self.board[i, j-1] == self.board[i, j]:
# print('Next position is identical')
self.score += self.board[i, j]
self.board[i, j-1] *= 2
self.board[i, j] = 0
def collapse_down(self):
# Loop over it three times so it moves blocks as far as possible
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Column {}:{}'.format(i, self.board[:i]))
for j in reversed(range(0, self.boardsize)):
# print('Checking if position {} is in range'.format(j+1))
if j+1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if self.board[j+1, i] == 0:
# print('Next position is 0')
self.board[j+1, i] = self.board[j, i]
self.board[j, i] = 0
elif self.board[j+1, i] == self.board[j, i]:
# print('Next position is identical')
self.score += self.board[j, i]
self.board[j+1, i] *= 2
self.board[j, i] = 0
def collapse_up(self):
# Loop over it three times so it moves blocks as far as possible
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Column {}:{}'.format(i, self.board[:i]))
for j in range(0, self.boardsize):
# print('Checking if position {} is in range'.format(j+1))
if j-1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j - 1, self.board[i, j - 1]))
# print('Position {} is in range.'.format(j-1))
if self.board[j-1, i] == 0:
# print('Next position is 0')
self.board[j-1, i] = self.board[j, i]
self.board[j, i] = 0
elif self.board[j-1, i] == self.board[j, i]:
# print('Next position is identical')
self.score += self.board[j, i]
self.board[j-1, i] *= 2
self.board[j, i] = 0
def simulate_collapse_right(self):
# Loop over it three times so it moves blocks as far as possible
simulated_board = self.board.copy()
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Row {}:{}'.format(i, self.board[i]))
for j in reversed(range(0, self.boardsize)):
# print('Checking if position {} is in range'.format(j+1))
if j + 1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if simulated_board[i, j + 1] == 0:
# print('Next position is 0')
simulated_board[i, j + 1] = simulated_board[i, j]
simulated_board[i, j] = 0
elif simulated_board[i, j + 1] == simulated_board[i, j]:
# print('Next position is identical')
simulated_board[i, j + 1] *= 2
simulated_board[i, j] = 0
def simulate_collapse_down(self):
# Loop over it three times so it moves blocks as far as possible
simulated_board = self.board.copy()
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Column {}:{}'.format(i, self.board[:i]))
for j in reversed(range(0, self.boardsize)):
# print('Checking if position {} is in range'.format(j+1))
if j + 1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if simulated_board[j + 1, i] == 0:
# print('Next position is 0')
simulated_board[j + 1, i] = simulated_board[j, i]
simulated_board[j, i] = 0
elif simulated_board[j + 1, i] == simulated_board[j, i]:
# print('Next position is identical')
simulated_board[j + 1, i] *= 2
simulated_board[j, i] = 0
def simulate_collapse_left(self):
# Loop over it three times so it moves blocks as far as possible
simulated_board = self.board.copy()
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Row {}:{}'.format(i, self.board[i]))
for j in range(0, self.boardsize):
# print('Checking if position {} is in range'.format(j+1))
if j - 1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if simulated_board[i, j - 1] == 0:
# print('Next position is 0')
simulated_board[i, j - 1] = simulated_board[i, j]
simulated_board[i, j] = 0
elif simulated_board[i, j - 1] == simulated_board[i, j]:
# print('Next position is identical')
simulated_board[i, j - 1] *= 2
simulated_board[i, j] = 0
def simulate_collapse_up(self):
# Loop over it three times so it moves blocks as far as possible
simulated_board = self.board.copy()
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Column {}:{}'.format(i, self.board[:i]))
for j in range(0, self.boardsize):
# print('Checking if position {} is in range'.format(j+1))
if j - 1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j - 1, self.board[i, j - 1]))
# print('Position {} is in range.'.format(j-1))
if simulated_board[j - 1, i] == 0:
# print('Next position is 0')
simulated_board[j - 1, i] = simulated_board[j, i]
simulated_board[j, i] = 0
elif simulated_board[j - 1, i] == simulated_board[j, i]:
# print('Next position is identical')
simulated_board[j - 1, i] *= 2
simulated_board[j, i] = 0
def rotate_board(self, board, number_of_rotations):
# number of rotations is in quarter circles, with positive being counter-clockwise
# returns a copy of the board, but rotated
return np.rot90(board, number_of_rotations).copy()  # np.rot90 rotates counter-clockwise for positive k; copy so the caller gets an independent array
def simulate_move(self, direction):
# Creates a copy of the board, rotates it so that the desired collapse directory is pointed down, collapses
# down, then rotates it back to its proper orientation.
# parameter direction is a string that says 'up', 'down', 'left', or 'right'
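# numpy.rot90 rotates counter-clockwise for positive k: e.g. for 'left' (k=1), column j of
# the original board maps onto row (boardsize - 1 - j), so a leftward collapse of the
# original board becomes the downward collapse performed below; rotating by -k afterwards
# restores the original orientation.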
move_dictionary = {'up': 2,
'down': 0,
'left': 1,
'right': -1}
simulated_board = np.copy(self.board)  # make a copy of the current board
simulated_board = self.rotate_board(simulated_board, move_dictionary[direction]) # rotate the copy
# Do the collapse
for _ in range(0, 3):
for i in range(0, self.boardsize):
# print('Column {}:{}'.format(i, self.board[:i]))
for j in reversed(range(0, self.boardsize)):
# print('Checking if position {} is in range'.format(j+1))
if j + 1 in range(0, self.boardsize):
# print('({},{}) has {}; ({},{}) has {}'.format(i, j, self.board[i, j],
# i, j + 1, self.board[i, j + 1]))
# print('Position {} is in range.'.format(j+1))
if simulated_board[j + 1, i] == 0:
# print('Next position is 0')
simulated_board[j + 1, i] = simulated_board[j, i]
simulated_board[j, i] = 0
elif simulated_board[j + 1, i] == simulated_board[j, i]:
# print('Next position is identical')
simulated_board[j + 1, i] *= 2
simulated_board[j, i] = 0
simulated_board = self.rotate_board(simulated_board, -1 * move_dictionary[direction]) # rotate back
return simulated_board
def simulate_move_successful(self, index):
moves = {
0: 'up',
1: 'down',
2: 'left',
3: 'right',
4: 'nothing'
}
temporary_board = np.copy(self.board)
direction = moves[index]
simulated_board = self.simulate_move(direction)
if np.array_equal(simulated_board, temporary_board):
# print('Simulated move not successful.')
return False
return True
def collapse_nothing(self):
return
def is_board_full(self):
board_full = not (self.boardsize ** 2 - np.count_nonzero(self.board))
if board_full:
for index in range(4):  # check all four directions: 'up', 'down', 'left', 'right'
if self.simulate_move_successful(index):
return False  # at least one move still changes the board, so the game is not over
return True  # no empty cells and no possible merges remain
return False
def move(self, direction, show_score=False, print_board=True):
# direction is a string representing the direction to collapse
# show_score tells the print function to show the score or not after moving.
# Exit codes:
# 0: Move not successful (no change in board)
# -1: board is full
# 1: Move successful, tile added
# parameter direction is a string that says 'up', 'down', 'left', or 'right'
move_dictionary = {'up': self.collapse_up,
'down': self.collapse_down,
'left': self.collapse_left,
'right': self.collapse_right,
'nothing': self.collapse_nothing}
# Make a temporary copy of the previous board
temporary_board = np.copy(self.board)
# Execute the proper collapse function for the given direction.
if print_board:
print('Moving in direction {}'.format(direction))
move_dictionary[direction]()
# print(self.board, temporary_board)
# Add a random tile if a move was successful
if self.is_board_full():
if print_board:
self.print(show_score)
print('BOARD FULL')
return -1
if np.array_equal(self.board, temporary_board):
if print_board:
self.print(show_score)
print('Move not successful. Score: {}'.format(self.score))
return 0
else:
# print('Previous move successful')
# print('Is board full: {}'.format(self.is_board_full()))
if self.is_board_full():
if print_board:
self.print(show_score)
print('BOARD FULL')
return -1
else:
self.place_random(self.generate_random_tile())
if print_board:
self.print(show_score)
return 1
def generate_random_tile(self):
# Generate a 2, 90% of the time
return 2 if np.random.random() < 0.9 else 4
def get_highest_tile(self):
return np.max(self.board)
def get_board_total(self):
return np.sum(self.board)
def get_number_of_active_tiles(self):
return np.count_nonzero(self.board)
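# Usage sketch (illustrative only; assumes the surrounding class is named `Board` -- use the
# actual class name defined at the top of this file):
#
# board = Board()
# while board.move(np.random.choice(['up', 'down', 'left', 'right']), print_board=False) != -1:
#     pass  # -1 signals a full board with no merges left; 0/1 mean unsuccessful/successful moves
# print('Game over. Highest tile: {}, score: {}'.format(board.get_highest_tile(), board.score))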
import unittest
import numpy as np
from blmath.geometry.transform.rotation import rotation_from_up_and_look, euler
class TestRotationFromUpAndLook(unittest.TestCase):
def test_starting_with_canonical_reference_frame_gives_identity(self):
result = rotation_from_up_and_look(up=[0, 1, 0], look=[0, 0, 1])
np.testing.assert_array_almost_equal(result, np.eye(3))
def test_raises_value_error_with_zero_length_inputs(self):
with self.assertRaises(ValueError):
rotation_from_up_and_look(up=[0, 0, 0], look=[0, 0, 1])
with self.assertRaises(ValueError):
rotation_from_up_and_look(up=[0, 1, 0], look=[0, 0, 0])
def test_raises_value_error_with_colinear_inputs(self):
with self.assertRaises(ValueError):
rotation_from_up_and_look(up=[0, 0, 1], look=[0, 0, 1])
with self.assertRaises(ValueError):
rotation_from_up_and_look(up=[0, 0, 1], look=[0, 0, -1])
def test_normalizes_inputs(self):
result = rotation_from_up_and_look(up=[0, 42, 0], look=[0, 0, 13])
np.testing.assert_array_almost_equal(result, np.eye(3))
def test_always_outputs_float64(self):
result = rotation_from_up_and_look(up=np.array([0, 1, 0], dtype=np.float32), look=np.array([0, 0, 1], dtype=np.float32))
self.assertEqual(result.dtype, np.float64)
np.testing.assert_array_almost_equal(result, np.eye(3))
from unittest import TestCase
import numpy as np
from ..image_measures import isotope_pattern_match, isotope_image_correlation
class IsotopePatternMatchTest(TestCase):
def test_empty_inputs(self):
inputs = (
([], []),
([[]], [0.3]),
([[]] * 2, [0.2] * 2),
)
for args in inputs:
self.assertRaises(Exception, isotope_pattern_match, *args)
def test_misaligned_shapes(self):
inputs = (
(np.arange(5 * 5).reshape((5, 5)), np.arange(6)),
(np.ones(5), np.ones(5)),
(np.ones((3, 3))
import sys
import re
import math
import os
import numpy as np
try:
import paraview.simple as para
except ImportError:
para = None
def Log(s):
sys.stderr.write(str(s) + '\n')
def GetStep(path):
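# e.g. GetStep("dir/vf_0001.xmf") -> "0001": the step index is the digit group after the underscore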
return re.findall(r'[^_]*_([0-9]*)\.*.', os.path.basename(path))[0]
def GetSteps(paths):
return list(map(GetStep, paths))
def ReplaceFilename(paths, pattern, keep_dir=True):
"""
Replaces filename by pattern with step index substitution.
paths: `list(str)`
Paths.
pattern: `str`
Pattern containing a single `{}` to be replaced by step index.
Example:
>>> ReplaceFilename(["dir/vf_0001.xmf"], "sm_{}.vtk")
["dir/sm_0001.vtk"]
"""
r = []
for f in paths:
dirname = os.path.dirname(f)
basename = os.path.basename(f)
step = GetStep(f)
if keep_dir:
r.append(os.path.join(dirname, pattern.format(step)))
else:
r.append(pattern.format(step))
return r
def SubstituteStep(*args, **kwargs):
Log("Warning: SubstituteStep() is deprecated. Renamed to ReplaceFilename()")
return ReplaceFilename(*args, **kwargs)
def ApplyForceTime(sources):
'''
Applies ForceTime filter to sources.
Returns new sources and arrays with original time values.
'''
timearrays = [np.array(s.TimestepValues) if hasattr(s, "TimestepValues") else None for s in sources]
sources_ft = [para.ForceTime(s) if s is not None else None for s in sources]
return sources_ft, timearrays
def SetTimeStep(index, sources_ft, timearrays):
'''
Sets sources to time step given by index.
'''
for s,t in zip(sources_ft, timearrays):
if hasattr(s, "ForcedTime"):
try:
s.ForcedTime = t[index]
except:
pass
s.UpdatePipeline()
def SaveAnimation(steps, renderView, sources_ft, timearrays, pattern="a_{:}.png", force=False):
for index,step in enumerate(steps):
outfile = pattern.format(step)
if os.path.isfile(outfile) and not force:
Log("skip existing {:}".format(outfile))
continue
open(outfile, 'a').close()
SetTimeStep(index, sources_ft, timearrays)
Log("{:}/{:}: {:}".format(index + 1, len(steps), outfile))
para.SaveScreenshot(outfile, renderView)
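# Typical usage sketch (illustrative only; assumes `paths` is a list of time-stamped data
# files and `renderView` an existing paraview render view):
#
# sources = [para.OpenDataFile(f) for f in paths]
# sources_ft, timearrays = ApplyForceTime(sources)
# SaveAnimation(GetSteps(paths), renderView, sources_ft, timearrays, pattern="a_{:}.png")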
def GetBoundingBox(o):
'''
Returns bounding box of object o.
[x0, y0, z0], [x1, y1, z1]
'''
o.UpdatePipeline()
di = o.GetDataInformation()
lim = di.DataInformation.GetBounds()
lim0 = np.array(lim[::2])
lim1 = np.array(lim[1::2])
return np.array(lim0), np.array(lim1)
"""Input and output (IO) of information from and to the persistence layer.
Currently, input and output of both, datasets and recipes can be handled.
Datasets
========
Both, data and metadata contained in datasets as well as the information
stored in recipes for recipe-driven data analysis can be read and written.
For datasets, two generic classes are provided:
* :class:`aspecd.io.DatasetImporter`
* :class:`aspecd.io.DatasetExporter`
As the name says, these classes should be used to implement import and
export functionality for your own purposes in applications derived from the
ASpecD framework.
Generally, both import and export should be handled via the respective
methods of the :class:`aspecd.dataset.Dataset` class, thus first
instantiating an object of that class and an appropriate importer or
exporter, and afterwards only operating on the dataset using its methods.
In its most generic form, this may look something like:
.. code-block::
dataset = aspecd.dataset.Dataset()
importer = aspecd.io.DatasetImporter(source="/path/to/your/data")
dataset.import_from(importer)
Similarly, you would handle the export of your data (and metadata)
contained in a dataset object using an exporter object, respectively.
.. code-block::
dataset = aspecd.dataset.Dataset()
exporter = aspecd.io.DatasetExporter(target="/path/to/destination")
dataset.export_to(exporter)
However, if you use :ref:`recipe-driven data analysis <recipes>`, things
become much simpler:
* Imports will be automatically taken care of.
* Exports can be specified as simple task.
A simple example of a recipe only loading datasets and afterwards exporting
them could look like this:
.. code-block:: yaml
datasets:
- /path/to/first/dataset
- /path/to/second/dataset
tasks:
- kind: export
type: AdfExporter
properties:
target:
- dataset1
- dataset2
What is happening here? Two datasets are imported, and afterwards exported
to the ASpecD Dataset Format (ADF) using the :class:`aspecd.io.AdfExporter`.
Another frequent use case, although one that admittedly pretty much opposes
the whole idea of the ASpecD framework in terms of reproducibility and
traceability: Your collaboration partners require you to provide them with
raw data they can import into their favourite program for creating plots.
The only viable way: export to plain text (ouch!) - saying good-bye to all
your metadata and history:
.. code-block:: yaml
datasets:
- /path/to/first/cool/dataset
- /path/to/second/cool/dataset
- /path/to/another/cool/dataset
tasks:
- kind: export
type: TxtExporter
properties:
target:
- cool-dataset1
- cool-dataset2
- cool-dataset3
In this case, you can as well add whatever processing necessary to your
datasets before exporting them, and you see that recipes come in quite handy
here.
Importers for specific file formats
-----------------------------------
There exists a series of importers for specific file formats:
* :class:`aspecd.io.AdfImporter`
Importer for data in ASpecD Dataset Format (ADF)
* :class:`aspecd.io.AsdfImporter`
Importer for data in asdf format
* :class:`aspecd.io.TxtImporter`
Importer for data in plain text format
For details, see the respective class documentation.
Exporters for specific file formats
-----------------------------------
Datasets need to be persisted sometimes, and currently, there exist two
exporters for specific file formats that can be imported again using the
respective importers. Furthermore, the full information contained in a
dataset will be retained.
* :class:`aspecd.io.AdfExporter`
Exporter for datasets to ASpecD Dataset Format (ADF)
* :class:`aspecd.io.AsdfExporter`
Exporter for datasets to asdf format
For details, see the respective class documentation.
A bit a special case is the exporter to plain text files, as this file
format does *not* preserve the metadata stored within the dataset and should
only be used as last resort:
* :class:`aspecd.io.TxtExporter`
Exporter for data to plain text format
.. warning::
All metadata contained within a dataset (including the full history)
are lost when exporting to plain text. Therefore, using this
exporter will usually result in you losing reproducibility. Hence,
better think twice before using this exporter, use it entirely at
your own risk, and only if you *really* know what you are doing (and
why).
Writing importers for data
--------------------------
When writing importer classes for your own data, there is a number of
pitfalls, some of which shall be described here together with solutions and
"best practices".
Dimensions of data
~~~~~~~~~~~~~~~~~~
Usually, we assign axes in the order *x*, *y*, *z*, and assume the *x* axis
to be the horizontal axis in a plot. However, numpy (as well as other
software), follows a different convention, with the first index referring to
the *row* of your matrix, the second index to the *column*. That boils down
to having the first index correspond to the *y* axis, and the second index
referring to the *x* axis.
As long as your data are one-dimensional, resulting in two axes objects in
your dataset, everything is fine, and the second axis will have no values.
However, if your data to be imported are two-dimensional, your first
dimension will be the index of rows (along a column), hence the *y* axis,
and the second dimension the index of your columns (along a row), *i.e.* the
*x* axis. This is perfectly fine, and it is equally fine to revert this
order, as long as you ensure your axis objects to be consistent with the
dimensions of your data.
If you assign numeric data to the :attr:`aspecd.dataset.Data.data` property,
the corresponding axes values will initially be set to the indices of the
data points along the corresponding dimension, with the first axis (index 0)
corresponding to the first dimension (row indices along a column) and
similar for each of the following dimensions of your data. Note that there
will always be one axis more than dimensions of your data. This last axis
will not have values, and usually its quantity is something like "intensity".
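A minimal sketch of what this means in practice (illustrative only, assuming a plain
two-dimensional NumPy array):
.. code-block::
dataset = aspecd.dataset.Dataset()
dataset.data.data = np.zeros((5, 3))  # 5 rows (first axis), 3 columns (second axis)
# dataset.data.axes now contains three axes: axes[0] for the rows, axes[1] for the
# columns, and axes[2] (without values) for the intensity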
Backup of the data
~~~~~~~~~~~~~~~~~~
One essential concept of the ASpecD dataset is to store the original data
together with their axes in a separate, non-public property. This is done
automatically by the importer after calling out to its non-public method
:meth:`aspecd.io.DatasetImporter._import`. Hence, usually you need not take
care of this at all.
Handling of metadata
~~~~~~~~~~~~~~~~~~~~
Data without information about these data are usually pretty useless. Hence,
an ASpecD dataset is always a unit of numerical data and corresponding
metadata. While you will need to come up with your own structure for
metadata of your datasets and create a hierarchy of classes derived from
:class:`aspecd.metadata.DatasetMetadata`, your importers need to ensure that
these metadata are populated respectively. Of course, which metadata can be
populated depends strongly on the file format you are about to import.
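A minimal sketch of such an importer (hypothetical file format, for illustration only):
.. code-block::
class MyImporter(aspecd.io.DatasetImporter):
def _import(self):
self.dataset.data.data = np.loadtxt(self.source, skiprows=1)
# map the metadata provided by your file format onto the corresponding
# attributes of self.dataset.metadata here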
Handling different file formats for importing data
--------------------------------------------------
Often, data are available in different formats, and deciding which importer
is appropriate for a given format can be quite involved. To free other
classes from having to contain the relevant code, a factory can be used:
* :class:`aspecd.io.DatasetImporterFactory`
Currently, the sole information provided to decide about the appropriate
importer is the source (a string). A concrete importer object is returned
by the method :meth:`get_importer`. Thus, using the factory in another
class may look like the following::
importer_factory = aspecd.io.DatasetImporterFactory()
importer = importer_factory.get_importer(source="/path/to/your/data")
dataset = aspecd.dataset.Dataset()
dataset.import_from(importer)
Here, as in the example above, "source" refers to a (unique) identifier of
your dataset, be it a filename, path, URL/URI, LOI, or alike.
.. important::
For recipe-driven data analysis to work with an ASpecD-derived package,
you need to implement a :class:`aspecd.io.DatasetImporterFactory` class
there as well that can be obtained by instantiating
``<your_package>.io.DatasetImporterFactory()``.
Recipes
=======
For recipes, a similar set of classes is provided:
* :class:`aspecd.io.RecipeImporter`
* :class:`aspecd.io.RecipeExporter`
For additional concrete classes handling import and export from and to YAML
files see below.
The same general principles laid out above for the datasets applies to
these classes as well. In particular, both import and export should be
handled via the respective methods of the :class:`aspecd.tasks.Recipe`
class, thus first instantiating an object of that class and an appropriate
importer or exporter, and afterwards only operating on the recipe using
its methods.
In its most generic form, this may look something like::
recipe = aspecd.tasks.Recipe()
importer = aspecd.io.RecipeImporter(source="/path/to/your/recipe")
recipe.import_from(importer)
Similarly, you would handle the export of the information contained in a
recipe object using an exporter object, respectively.
To simplify the input and output of recipes, and due recipe-driven data
analysis being an intrinsic property of the ASpecD framework, two classes
handling the import and export from and to YAML files are provided as well:
* :class:`aspecd.io.RecipeYamlImporter`
* :class:`aspecd.io.RecipeYamlExporter`
These classes can directly be used to work with YAML files containing
information for recipe-driven data analysis. For details of the YAML file
structure, see the :class:`aspecd.tasks.Recipe` class and its attributes.
Module documentation
====================
"""
import copy
import os
import tempfile
import zipfile
import asdf
import numpy as np
import aspecd.exceptions
import aspecd.metadata
import aspecd.utils
class DatasetImporter:
"""Base class for dataset importer.
Each class actually importing data and metadata into a dataset should
inherit from this class.
To perform the import, call the
:meth:`~aspecd.dataset.Dataset.import_from` method of the dataset
the import should be performed for, and provide a reference to the
actual importer object to it.
The actual implementation of the importing is done in the private method
:meth:`_import` that in turn gets called by :meth:`import_into`
which is called by the :meth:`aspecd.dataset.Dataset.import_from` method
of the dataset object.
One question arising when actually implementing an importer for a
specific file format: How do the data get into the dataset? The simple
answer: The :meth:`_import` method of the importer knows about the
dataset and its structure (see :class:`aspecd.dataset.Dataset` for
details) and assigns data (and metadata) read from an external source
to the respective fields of the dataset. In terms of a broader software
architecture point of view: The dataset knows nothing about the
importer besides its bare existence and interface, whereas the importer
knows about the dataset and how to map data and metadata.
Attributes
----------
dataset : :class:`aspecd.dataset.Dataset`
dataset to import data and metadata into
source : :class:`str`
specifier of the source the data and metadata will be read from
parameters : :class:`dict`
Additional parameters to control import options.
Useful in case of, *e.g.*, CSV importers where the user may want to
set things such as the delimiter
.. versionadded:: 0.2
Raises
------
aspecd.io.MissingDatasetError
Raised when no dataset exists to act upon
"""
def __init__(self, source=None):
self.source = source
self.dataset = None
self.parameters = dict()
def import_into(self, dataset=None):
"""Perform the actual import into the given dataset.
If no dataset is provided at method call, but is set as property in
the importer object, the :meth:`aspecd.dataset.Dataset.import_from`
method of the dataset will be called.
If no dataset is provided at method call nor as property in the
object, the method will raise a respective exception.
The dataset object always calls this method with the respective
dataset as argument. Therefore, in this case setting the dataset
property within the importer object is not necessary.
The actual import should be implemented within the non-public method
:meth:`_import`.
.. note::
A number of parameters of the dataset are automatically assigned
*after* calling out to the non-public method
:meth:`aspecd.io.DatasetImporter._import`, namely the
non-public property ``_origdata`` of the dataset is populated
with a copy of :attr:`aspecd.dataset.Dataset.data`, and id and
label are set to :attr:`aspecd.io.DatasetImporter.source`.
Parameters
----------
dataset : :class:`aspecd.dataset.Dataset`
Dataset to import data and metadata into
Raises
------
aspecd.io.MissingDatasetError
Raised if no dataset is provided.
"""
if not dataset:
if self.dataset:
self.dataset.import_from(self)
else:
raise aspecd.exceptions.MissingDatasetError(
"No dataset provided")
else:
self.dataset = dataset
self._import()
# Untested due to lack of ideas how to test
# pylint: disable=protected-access
self.dataset._origdata = copy.deepcopy(self.dataset.data)
self.dataset.id = self.source
self.dataset.label = self.source
def _import(self):
"""Perform the actual import of data and metadata into the dataset.
The implementation of the actual import goes in here in all
classes inheriting from DatasetImporter. This method is automatically
called by :meth:`import_into`.
Importing data and metadata includes assigning both to the respective
fields of the :obj:`aspecd.dataset.Dataset` object. For details of
its structure, see there.
Usually, this method will successively call other private/protected
methods of the importer to perform the required tasks that are
specific for each data source.
"""
class DatasetImporterFactory:
"""
Factory for creating importer objects based on the source provided.
Often, data are available in different formats, and deciding which
importer is appropriate for a given format can be quite involved. To
free other classes from having to contain the relevant code, a factory
can be used.
Currently, the sole information provided to decide about the
appropriate importer is the source (a string). A concrete importer
object is returned by the method :meth:`get_importer`. If no source is
provided, an exception will be raised.
The actual code for deciding which type of importer to return in what
case should be implemented in the non-public method :meth:`_get_importer`
in any package based on the ASpecD framework.
In its basic implementation, as done here, the non-public method
:meth:`_get_importer` returns the importers for ADF, ASDF, and TXT
depending on the file extension, and in all other cases the standard
importer.
This might be a viable way for an own :class:`DatasetImporterFactory`
implementation in the rare case of having only one single type of data,
but provides a sensible starting point for own developments.
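A derived factory might, for example, dispatch on the file extension (illustrative
sketch only; ``MyCsvImporter`` is a hypothetical importer class):
.. code-block::
class DatasetImporterFactory(aspecd.io.DatasetImporterFactory):
def _get_importer(self):
if self.source.endswith('.csv'):
return MyCsvImporter(source=self.source)
return None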
Attributes
----------
source : :class:`str`
Source of the dataset to be loaded.
Gets set by calling the method :meth:`get_importer` with the
``source`` parameter.
Raises
------
aspecd.io.MissingSourceError
Raised if no source is provided
"""
def __init__(self):
self.source = None
def get_importer(self, source='', importer='', parameters=None):
"""
Return importer object for dataset specified by its source.
The actual code for deciding which type of importer to return in what
case should be implemented in the non-public method
:meth:`_get_importer` in any package based on the ASpecD framework.
If no importer gets returned by the method :meth:`_get_importer`,
the ASpecD-internal importers will be checked for matching the file
type. Thus, you can overwrite the behaviour of any filetype
supported natively by the ASpecD framework, but retain compatibility
to the ASpecD-specific file types.
.. note::
Currently, only filenames/paths are supported, and if ``source``
does not start with the file separator, the absolute path to the
current directory is prepended.
Parameters
----------
source : :class:`str`
string describing the source of the dataset
May be a filename or path, a URL/URI, a LOI, or similar
importer : :class:`str`
Name of the importer to use for importing the dataset
Default: ''
.. versionadded:: 0.2
parameters : :class:`dict`
Additional parameters for controlling the import
Default: None
.. versionadded:: 0.2
Returns
-------
importer : :class:`aspecd.io.DatasetImporter`
importer object of appropriate class
Raises
------
aspecd.io.MissingSourceError
Raised if no source is provided
"""
if not source:
raise aspecd.exceptions.MissingSourceError(
'A source is required to return an appropriate importer')
self.source = source
if not self.source.startswith(os.pathsep):
self.source = os.path.join(os.path.abspath(os.curdir), self.source)
if importer:
package_name = aspecd.utils.package_name(self)
# Currently untested
if not package_name.endswith('io'):
package_name = '.'.join([package_name, 'io'])
full_class_name = '.'.join([package_name, importer])
importer = aspecd.utils.object_from_class_name(full_class_name)
importer.source = self.source
if not importer:
importer = self._get_importer()
if not importer:
importer = self._get_aspecd_importer()
if parameters:
importer.parameters = parameters
return importer
# noinspection PyMethodMayBeStatic
# pylint: disable=no-self-use
def _get_importer(self):
"""Choose appropriate importer for a dataset.
Every package inheriting from the ASpecD framework should implement
this method. Note that in case you do not handle a filetype and
hence return no importer, the default ASpecD importer will be
checked for matching the given source. Thus, you can overwrite the
behaviour of any filetype supported natively by the ASpecD
framework, but retain compatibility to the ASpecD-specific file types.
Returns
-------
importer : :class:`aspecd.io.DatasetImporter`
Importer for the specific file type
"""
importer = None
return importer
def _get_aspecd_importer(self):
_, file_extension = os.path.splitext(self.source)
if file_extension == '.adf':
return AdfImporter(source=self.source)
if file_extension == '.asdf':
return AsdfImporter(source=self.source)
if file_extension == '.txt':
return TxtImporter(source=self.source)
return DatasetImporter(source=self.source)
class DatasetExporter:
"""Base class for dataset exporter.
Each class actually exporting data from a dataset to some other should
inherit from this class.
To perform the export, call the
:meth:`~aspecd.dataset.Dataset.export_to` method of the dataset
the export should be performed for, and provide a reference to the
actual exporter object to it.
The actual implementation of the exporting is done in the non-public
method :meth:`_export` that in turn gets called by :meth:`export_from`
which is called by the :meth:`aspecd.dataset.Dataset.export_to` method
of the dataset object.
Attributes
----------
dataset : :obj:`aspecd.dataset.Dataset`
dataset to export data and metadata from
target : string
specifier of the target the data and metadata will be written to
Raises
------
aspecd.io.MissingDatasetError
Raised when no dataset exists to act upon
"""
def __init__(self, target=None):
self.target = target
self.dataset = None
def export_from(self, dataset=None):
"""Perform the actual export from the given dataset.
If no dataset is provided at method call, but is set as property in
the exporter object, the :meth:`aspecd.dataset.Dataset.export_to`
method of the dataset will be called.
If no dataset is provided at method call nor as property in the
object, the method will raise a respective exception.
The dataset object always calls this method with the respective
dataset as argument. Therefore, in this case setting the dataset
property within the exporter object is not necessary.
The actual export is implemented within the non-public method
:meth:`_export` that gets automatically called.
Parameters
----------
dataset : :class:`aspecd.dataset.Dataset`
Dataset to export data and metadata from
Raises
------
aspecd.io.MissingDatasetError
Raised if no dataset is provided.
"""
if not dataset:
if self.dataset:
self.dataset.export_to(self)
else:
raise aspecd.exceptions.MissingDatasetError(
"No dataset provided")
else:
self.dataset = dataset
self._export()
def _export(self):
"""Perform the actual export of data and metadata from the dataset.
The implementation of the actual export goes in here in all
classes inheriting from DatasetExporter. This method is automatically
called by :meth:`export_from`.
Usually, this method will successively call other private/protected
methods of the exporter to perform the required tasks that are
specific for each target format.
"""
class RecipeImporter:
"""Base class for recipe importer.
Each class actually importing recipes into a :obj:`aspecd.tasks.Recipe`
object should inherit from this class.
To perform the import, call the
:meth:`~aspecd.tasks.Recipe.import_from` method of the recipe the
import should be performed for, and provide a reference to the
actual importer object to it.
The actual implementation of the importing is done in the non-public
method :meth:`_import` that in turn gets called by :meth:`import_into`
which is called by the :meth:`aspecd.tasks.Recipe.import_from` method
of the recipe object.
One question arising when actually implementing an importer for a
specific file format: How does the information get into the recipe? The
simple answer: The :meth:`_import` method of the importer knows about the
recipe and its structure (see :class:`aspecd.tasks.Recipe` for
details) and creates a dictionary with keys corresponding to the
respective attributes of the recipe. In turn, it can then call the
:meth:`aspecd.tasks.Recipe.from_dict` method. In terms of a broader
software architecture point of view: The recipe knows nothing about the
importer besides its bare existence and interface, whereas the importer
knows about the recipe and how to map the information obtained to it.
Attributes
----------
recipe : :obj:`aspecd.tasks.Recipe`
recipe to import into
source : :class:`str`
specifier of the source the information will be read from
Raises
------
aspecd.io.MissingRecipeError
Raised when no recipe exists to act upon
"""
def __init__(self, source=''):
self.source = source
self.recipe = None
def import_into(self, recipe=None):
"""Perform the actual import into the given recipe.
If no recipe is provided at method call, but is set as property in
the importer object, the :meth:`aspecd.tasks.Recipe.import_from`
method of the recipe will be called.
If no recipe is provided at method call nor as property in the
object, the method will raise a respective exception.
The recipe object always calls this method with the respective
recipe as argument. Therefore, in this case setting the recipe
property within the importer object is not necessary.
The actual import should be implemented within the non-public method
:meth:`_import`.
Parameters
----------
recipe : :obj:`aspecd.tasks.Recipe`
recipe to import into
Raises
------
aspecd.io.MissingRecipeError
Raised if no recipe is provided.
"""
if not recipe:
if self.recipe:
self.recipe.import_from(self)
else:
raise aspecd.exceptions.MissingRecipeError("No recipe provided")
else:
self.recipe = recipe
self.recipe.filename = self.source
self._import()
def _import(self):
"""Perform the actual import into the recipe.
The implementation of the actual import goes in here in all
classes inheriting from RecipeImporter. This method is automatically
called by :meth:`import_into`.
Importing metadata includes assigning it to the respective fields
of the :obj:`aspecd.tasks.Recipe` object. For details of
its structure, see there. To do this, the method should create a
dictionary that can afterwards be supplied as an argument to a call
to :meth:`aspecd.tasks.Recipe.from_dict`.
"""
class RecipeExporter:
"""Base class for recipe exporter.
Each class actually exporting recipes from :obj:`aspecd.tasks.Recipe`
objects should inherit from this class.
To perform the export, call the
:meth:`aspecd.tasks.Recipe.export_to` method of the recipe the export
should be performed for, and provide a reference to the actual exporter
object to it.
The actual implementation of the exporting is done in the non-public
method :meth:`_export` that in turn gets called by :meth:`export_from`
which is called by the :meth:`aspecd.tasks.Recipe.export_to` method
of the recipe object.
Attributes
----------
recipe : :obj:`aspecd.tasks.Recipe`
recipe to export information from
target : string
specifier of the target the information will be written to
Raises
------
aspecd.io.MissingRecipeError
Raised when no recipe exists to act upon
"""
def __init__(self, target=''):
self.target = target
self.recipe = None
def export_from(self, recipe=None):
"""Perform the actual export from the given recipe.
If no recipe is provided at method call, but is set as property in
the exporter object, the :meth:`aspecd.tasks.Recipe.export_to`
method of the recipe will be called.
If no recipe is provided at method call nor as property in the
object, the method will raise a respective exception.
The recipe object always calls this method with the respective
recipe as argument. Therefore, in this case setting the recipe
property within the exporter object is not necessary.
The actual export should be implemented within the non-public method
:meth:`_export`.
Parameters
----------
recipe : :class:`aspecd.tasks.Recipe`
Recipe to export from
Raises
------
aspecd.io.MissingRecipeError
Raised if no recipe is provided.
"""
if not recipe:
if self.recipe:
self.recipe.export_to(self)
else:
raise aspecd.exceptions.MissingRecipeError("No recipe provided")
else:
self.recipe = recipe
self._export()
def _export(self):
"""Perform the actual export from the recipe.
The implementation of the actual export goes in here in all
classes inheriting from RecipeExporter. This method is automatically
called by :meth:`export_from`.
Usually, this method will first create a dictionary from the recipe
using the :meth:`aspecd.tasks.Recipe.to_dict` method. This
dictionary can afterwards be further processed and written to some
file.
"""
class RecipeYamlImporter(RecipeImporter):
"""
Recipe importer for importing from YAML files.
The YAML file needs to have a structure compatible to the actual
recipe, such that the dict created from reading the YAML file can be
directly fed into the :meth:`aspecd.tasks.Recipe.from_dict` method.
The order of entries of the YAML file is preserved due to using ordered
dictionaries (:class:`collections.OrderedDict`) internally.
Parameters
----------
source : :class:`str`
filename of a YAML file to read from
"""
def __init__(self, source=''):
self.recipe_version = ''
self._recipe_dict = None
super().__init__(source=source)
def _import(self):
self._load_from_yaml()
self._convert()
self.recipe.from_dict(self._recipe_dict)
def _load_from_yaml(self):
yaml = aspecd.utils.Yaml()
yaml.read_from(filename=self.source)
yaml.deserialise_numpy_arrays()
self._recipe_dict = yaml.dict
def _convert(self):
self._get_recipe_version()
self._map_recipe_structure()
def _get_recipe_version(self):
self.recipe_version = self.recipe.format['version']
if 'format' in self._recipe_dict \
and 'version' in self._recipe_dict['format']:
self.recipe_version = self._recipe_dict['format']['version']
deprecated_keys = ['default_package', 'autosave_plots',
'output_directory', 'datasets_source_directory']
if any([key in self._recipe_dict for key in deprecated_keys]):
self.recipe_version = '0.1'
def _map_recipe_structure(self):
mapper = aspecd.metadata.MetadataMapper()
mapper.version = self.recipe_version
mapper.metadata = self._recipe_dict
mapper.recipe_filename = 'recipe_mapper.yaml'
mapper.map()
self._recipe_dict = mapper.metadata
class RecipeYamlExporter(RecipeExporter):
"""
Recipe exporter for exporting to YAML files.
The YAML file will have a structure corresponding to the output of the
:meth:`aspecd.tasks.Recipe.to_dict` method of the recipe object.
Parameters
----------
target : :class:`str`
filename of a YAML file to write to
"""
def __init__(self, target=''):
super().__init__(target=target)
def _export(self):
yaml = aspecd.utils.Yaml()
yaml.dict = self.recipe.to_dict()
yaml.numpy_array_to_list = True
yaml.serialise_numpy_arrays()
yaml.write_to(filename=self.target)
class AdfExporter(DatasetExporter):
"""
Dataset exporter for exporting to ASpecD dataset format.
The ASpecD dataset format is vaguely reminiscent of the Open Document
Format, *i.e.* a zipped directory containing structured data (in this
case in form of a YAML file) and binary data in a corresponding
subdirectory.
As PyYAML is not capable of dealing with NumPy arrays out of the box,
those are dealt with separately. Small arrays are stored inline as
lists, larger arrays in separate files. For details, see the
:class:`aspecd.utils.Yaml` class.
The data format tries to be as self-contained as possible,
using standard file formats and a brief description of its layout
contained within the archive. Collecting the contents in a single ZIP
archive allows the user to deal with a single file for a dataset,
while more advanced users can easily dig into the details and write
importers for other platforms and programming languages, making the
format rather platform-independent and future-safe. Due to using binary
representation for larger numerical arrays, the format should be more
memory-efficient than other formats.
"""
def __init__(self, target=None):
super().__init__(target=target)
self.extension = '.adf'
self._filenames = {
'dataset': 'dataset.yaml',
'version': 'VERSION',
'readme': 'README',
}
self._bin_dir = 'binaryData'
self._tempdir_name = ''
self._version = '1.0.0'
def _export(self):
if not self.target:
raise aspecd.exceptions.MissingTargetError
with tempfile.TemporaryDirectory() as tempdir:
self._tempdir_name = tempdir
self._create_files()
self._create_zip_archive()
def _create_zip_archive(self):
with zipfile.ZipFile(self.target + self.extension, 'w') as zipped_file:
for filename in self._filenames.values():
zipped_file.write(
filename=os.path.join(self._tempdir_name, filename),
arcname=filename)
bin_dir_path = os.path.join(self._tempdir_name, self._bin_dir)
zipped_file.write(
filename=os.path.join(bin_dir_path),
arcname=self._bin_dir)
for filename in os.listdir(bin_dir_path):
zipped_file.write(
filename=os.path.join(bin_dir_path, filename),
arcname=os.path.join(self._bin_dir, filename))
def _create_files(self):
self._create_dataset_yaml()
self._create_version_file()
self._create_readme_file()
def _create_dataset_yaml(self):
bin_dir_path = os.path.join(self._tempdir_name, self._bin_dir)
os.mkdir(bin_dir_path)
yaml = aspecd.utils.Yaml()
yaml.binary_directory = bin_dir_path
yaml.dict = self.dataset.to_dict()
yaml.serialise_numpy_arrays()
yaml.write_to(filename=os.path.join(self._tempdir_name,
self._filenames["dataset"]))
def _create_version_file(self):
with open(os.path.join(self._tempdir_name,
self._filenames["version"]), 'w+') as file:
file.write(self._version)
def _create_readme_file(self):
readme_contents = (
"Readme\n"
"======\n\n"
"This directory contains an ASpecD dataset stored in the\n"
"ASpecD dataset format (adf).\n\n"
"What follows is a bit of information on the meaning of\n"
"each of the files in the directory.\n"
"Sources of further information on the file format\n"
"are provided at the end of the file.\n\n"
"Copyright (c) 2021, <NAME>\n"
"2021-01-04\n\n"
"Files and their meaning\n"
"-----------------------\n\n"
"* dataset.yaml - text/YAML\n"
" hierarchical metadata store\n\n"
"* binaryData/<filename>.npy - NumPy binary\n"
" numerical data of the dataset stored in NumPy format\n\n"
" Only arrays exceeding a certain threshold are stored\n"
" in binary format, mainly to save space and preserve\n"
" numerical accuracy.\n\n"
"* VERSION - text\n"
" version number of the dataset format\n\n"
" The version number follows the semantic versioning scheme.\n\n"
"* README - text\n"
" This file\n\n"
"Further information\n"
"-------------------\n\n"
"More information can be found on the web in the\n"
"ASpecD package documentation:\n\n"
"https://docs.aspecd.de/adf.html\n"
)
with open(os.path.join(self._tempdir_name,
self._filenames["readme"]), 'w+') as file:
file.write(readme_contents)
class AdfImporter(DatasetImporter):
"""
Dataset importer for importing from ASpecD dataset format.
For more details of the ASpecD dataset format, see the
:class:`aspecd.io.AdfExporter` class.
"""
def __init__(self, source=None):
super().__init__(source=source)
self.extension = '.adf'
self._dataset_yaml_filename = 'dataset.yaml'
self._bin_dir = 'binaryData'
def _import(self):
with tempfile.TemporaryDirectory() as tempdir:
with zipfile.ZipFile(self.source + self.extension, 'r') as \
zipped_file:
zipped_file.extractall(path=tempdir)
yaml = aspecd.utils.Yaml()
yaml.binary_directory = os.path.join(tempdir, self._bin_dir)
yaml.read_from(os.path.join(tempdir,
self._dataset_yaml_filename))
yaml.deserialise_numpy_arrays()
self.dataset.from_dict(yaml.dict)
class AsdfExporter(DatasetExporter):
"""
Dataset exporter for exporting to Advanced Scientific Data Format (ASDF).
For more information on ASDF, see the
`homepage of the asdf package <https://asdf.readthedocs.io/en/stable/>`_,
and its `format specification <https://asdf-standard.readthedocs.io/>`_.
"""
def __init__(self, target=None):
super().__init__(target=target)
self.extension = '.asdf'
def _export(self):
if not self.target:
raise aspecd.exceptions.MissingTargetError
dataset_dict = self.dataset.to_dict()
dataset_dict["dataset_history"] = dataset_dict.pop("history")
asdf_file = asdf.AsdfFile(dataset_dict)
asdf_file.write_to(self.target + self.extension)
class AsdfImporter(DatasetImporter):
"""
Dataset importer for importing from Advanced Scientific Data Format (ASDF).
For more information on ASDF, see the
`homepage of the asdf package <https://asdf.readthedocs.io/en/stable/>`_,
and its `format specification <https://asdf-standard.readthedocs.io/>`_.
"""
def __init__(self, source=None):
super().__init__(source=source)
self.extension = '.asdf'
def _import(self):
with asdf.open(self.source + self.extension, lazy_load=False,
copy_arrays=True) as asdf_file:
dataset_dict = asdf_file.tree
dataset_dict["history"] = dataset_dict.pop("dataset_history")
self.dataset.from_dict(dataset_dict)
class TxtImporter(DatasetImporter):
# noinspection PyUnresolvedReferences
"""
Dataset importer for importing from plain text files (TXT).
Plain text files often come with the disadvantage of no accompanying metadata;
therefore, the use of plain text files for data storage is highly
discouraged, besides other problems such as inherently low resolution/accuracy
or otherwise large file sizes.
The main reason for this class to exist is that it provides a simple way
to showcase ASpecD functionality reading from primitive data sources.
Besides that, sometimes you will encounter plain text files.
.. note::
The importer relies on :func:`numpy.loadtxt` for reading text files.
Hence, the same limitations apply, *e.g.* the dot as decimal separator.
If your data consist of two columns, the first will automatically be
interpreted as the *x* axis. In all other cases, data will be read as is
and no axes values explicitly written.
Attributes
----------
parameters : :class:`dict`
Parameters controlling the import
skiprows : :class:`int`
Number of rows to skip in text file (*e.g.*, header lines)
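Examples
--------
A minimal usage sketch (illustrative only; assumes a whitespace-separated text file
with one header line):
.. code-block::
importer = TxtImporter(source='/path/to/data.txt')
importer.parameters["skiprows"] = 1
dataset = aspecd.dataset.Dataset()
dataset.import_from(importer)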
"""
def __init__(self, source=None):
super().__init__(source=source)
self.extension = '.txt'
self.parameters["skiprows"] = 0
def _import(self):
data = np.loadtxt(self.source, **self.parameters)
import numpy as np
import matplotlib.pyplot as plt
import inspect # Used for storing the input
from .element import Element
from .equation import HeadEquation, PotentialEquation
from .besselaesnumba import besselaesnumba
besselaesnumba.initialize()
try:
from .src import besselaesnew
besselaesnew.besselaesnew.initialize()
#print('succes on f2py')
except:
pass
from .controlpoints import controlpoints, strengthinf_controlpoints
__all__ = ['LineSinkBase', 'HeadLineSinkZero', 'HeadLineSink', 'LineSinkDitch',
'HeadLineSinkString', 'LineSinkDitchString']
class LineSinkChangeTrace:
def changetrace(self, xyzt1, xyzt2, aq, layer, ltype, modellayer, direction, hstepmax, verbose=False):
changed = False
terminate = False
xyztnew = 0
if (ltype == 'a'):
if True:
# if (layer == self.layers).any(): # in layer where line-sink is screened
# not needed anymore, I think this is all taken care of by checking Qn1 and Qn2
if verbose:
print('hello changetrace')
print('xyz1:', xyzt1[:-1])
print('xyz2:', xyzt2[:-1])
x1, y1, z1, t1 = xyzt1
x2, y2, z2, t2 = xyzt2
eps = 1e-8
za = x1 + y1 * 1j
zb = x2 + y2 * 1j
Za = (2 * za - (self.z1 + self.z2)) / (self.z2 - self.z1)
Zb = (2 * zb - (self.z1 + self.z2)) / (self.z2 - self.z1)
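# Za and Zb are the step end points in the local coordinate system of the line-sink:
# Z = (2*z - (z1 + z2)) / (z2 - z1) maps the two line-sink end points onto -1 and +1,
# so the step crosses the line-sink element iff the imaginary parts of Za and Zb have
# opposite signs and the real-axis intersection X lies within [-1, 1].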
if Za.imag * Zb.imag < 0:
Xa, Ya = Za.real, Za.imag
Xb, Yb = Zb.real, Zb.imag
X = Xa - Ya * (Xb - Xa) / (Yb - Ya)
if verbose: print('X', X)
if abs(X) <= 1: # crosses line-sink
if verbose: print('crosses line-sink')
Znew1 = X - eps * np.sign(Yb) * 1j # steps to side of Ya
Znew2 = X + eps * np.sign(Yb) * 1j # steps to side of Yb
znew1 = 0.5 * ((self.z2 - self.z1) * Znew1 + self.z1 + self.z2)
znew2 = 0.5 * ((self.z2 - self.z1) * Znew2 + self.z1 + self.z2)
xnew1, ynew1 = znew1.real, znew1.imag
xnew2, ynew2 = znew2.real, znew2.imag
if Ya < 0:
theta = self.theta_norm_out
else:
theta = self.theta_norm_out + np.pi
Qx1, Qy1 = self.model.disvec(xnew1, ynew1)[:, layer] * direction
Qn1 = Qx1 * np.cos(theta) + Qy1 * np.sin(theta)
Qx2, Qy2 = self.model.disvec(xnew2, ynew2)[:, layer] * direction
Qn2 = Qx2 * np.cos(theta) + Qy2 * np.sin(theta)
if verbose:
print('xnew1, ynew1:', xnew1, ynew1)
print('xnew2, ynew2:', xnew2, ynew2)
print('Qn1, Qn2', Qn1, Qn2)
print('Qn2 > Qn1:', Qn2 > Qn1)
if Qn1 < 0: # trying to cross line-sink that infiltrates, stay on bottom, don't terminate
if verbose: print('change 1')
xnew = xnew1
ynew = ynew1
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1)
tnew = t1 + dnew / dold * (t2 - t1)
changed = True
xyztnew = [np.array([xnew, ynew, znew, tnew])]
elif Qn2 < 0: # all water is taken out, terminate
if verbose: print('change 2')
xnew = xnew2
ynew = ynew2
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1)
tnew = t1 + dnew / dold * (t2 - t1)
changed = True
terminate = True
xyztnew = [np.array([xnew, ynew, znew, tnew])]
elif Qn2 > Qn1: # line-sink infiltrates
if verbose: print('change 3')
xnew = xnew2
ynew = ynew2
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1) # elevation just before jump
tnew = t1 + dnew / dold * (t2 - t1)
Qbelow = (znew - aq.z[modellayer + 1]) / aq.Haq[layer] * Qn1
znew2 = aq.z[modellayer + 1] + Qbelow / Qn2 * aq.Haq[layer]
changed = True
xyztnew = [np.array([xnew, ynew, znew, tnew]), np.array([xnew, ynew, znew2, tnew])]
else: # line-sink takes part of water out
if verbose: print('change 4')
xnew = xnew2
ynew = ynew2
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1) # elevation just before jump
tnew = t1 + dnew / dold * (t2 - t1)
Qbelow = (znew - aq.z[modellayer + 1]) / aq.Haq[layer] * Qn1
if Qbelow > Qn2: # taken out
terminate = True
xyztnew = [np.array([xnew, ynew, znew, tnew])]
else:
znew2 = aq.z[modellayer + 1] + Qbelow / Qn2 * aq.Haq[layer]
xyztnew = [np.array([xnew, ynew, znew, tnew]), np.array([xnew, ynew, znew2, tnew])]
"""
-----------------------------------------------------------------------
Harmoni: a Novel Method for Eliminating Spurious Neuronal Interactions due to the Harmonic Components in Neuronal Data
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://doi.org/10.1101/2021.10.06.463319
-----------------------------------------------------------------------
script for:
** Lemon Data analysis **
-----------------------------------------------------------------------
(c) <NAME> (<EMAIL>) @ Neurolgy Dept, MPI CBS, 2021
https://github.com/minajamshidi
(c) please cite the above paper in case of using this code for your research
License: MIT License
-----------------------------------------------------------------------
last modified: 20211004 by Mina
-----------------------------------------------------------------------
-----------------------------------------------------------------------
"""
import os.path as op
import os
import itertools
from operator import itemgetter
import multiprocessing
from functools import partial
import time
from matplotlib import pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from numpy import pi
import scipy.stats as stats
from scipy.signal import butter
from tools_general import *
from tools_source_space import *
from tools_connectivity import *
from tools_connectivity_plot import *
# directories and settings -----------------------------------------------------
# fill in these directories with your own data directories
subjects_dir = '/data/pt_02076/mne_data/MNE-fsaverage-data/' # dir for the head model
subject = 'fsaverage'
_oct = '6'
src_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-src.fif')
fwd_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-fwd.fif')
inv_method = 'eLORETA'
condition = 'EC'
# dir_adjmat = op.join('/data/pt_02076/LEMON/lemon_processed_data/networks_bandpass/eloreta/Schaefer100/', condition)
dir_adjmat = '/data/pt_02076/LEMON/lemon_processed_data/networks_coh_indiv_alphapeak_broadsvd_noperm/'
dir_raw_set = '/data/pt_nro109/Share/EEG_MPILMBB_LEMON/EEG_Preprocessed_BIDS_ID/EEG_Preprocessed/'
"""
NOTE ABOUT DATA
You have to download the data of eyes-closed rsEEG of subject sub-010017 from
https://ftp.gwdg.de/pub/misc/MPI-Leipzig_Mind-Brain-Body-LEMON/EEG_MPILMBB_LEMON/EEG_Raw_BIDS_ID/sub-010017/RSEEG/
and put it in the data_dir you specify here.
"""
# -----------------------------------------------------
# read the parcellation
# -----------------------------------------------------
parcellation = dict(name='Schaefer2018_100Parcels_7Networks_order', abb='Schaefer100')
labels = mne.read_labels_from_annot(subject, subjects_dir=subjects_dir, parc=parcellation['name'])
labels = labels[:-2]
# labels = labels[:-1]
labels_sorted, idx_sorted = rearrange_labels(labels) # rearrange labels
labels_sorted2, idx_sorted2 = rearrange_labels_network(labels) # rearrange labels
labels_network_sorted, idx_lbl_sort = rearrange_labels_network(labels_sorted)
n_parc = len(labels)
n_parc_range_prod = list(itertools.product(np.arange(n_parc), np.arange(n_parc)))
# read forward solution ---------------------------------------------------
fwd = mne.read_forward_solution(fwd_dir)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)
leadfield = fwd_fixed['sol']['data']
n_vox = leadfield.shape[1]
src = fwd_fixed['src']
sfreq = 250
vertices = [src[0]['vertno'], src[1]['vertno']]
iir_params = dict(order=2, ftype='butter')
b10, a10 = butter(N=2, Wn=np.array([8, 12]) / sfreq * 2, btype='bandpass')
b20, a20 = butter(N=2, Wn=np.array([16, 24]) / sfreq * 2, btype='bandpass')
# -----------------------------------------------------
# ID settings
# -----------------------------------------------------
# ids1 = select_subjects('young', 'male', 'right', meta_file_path)
list_ids = listdir_restricted(dir_adjmat, 'sub-')
ids = [list_ids1[:10] for list_ids1 in list_ids]
ids = np.unique(np.sort(ids))
n_subj = len(ids)
# ----------------------------------------------------------------------------------------------------------------------
# Harmoni and rsEEG data - panel A
# 1:2 coh all subjects source-space
# This part is commented because it takes a lot of time - just uncomment it if you wanna run it
# ----------------------------------------------------------------------------------------------------------------------
# plv_src = np.zeros((n_vox, n_subj))
# for i_subj, subj in enumerate(ids):
# print(i_subj, '**************')
# raw_name = op.join(dir_raw_set, subj + '_EC.set')
# raw = read_eeglab_standard_chanloc(raw_name)
# data_raw = raw.get_data()
# inv_sol, inv_op, inv = extract_inv_sol(data_raw.shape, fwd, raw.info)
# fwd_ch = fwd_fixed.ch_names
# raw_ch = raw.info['ch_names']
# ind = [fwd_ch.index(ch) for ch in raw_ch]
# leadfield_raw = leadfield[ind, :]
# sfreq = raw.info['sfreq']
#
# # alpha sources --------
# raw_alpha = raw.copy()
# raw_alpha.load_data()
# raw_alpha.filter(l_freq=8, h_freq=12, method='iir', iir_params=iir_params)
# raw_alpha.set_eeg_reference(projection=True)
# stc_alpha_raw = mne.minimum_norm.apply_inverse_raw(raw_alpha, inverse_operator=inv,
# lambda2=0.05, method=inv_method, pick_ori='normal')
# # beta sources --------
# raw_beta = raw.copy()
# raw_beta.load_data()
# raw_beta.filter(l_freq=16, h_freq=24, method='iir', iir_params=iir_params)
# raw_beta.set_eeg_reference(projection=True)
# stc_beta_raw = mne.minimum_norm.apply_inverse_raw(raw_beta, inverse_operator=inv,
# lambda2=0.1, method=inv_method, pick_ori='normal')
#
# for i_parc, label1 in enumerate(labels):
# print(i_parc)
# parc_idx, _ = label_idx_whole_brain(src, label1)
# data1 = stc_alpha_raw.data[parc_idx, :]
# data2 = stc_beta_raw.data[parc_idx]
# plv_src[parc_idx, i_subj] = compute_phase_connectivity(data1, data2, 1, 2, measure='coh', axis=1, type1='abs')
#
# save_json_from_numpy('/NOBACKUP/Results/lemon_processed_data/parcels/plv_vertices_all-subj', plv_src)
#
# stc_new = mne.SourceEstimate(np.mean(plv_src, axis=-1, keepdims=True), vertices, tmin=0, tstep=0.01, subject='fsaverage')
# stc_new.plot(subject='fsaverage', subjects_dir=subjects_dir, time_viewer=True, hemi='split', background='white',
# surface='pial')
# ----------------------------------------------------------------------------------------------------------------------
# read the graphs
# ----------------------------------------------------------------------------------------------------------------------
# containers for the graphs and asymmetry index -----------------------------------
# all graphs, not thresholded
conn1_all = np.zeros((n_parc, n_parc, n_subj))
conn2_all = np.zeros((n_parc, n_parc, n_subj))
conn2_corr_all = np.zeros((n_parc, n_parc, n_subj))
conn12_all = np.zeros((n_parc, n_parc, n_subj))
conn12_corr_all = np.zeros((n_parc, n_parc, n_subj))
conn12_symm_idx = np.zeros((n_subj, 2)) # asymmetry index container
ind_triu = np.triu_indices(n_parc, k=1)
ind_diag = np.diag_indices(n_parc)
"""
************************* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CAUTION: graph adjacency matrices are rearranged here --> the parcels are rearranged as the in labels_sorted
they are rearranged in the posterior-anterior direction. In most cases, nearby parcels are also adjacent physically
************************* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
for i_subj, subj in enumerate(ids):
print(i_subj)
pickle_name = op.join(dir_adjmat, subj + '-alpha-alpha')
conn1, pval1, pval1_ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-beta-beta')
conn2, pval2, _ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-beta-beta-corr-grad')
conn2_corr, pval2_corr, _ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-alpha-beta')
conn12, pval12, _ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-alpha-beta-corr-grad')
conn12_corr, pval12_corr, _ = load_pickle(pickle_name)
# save the original graphs
conn1_all[:, :, i_subj] = conn1[idx_sorted, :][:, idx_sorted]
conn2_all[:, :, i_subj] = conn2[idx_sorted, :][:, idx_sorted]
conn2_corr_all[:, :, i_subj] = conn2_corr[idx_sorted, :][:, idx_sorted]
conn12_all[:, :, i_subj] = conn12[idx_sorted, :][:, idx_sorted]
conn12_corr_all[:, :, i_subj] = conn12_corr[idx_sorted, :][:, idx_sorted]
# asymmetry index from original graphs
    conn12_symm_idx[i_subj, 0] = np.linalg.norm(conn12 - conn12.T) / (2 * np.linalg.norm(conn12))
    conn12_symm_idx[i_subj, 1] = np.linalg.norm(conn12_corr - conn12_corr.T) / (2 * np.linalg.norm(conn12_corr))
conn12_all = zscore_matrix_fischer(conn12_all)
conn12_corr_all = zscore_matrix_fischer(conn12_corr_all)
# ----------------------------------------------------------------------------------------------------------------------
# Harmoni and rsEEG data - panels B & C & D & E
# means
# # ----------------------------------------------------------------------------------------------------------------------
net_mean_before = np.mean(conn12_all, axis=-1)
net_mean_after = np.mean(conn12_corr_all, axis=-1)
# zscore all -------------------------
conn12_all_z = np.zeros_like(conn12_all)
conn12_corr_all_z = np.zeros_like(conn12_corr_all)
for i_subj in range(n_subj):
print(i_subj)
conn12_all_z[:, :, i_subj] = zscore_matrix(conn12_all[:, :, i_subj])
conn12_corr_all_z[:, :, i_subj] = zscore_matrix(conn12_corr_all[:, :, i_subj])
# difference by subtracting the zscored graphs -------------------------
conn12_diff_z = conn12_corr_all_z - conn12_all_z
conn12_diff_z_mean = np.mean(conn12_diff_z, axis=-1)
conn12_diff_z_mean_pos = conn12_diff_z_mean.copy()
conn12_diff_z_mean_pos[conn12_diff_z_mean < 0] = 0
conn12_diff_z_mean_neg = conn12_diff_z_mean.copy()
conn12_diff_z_mean_neg[conn12_diff_z_mean > 0] = 0
conn12_diff_z_mean_neg = np.abs(conn12_diff_z_mean_neg)
# test the significance of change in each connection -------------------------
pvalue_zscores = np.zeros((n_parc, n_parc))
statistics_all = np.zeros((n_parc, n_parc))
for i1 in range(n_parc):
for i2 in range(n_parc):
statistics_all[i1, i2], pvalue_zscores[i1, i2] = stats.ttest_rel((conn12_all_z[i1, i2, :]),
(conn12_corr_all_z[i1, i2, :]))
ind_nonsig = pvalue_zscores > 0.05 / n_parc ** 2 # Bonferroni correction
pvalue2_zscores = np.ones((n_parc, n_parc))
pvalue2_zscores[ind_nonsig] = 0
# plot the networks -------------------------
# the mean before Harmoni - panel B
con_lbl_net_before, labels_s = plot_connectivity_bipartite_2_prime(net_mean_before,
labels_sorted, 0, edge_cmp='Blues',
fig_title='mean before',
only_lbl=None, arrange='network')
# the mean after Harmoni - panel C
con_lbl_net_after, _ = plot_connectivity_bipartite_2_prime(net_mean_after,
labels_sorted, 0, edge_cmp='Blues',
fig_title='mean after',
only_lbl=None, arrange='network')
# the positive difference - significant connections - panel D
con_lbl_net_diff_pos, _ = plot_connectivity_bipartite_2_prime(conn12_diff_z_mean_pos * pvalue2_zscores,
labels_sorted, 0, edge_cmp='Purples',
fig_title='pos difference',
only_lbl=None, arrange='network')
# the negative difference - significant connections - panel E
con_lbl_net_diff_neg, _ = plot_connectivity_bipartite_2_prime(conn12_diff_z_mean_neg * pvalue2_zscores,
labels_sorted, 0, edge_cmp='Greens',
                                                              fig_title='neg difference',
only_lbl=None, arrange='network')
fig, ax = plt.subplots()
plot_matrix(con_lbl_net_before, cmap='RdBu', vmin=None, axes=ax)
ax.set_yticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.set_xticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.xaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
ax.yaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
fig, ax = plt.subplots()
plot_matrix(con_lbl_net_after, cmap='RdBu', vmin=0, axes=ax)
ax.set_yticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.set_xticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.xaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
ax.yaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
fig, ax = plt.subplots()
plot_matrix(con_lbl_net_diff_pos, cmap='PRGn_r', vmin=0, axes=ax)
ax.set_yticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.set_xticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.xaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
ax.yaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
fig, ax = plt.subplots()
plot_matrix(con_lbl_net_diff_neg, cmap='PRGn', vmin=0, axes=ax)
ax.set_yticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.set_xticks([3.5, 16.5, 24.5, 27.5, 34.5, 40.5, 49.5, 57.5, 65.5, 70.5, 72.5, 79.5, 90.5], minor=True)
ax.xaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
ax.yaxis.grid(True, which='minor', color='black', linestyle='--', linewidth=1)
# plot_matrix(net_mean_before)
# plot_matrix(net_mean_after)
# plot_matrix(conn12_diff_z_mean_pos*pvalue2_zscores)
# plot_matrix(conn12_diff_z_mean_neg*pvalue2_zscores)
# ----------------------------------------------------------------------------------------------------------------------
# test the decrease
# ----------------------------------------------------------------------------------------------------------------------
pvalue_all = np.zeros((n_parc, n_parc))
tvalue_all = np.zeros((n_parc, n_parc))
for i1 in range(n_parc):
for i2 in range(n_parc):
tvalue_all[i1, i2], pvalue_all[i1, i2] = stats.ttest_rel(conn12_corr_all[i1, i2, :],
conn12_all[i1, i2, :])
ind_nonsig = pvalue_all > 0.05 / n_parc**2 # Bonferroni correction
pvalue2 = np.ones((n_parc, n_parc))
pvalue2[ind_nonsig] = 0
print('min t=', | np.min(tvalue_all[pvalue2 == 1]) | numpy.min |
from functools import lru_cache
from typing import Union
import numpy as np
from qutip import Qobj, expect, jmat, lindblad_dissipator, sigmax, sigmay, sigmaz, spin_q_function, spin_state, steadystate
from scipy import integrate
from scipy.linalg import expm
from utils import profile
def spin_S_measure(theta, Q):
# Calculate synchronisation measure from Q representation
# theta parameter and theta 'axis' of Q should be the same.
if len(theta.shape) > 1:
# To ensure params are passed the right way around
        raise ValueError("theta must be either a row or a column vector")
return integrate.trapz(Q * np.sin(theta), theta) - 1 / (2 * np.pi)
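# Quick sanity sketch (added, not part of the original script): for a uniform Q function,
# Q = 1/(4*pi), the integral over theta in [0, pi] of Q*sin(theta) equals 1/(2*pi), so the
# measure above returns ~0 -- i.e. no phase preference means no synchronisation.
def _demo_spin_S_measure_uniform():
    theta = np.linspace(0, np.pi, 1000)
    Q = np.full_like(theta, 1 / (4 * np.pi))
    S = spin_S_measure(theta, Q)
    assert abs(S) < 1e-5
    return S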
def my_spin_coherent_dm(j, theta, phi):
Qsize = [1, 1]
if isinstance(theta, np.ndarray):
Qsize[1] = theta.size
if isinstance(phi, np.ndarray):
Qsize[0] = phi.size
Sp = np.ones(Qsize, dtype=Qobj) * jmat(j, "+")
Sm = | np.ones(Qsize, dtype=Qobj) | numpy.ones |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from PIL import Image, ImageEnhance, ImageDraw
import math
import six
import sys; sys.path.append('../')
from data.choose_config import cfg
cfg = cfg.cfg
import random
class sampler():
def __init__(self,
max_sample,
max_trial,
min_scale,
max_scale,
min_aspect_ratio,
max_aspect_ratio,
min_jaccard_overlap,
max_jaccard_overlap,
min_object_coverage,
max_object_coverage,
use_square=False):
self.max_sample = max_sample
self.max_trial = max_trial
self.min_scale = min_scale
self.max_scale = max_scale
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
self.min_jaccard_overlap = min_jaccard_overlap
self.max_jaccard_overlap = max_jaccard_overlap
self.min_object_coverage = min_object_coverage
self.max_object_coverage = max_object_coverage
self.use_square = use_square
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
def jaccard_numpy(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
        jaccard overlap: Shape: [box_a.shape[0]]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1])) # [A,B]
area_b = ((box_b[2] - box_b[0]) *
(box_b[3] - box_b[1])) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
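# Tiny usage sketch (added for illustration; the coordinates are made up): two unit boxes
# shifted by half a width intersect with area 0.5, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _demo_jaccard_numpy():
    box_a = np.array([[0.0, 0.0, 1.0, 1.0]])   # candidate boxes, shape [num_boxes, 4]
    box_b = np.array([0.5, 0.0, 1.5, 1.0])     # single box, shape [4]
    return jaccard_numpy(box_a, box_b)         # ~ array([0.3333])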
class bbox():
def __init__(self, xmin, ymin, xmax, ymax):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def random_brightness(img):
prob = np.random.uniform(0, 1)
if prob < cfg.brightness_prob:
delta = np.random.uniform(-cfg.brightness_delta,
cfg.brightness_delta) + 1
img = ImageEnhance.Brightness(img).enhance(delta)
return img
def random_contrast(img):
prob = np.random.uniform(0, 1)
if prob < cfg.contrast_prob:
delta = np.random.uniform(-cfg.contrast_delta,
cfg.contrast_delta) + 1
img = ImageEnhance.Contrast(img).enhance(delta)
return img
def random_saturation(img):
prob = np.random.uniform(0, 1)
if prob < cfg.saturation_prob:
delta = np.random.uniform(-cfg.saturation_delta,
cfg.saturation_delta) + 1
img = ImageEnhance.Color(img).enhance(delta)
return img
def random_hue(img):
prob = np.random.uniform(0, 1)
if prob < cfg.hue_prob:
delta = np.random.uniform(-cfg.hue_delta, cfg.hue_delta)
img_hsv = np.array(img.convert('HSV'))
img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
img = Image.fromarray(img_hsv, mode='HSV').convert('RGB')
return img
def distort_image(img):
prob = np.random.uniform(0, 1)
# Apply different distort order
if prob > 0.5:
img = random_brightness(img)
img = random_contrast(img)
img = random_saturation(img)
img = random_hue(img)
else:
img = random_brightness(img)
img = random_saturation(img)
img = random_hue(img)
img = random_contrast(img)
return img
def meet_emit_constraint(src_bbox, sample_bbox):
center_x = (src_bbox.xmax + src_bbox.xmin) / 2
center_y = (src_bbox.ymax + src_bbox.ymin) / 2
if center_x >= sample_bbox.xmin and \
center_x <= sample_bbox.xmax and \
center_y >= sample_bbox.ymin and \
center_y <= sample_bbox.ymax:
return True
return False
def project_bbox(object_bbox, sample_bbox):
if object_bbox.xmin >= sample_bbox.xmax or \
object_bbox.xmax <= sample_bbox.xmin or \
object_bbox.ymin >= sample_bbox.ymax or \
object_bbox.ymax <= sample_bbox.ymin:
return False
else:
proj_bbox = bbox(0, 0, 0, 0)
sample_width = sample_bbox.xmax - sample_bbox.xmin
sample_height = sample_bbox.ymax - sample_bbox.ymin
proj_bbox.xmin = (object_bbox.xmin - sample_bbox.xmin) / sample_width
proj_bbox.ymin = (object_bbox.ymin - sample_bbox.ymin) / sample_height
proj_bbox.xmax = (object_bbox.xmax - sample_bbox.xmin) / sample_width
proj_bbox.ymax = (object_bbox.ymax - sample_bbox.ymin) / sample_height
proj_bbox = clip_bbox(proj_bbox)
if bbox_area(proj_bbox) > 0:
return proj_bbox
else:
return False
def transform_labels(bbox_labels, sample_bbox):
sample_labels = []
for i in range(len(bbox_labels)):
sample_label = []
object_bbox = bbox(bbox_labels[i][1], bbox_labels[i][2],
bbox_labels[i][3], bbox_labels[i][4])
if not meet_emit_constraint(object_bbox, sample_bbox):
continue
proj_bbox = project_bbox(object_bbox, sample_bbox)
if proj_bbox:
sample_label.append(bbox_labels[i][0])
sample_label.append(float(proj_bbox.xmin))
sample_label.append(float(proj_bbox.ymin))
sample_label.append(float(proj_bbox.xmax))
sample_label.append(float(proj_bbox.ymax))
sample_label = sample_label + bbox_labels[i][5:]
sample_labels.append(sample_label)
return sample_labels
def expand_image(img, bbox_labels, img_width, img_height):
prob = np.random.uniform(0, 1)
if prob < cfg.expand_prob:
if cfg.expand_max_ratio - 1 >= 0.01:
expand_ratio = np.random.uniform(1, cfg.expand_max_ratio)
height = int(img_height * expand_ratio)
width = int(img_width * expand_ratio)
h_off = math.floor(np.random.uniform(0, height - img_height))
w_off = math.floor(np.random.uniform(0, width - img_width))
expand_bbox = bbox(-w_off / img_width, -h_off / img_height,
(width - w_off) / img_width,
(height - h_off) / img_height)
expand_img = np.ones((height, width, 3))
expand_img = np.uint8(expand_img * np.squeeze(cfg.img_mean))
expand_img = Image.fromarray(expand_img)
expand_img.paste(img, (int(w_off), int(h_off)))
bbox_labels = transform_labels(bbox_labels, expand_bbox)
return expand_img, bbox_labels, width, height
return img, bbox_labels, img_width, img_height
def clip_bbox(src_bbox):
src_bbox.xmin = max(min(src_bbox.xmin, 1.0), 0.0)
src_bbox.ymin = max(min(src_bbox.ymin, 1.0), 0.0)
src_bbox.xmax = max(min(src_bbox.xmax, 1.0), 0.0)
src_bbox.ymax = max(min(src_bbox.ymax, 1.0), 0.0)
return src_bbox
def bbox_area(src_bbox):
if src_bbox.xmax < src_bbox.xmin or src_bbox.ymax < src_bbox.ymin:
return 0.
else:
width = src_bbox.xmax - src_bbox.xmin
height = src_bbox.ymax - src_bbox.ymin
return width * height
def intersect_bbox(bbox1, bbox2):
if bbox2.xmin > bbox1.xmax or bbox2.xmax < bbox1.xmin or \
bbox2.ymin > bbox1.ymax or bbox2.ymax < bbox1.ymin:
intersection_box = bbox(0.0, 0.0, 0.0, 0.0)
else:
intersection_box = bbox(
max(bbox1.xmin, bbox2.xmin),
max(bbox1.ymin, bbox2.ymin),
min(bbox1.xmax, bbox2.xmax), min(bbox1.ymax, bbox2.ymax))
return intersection_box
def bbox_coverage(bbox1, bbox2):
inter_box = intersect_bbox(bbox1, bbox2)
intersect_size = bbox_area(inter_box)
if intersect_size > 0:
bbox1_size = bbox_area(bbox1)
return intersect_size / bbox1_size
else:
return 0.
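# Clarifying sketch (added): unlike jaccard_overlap below, bbox_coverage is asymmetric --
# it measures how much of bbox1 lies inside bbox2. A small box fully contained in a large
# one has coverage 1.0 even though their IoU is small.
def _demo_bbox_coverage():
    small = bbox(0.4, 0.4, 0.6, 0.6)
    big = bbox(0.0, 0.0, 1.0, 1.0)
    return bbox_coverage(small, big)   # -> 1.0 (intersection 0.04 / area of small 0.04)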
def generate_batch_random_samples(batch_sampler, bbox_labels, image_width,
image_height, scale_array, resize_width,
resize_height):
sampled_bbox = []
for sampler in batch_sampler:
found = 0
for i in range(sampler.max_trial):
if found >= sampler.max_sample:
break
sample_bbox = data_anchor_sampling(
sampler, bbox_labels, image_width, image_height, scale_array,
resize_width, resize_height)
if sample_bbox == 0:
break
if satisfy_sample_constraint(sampler, sample_bbox, bbox_labels):
sampled_bbox.append(sample_bbox)
found = found + 1
return sampled_bbox
def data_anchor_sampling(sampler, bbox_labels, image_width, image_height,
scale_array, resize_width, resize_height):
num_gt = len(bbox_labels)
# np.random.randint range: [low, high)
rand_idx = np.random.randint(0, num_gt) if num_gt != 0 else 0
if num_gt != 0:
norm_xmin = bbox_labels[rand_idx][1]
norm_ymin = bbox_labels[rand_idx][2]
norm_xmax = bbox_labels[rand_idx][3]
norm_ymax = bbox_labels[rand_idx][4]
xmin = norm_xmin * image_width
ymin = norm_ymin * image_height
wid = image_width * (norm_xmax - norm_xmin)
hei = image_height * (norm_ymax - norm_ymin)
range_size = 0
area = wid * hei
for scale_ind in range(0, len(scale_array) - 1):
if area > scale_array[scale_ind] ** 2 and area < \
scale_array[scale_ind + 1] ** 2:
range_size = scale_ind + 1
break
if area > scale_array[len(scale_array) - 2]**2:
range_size = len(scale_array) - 2
scale_choose = 0.0
if range_size == 0:
rand_idx_size = 0
else:
# np.random.randint range: [low, high)
rng_rand_size = np.random.randint(0, range_size + 1)
rand_idx_size = rng_rand_size % (range_size + 1)
if rand_idx_size == range_size:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = min(2.0 * scale_array[rand_idx_size],
2 * math.sqrt(wid * hei))
scale_choose = random.uniform(min_resize_val, max_resize_val)
else:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = 2.0 * scale_array[rand_idx_size]
scale_choose = random.uniform(min_resize_val, max_resize_val)
sample_bbox_size = wid * resize_width / scale_choose
w_off_orig = 0.0
h_off_orig = 0.0
if sample_bbox_size < max(image_height, image_width):
if wid <= sample_bbox_size:
w_off_orig = np.random.uniform(xmin + wid - sample_bbox_size,
xmin)
else:
w_off_orig = np.random.uniform(xmin,
xmin + wid - sample_bbox_size)
if hei <= sample_bbox_size:
h_off_orig = np.random.uniform(ymin + hei - sample_bbox_size,
ymin)
else:
h_off_orig = np.random.uniform(ymin,
ymin + hei - sample_bbox_size)
else:
w_off_orig = np.random.uniform(image_width - sample_bbox_size, 0.0)
h_off_orig = np.random.uniform(
image_height - sample_bbox_size, 0.0)
w_off_orig = math.floor(w_off_orig)
h_off_orig = math.floor(h_off_orig)
# Figure out top left coordinates.
w_off = 0.0
h_off = 0.0
w_off = float(w_off_orig / image_width)
h_off = float(h_off_orig / image_height)
sampled_bbox = bbox(w_off, h_off,
w_off + float(sample_bbox_size / image_width),
h_off + float(sample_bbox_size / image_height))
return sampled_bbox
else:
return 0
def jaccard_overlap(sample_bbox, object_bbox):
if sample_bbox.xmin >= object_bbox.xmax or \
sample_bbox.xmax <= object_bbox.xmin or \
sample_bbox.ymin >= object_bbox.ymax or \
sample_bbox.ymax <= object_bbox.ymin:
return 0
intersect_xmin = max(sample_bbox.xmin, object_bbox.xmin)
intersect_ymin = max(sample_bbox.ymin, object_bbox.ymin)
intersect_xmax = min(sample_bbox.xmax, object_bbox.xmax)
intersect_ymax = min(sample_bbox.ymax, object_bbox.ymax)
intersect_size = (intersect_xmax - intersect_xmin) * (
intersect_ymax - intersect_ymin)
sample_bbox_size = bbox_area(sample_bbox)
object_bbox_size = bbox_area(object_bbox)
overlap = intersect_size / (
sample_bbox_size + object_bbox_size - intersect_size)
return overlap
def satisfy_sample_constraint(sampler, sample_bbox, bbox_labels):
if sampler.min_jaccard_overlap == 0 and sampler.max_jaccard_overlap == 0:
has_jaccard_overlap = False
else:
has_jaccard_overlap = True
if sampler.min_object_coverage == 0 and sampler.max_object_coverage == 0:
has_object_coverage = False
else:
has_object_coverage = True
if not has_jaccard_overlap and not has_object_coverage:
return True
found = False
for i in range(len(bbox_labels)):
object_bbox = bbox(bbox_labels[i][1], bbox_labels[i][2],
bbox_labels[i][3], bbox_labels[i][4])
if has_jaccard_overlap:
overlap = jaccard_overlap(sample_bbox, object_bbox)
if sampler.min_jaccard_overlap != 0 and \
overlap < sampler.min_jaccard_overlap:
continue
if sampler.max_jaccard_overlap != 0 and \
overlap > sampler.max_jaccard_overlap:
continue
found = True
if has_object_coverage:
object_coverage = bbox_coverage(object_bbox, sample_bbox)
if sampler.min_object_coverage != 0 and \
object_coverage < sampler.min_object_coverage:
continue
if sampler.max_object_coverage != 0 and \
object_coverage > sampler.max_object_coverage:
continue
found = True
if found:
return True
return found
def crop_image_sampling(img, bbox_labels, sample_bbox, image_width,
image_height, resize_width, resize_height,
min_face_size):
# no clipping here
xmin = int(sample_bbox.xmin * image_width)
xmax = int(sample_bbox.xmax * image_width)
ymin = int(sample_bbox.ymin * image_height)
ymax = int(sample_bbox.ymax * image_height)
w_off = xmin
h_off = ymin
width = xmax - xmin
height = ymax - ymin
cross_xmin = max(0.0, float(w_off))
cross_ymin = max(0.0, float(h_off))
cross_xmax = min(float(w_off + width - 1.0), float(image_width))
cross_ymax = min(float(h_off + height - 1.0), float(image_height))
cross_width = cross_xmax - cross_xmin
cross_height = cross_ymax - cross_ymin
roi_xmin = 0 if w_off >= 0 else abs(w_off)
roi_ymin = 0 if h_off >= 0 else abs(h_off)
roi_width = cross_width
roi_height = cross_height
roi_y1 = int(roi_ymin)
roi_y2 = int(roi_ymin + roi_height)
roi_x1 = int(roi_xmin)
roi_x2 = int(roi_xmin + roi_width)
cross_y1 = int(cross_ymin)
cross_y2 = int(cross_ymin + cross_height)
cross_x1 = int(cross_xmin)
cross_x2 = int(cross_xmin + cross_width)
sample_img = np.zeros((height, width, 3))
# print(sample_img.shape)
sample_img[roi_y1 : roi_y2, roi_x1 : roi_x2] = \
img[cross_y1: cross_y2, cross_x1: cross_x2]
sample_img = cv2.resize(
sample_img, (resize_width, resize_height), interpolation=cv2.INTER_AREA)
resize_val = resize_width
sample_labels = transform_labels_sampling(bbox_labels, sample_bbox,
resize_val, min_face_size)
return sample_img, sample_labels
def transform_labels_sampling(bbox_labels, sample_bbox, resize_val,
min_face_size):
sample_labels = []
for i in range(len(bbox_labels)):
sample_label = []
object_bbox = bbox(bbox_labels[i][1], bbox_labels[i][2],
bbox_labels[i][3], bbox_labels[i][4])
if not meet_emit_constraint(object_bbox, sample_bbox):
continue
proj_bbox = project_bbox(object_bbox, sample_bbox)
if proj_bbox:
real_width = float((proj_bbox.xmax - proj_bbox.xmin) * resize_val)
real_height = float((proj_bbox.ymax - proj_bbox.ymin) * resize_val)
if real_width * real_height < float(min_face_size * min_face_size):
continue
else:
sample_label.append(bbox_labels[i][0])
sample_label.append(float(proj_bbox.xmin))
sample_label.append(float(proj_bbox.ymin))
sample_label.append(float(proj_bbox.xmax))
sample_label.append(float(proj_bbox.ymax))
sample_label = sample_label + bbox_labels[i][5:]
sample_labels.append(sample_label)
return sample_labels
def generate_sample(sampler, image_width, image_height):
scale = np.random.uniform(sampler.min_scale, sampler.max_scale)
aspect_ratio = np.random.uniform(sampler.min_aspect_ratio,
sampler.max_aspect_ratio)
aspect_ratio = max(aspect_ratio, (scale**2.0))
aspect_ratio = min(aspect_ratio, 1 / (scale**2.0))
bbox_width = scale * (aspect_ratio**0.5)
bbox_height = scale / (aspect_ratio**0.5)
# guarantee a squared image patch after cropping
if sampler.use_square:
if image_height < image_width:
bbox_width = bbox_height * image_height / image_width
else:
bbox_height = bbox_width * image_width / image_height
xmin_bound = 1 - bbox_width
ymin_bound = 1 - bbox_height
xmin = np.random.uniform(0, xmin_bound)
ymin = np.random.uniform(0, ymin_bound)
xmax = xmin + bbox_width
ymax = ymin + bbox_height
sampled_bbox = bbox(xmin, ymin, xmax, ymax)
return sampled_bbox
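# Note added for clarity: with use_square=False the sampled patch always covers a fraction
# scale**2 of the normalized image area, since bbox_width * bbox_height equals
# scale*sqrt(ar) * scale/sqrt(ar) = scale**2. A small self-contained check (the sampler
# arguments below are arbitrary and only for demonstration):
def _demo_generate_sample_area():
    s = sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0, 0.0, 0.0)
    b = generate_sample(s, image_width=640, image_height=480)
    return (b.xmax - b.xmin) * (b.ymax - b.ymin)   # lies in [0.3**2, 1.0**2]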
def generate_batch_samples(batch_sampler, bbox_labels, image_width,
image_height):
sampled_bbox = []
for sampler in batch_sampler:
found = 0
for i in range(sampler.max_trial):
if found >= sampler.max_sample:
break
sample_bbox = generate_sample(sampler, image_width, image_height)
if satisfy_sample_constraint(sampler, sample_bbox, bbox_labels):
sampled_bbox.append(sample_bbox)
found = found + 1
return sampled_bbox
def crop_image(img, bbox_labels, sample_bbox, image_width, image_height,
resize_width, resize_height, min_face_size):
sample_bbox = clip_bbox(sample_bbox)
xmin = int(sample_bbox.xmin * image_width)
xmax = int(sample_bbox.xmax * image_width)
ymin = int(sample_bbox.ymin * image_height)
ymax = int(sample_bbox.ymax * image_height)
sample_img = img[ymin:ymax, xmin:xmax]
resize_val = resize_width
sample_labels = transform_labels_sampling(bbox_labels, sample_bbox,
resize_val, min_face_size)
return sample_img, sample_labels
def to_chw_bgr(image):
"""
    Transpose image from HWC to CHW and from RGB to BGR.
    Args:
        image (np.array): an image with HWC and RGB layout.
"""
# HWC to CHW
if len(image.shape) == 3:
image = np.swapaxes(image, 1, 2)
image = np.swapaxes(image, 1, 0)
# RBG to BGR
image = image[[2, 1, 0], :, :]
return image
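# Equivalence note (added): the two swapaxes calls amount to a (2, 0, 1) transpose, so for
# a 3-channel HWC input, to_chw_bgr(image) == image.transpose(2, 0, 1)[::-1]. The random
# image below exists only for this check.
def _demo_to_chw_bgr():
    image = np.random.randint(0, 255, (4, 5, 3)).astype(np.float32)
    assert np.array_equal(to_chw_bgr(image), image.transpose(2, 0, 1)[::-1])
    return True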
def anchor_crop_image_sampling(img,
bbox_labels,
scale_array,
img_width,
img_height):
mean = np.array([104, 117, 123], dtype=np.float32)
maxSize = 12000 # max size
infDistance = 9999999
bbox_labels = np.array(bbox_labels)
scale = np.array([img_width, img_height, img_width, img_height])
boxes = bbox_labels[:, 1:5] * scale
labels = bbox_labels[:, 0]
boxArea = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
# argsort = np.argsort(boxArea)
# rand_idx = random.randint(min(len(argsort),6))
# print('rand idx',rand_idx)
rand_idx = np.random.randint(len(boxArea))
rand_Side = boxArea[rand_idx] ** 0.5
# rand_Side = min(boxes[rand_idx,2] - boxes[rand_idx,0] + 1,
# boxes[rand_idx,3] - boxes[rand_idx,1] + 1)
distance = infDistance
anchor_idx = 5
for i, anchor in enumerate(scale_array):
if abs(anchor - rand_Side) < distance:
distance = abs(anchor - rand_Side)
anchor_idx = i
target_anchor = random.choice(scale_array[0:min(anchor_idx + 1, 5) + 1])
ratio = float(target_anchor) / rand_Side
ratio = ratio * (2**random.uniform(-1, 1))
if int(img_height * ratio * img_width * ratio) > maxSize * maxSize:
ratio = (maxSize * maxSize / (img_height * img_width))**0.5
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC,
cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
interp_method = random.choice(interp_methods)
image = cv2.resize(img, None, None, fx=ratio,
fy=ratio, interpolation=interp_method)
boxes[:, 0] *= ratio
boxes[:, 1] *= ratio
boxes[:, 2] *= ratio
boxes[:, 3] *= ratio
height, width, _ = image.shape
sample_boxes = []
xmin = boxes[rand_idx, 0]
ymin = boxes[rand_idx, 1]
bw = (boxes[rand_idx, 2] - boxes[rand_idx, 0] + 1)
bh = (boxes[rand_idx, 3] - boxes[rand_idx, 1] + 1)
w = h = cfg.INPUT_SIZE
for _ in range(50):
if w < max(height, width):
if bw <= w:
w_off = random.uniform(xmin + bw - w, xmin)
else:
w_off = random.uniform(xmin, xmin + bw - w)
if bh <= h:
h_off = random.uniform(ymin + bh - h, ymin)
else:
h_off = random.uniform(ymin, ymin + bh - h)
else:
w_off = random.uniform(width - w, 0)
h_off = random.uniform(height - h, 0)
w_off = math.floor(w_off)
h_off = math.floor(h_off)
# convert to integer rect x1,y1,x2,y2
rect = np.array(
[int(w_off), int(h_off), int(w_off + w), int(h_off + h)])
# keep overlap with gt box IF center in sampled patch
centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
# mask in all gt boxes that above and to the left of centers
m1 = (rect[0] <= boxes[:, 0]) * (rect[1] <= boxes[:, 1])
# mask in all gt boxes that under and to the right of centers
m2 = (rect[2] >= boxes[:, 2]) * (rect[3] >= boxes[:, 3])
# mask in that both m1 and m2 are true
mask = m1 * m2
overlap = jaccard_numpy(boxes, rect)
# have any valid boxes? try again if not
if not mask.any() and not overlap.max() > 0.7:
continue
else:
sample_boxes.append(rect)
sampled_labels = []
if len(sample_boxes) > 0:
choice_idx = np.random.randint(len(sample_boxes))
choice_box = sample_boxes[choice_idx]
# print('crop the box :',choice_box)
centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
m1 = (choice_box[0] < centers[:, 0]) * \
(choice_box[1] < centers[:, 1])
m2 = (choice_box[2] > centers[:, 0]) * \
(choice_box[3] > centers[:, 1])
mask = m1 * m2
current_boxes = boxes[mask, :].copy()
current_labels = labels[mask]
current_boxes[:, :2] -= choice_box[:2]
current_boxes[:, 2:] -= choice_box[:2]
if choice_box[0] < 0 or choice_box[1] < 0:
new_img_width = width if choice_box[
0] >= 0 else width - choice_box[0]
new_img_height = height if choice_box[
1] >= 0 else height - choice_box[1]
image_pad = np.zeros(
(new_img_height, new_img_width, 3), dtype=float)
image_pad[:, :, :] = mean
start_left = 0 if choice_box[0] >= 0 else -choice_box[0]
start_top = 0 if choice_box[1] >= 0 else -choice_box[1]
image_pad[start_top:, start_left:, :] = image
choice_box_w = choice_box[2] - choice_box[0]
choice_box_h = choice_box[3] - choice_box[1]
start_left = choice_box[0] if choice_box[0] >= 0 else 0
start_top = choice_box[1] if choice_box[1] >= 0 else 0
end_right = start_left + choice_box_w
end_bottom = start_top + choice_box_h
current_image = image_pad[
start_top:end_bottom, start_left:end_right, :].copy()
image_height, image_width, _ = current_image.shape
if cfg.filter_min_face:
bbox_w = current_boxes[:, 2] - current_boxes[:, 0]
bbox_h = current_boxes[:, 3] - current_boxes[:, 1]
bbox_area = bbox_w * bbox_h
mask = bbox_area > (cfg.min_face_size * cfg.min_face_size)
current_boxes = current_boxes[mask]
current_labels = current_labels[mask]
for i in range(len(current_boxes)):
sample_label = []
sample_label.append(current_labels[i])
sample_label.append(current_boxes[i][0] / image_width)
sample_label.append(current_boxes[i][1] / image_height)
sample_label.append(current_boxes[i][2] / image_width)
sample_label.append(current_boxes[i][3] / image_height)
sampled_labels += [sample_label]
sampled_labels = np.array(sampled_labels)
else:
current_boxes /= np.array([image_width,
image_height, image_width, image_height])
sampled_labels = np.hstack(
(current_labels[:, np.newaxis], current_boxes))
return current_image, sampled_labels
current_image = image[choice_box[1]:choice_box[
3], choice_box[0]:choice_box[2], :].copy()
image_height, image_width, _ = current_image.shape
if cfg.filter_min_face:
bbox_w = current_boxes[:, 2] - current_boxes[:, 0]
bbox_h = current_boxes[:, 3] - current_boxes[:, 1]
bbox_area = bbox_w * bbox_h
mask = bbox_area > (cfg.min_face_size * cfg.min_face_size)
current_boxes = current_boxes[mask]
current_labels = current_labels[mask]
for i in range(len(current_boxes)):
sample_label = []
sample_label.append(current_labels[i])
sample_label.append(current_boxes[i][0] / image_width)
sample_label.append(current_boxes[i][1] / image_height)
sample_label.append(current_boxes[i][2] / image_width)
sample_label.append(current_boxes[i][3] / image_height)
sampled_labels += [sample_label]
sampled_labels = np.array(sampled_labels)
else:
current_boxes /= np.array([image_width,
image_height, image_width, image_height])
sampled_labels = np.hstack(
(current_labels[:, np.newaxis], current_boxes))
return current_image, sampled_labels
else:
image_height, image_width, _ = image.shape
if cfg.filter_min_face:
bbox_w = boxes[:, 2] - boxes[:, 0]
bbox_h = boxes[:, 3] - boxes[:, 1]
bbox_area = bbox_w * bbox_h
mask = bbox_area > (cfg.min_face_size * cfg.min_face_size)
boxes = boxes[mask]
labels = labels[mask]
for i in range(len(boxes)):
sample_label = []
sample_label.append(labels[i])
sample_label.append(boxes[i][0] / image_width)
sample_label.append(boxes[i][1] / image_height)
sample_label.append(boxes[i][2] / image_width)
sample_label.append(boxes[i][3] / image_height)
sampled_labels += [sample_label]
sampled_labels = np.array(sampled_labels)
else:
boxes /= np.array([image_width, image_height,
image_width, image_height])
sampled_labels = np.hstack(
(labels[:, np.newaxis], boxes))
return image, sampled_labels
def reduce_image(img, bbox_labels, img_width, img_height):
bbox_labels = np.array(bbox_labels)
scale = np.array([img_width, img_height, img_width, img_height])
boxes = bbox_labels[:, 1:5] * scale
boxArea = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
boxArea_max = np.amax(boxArea)
if boxArea_max > 48*48:
reduce_ratio = (boxArea_max/(48*48))**(0.5)
height = int(img_height / reduce_ratio)
width = int(img_width / reduce_ratio)
img = img.resize((width, height),
resample=Image.LANCZOS)
boxes = boxes//reduce_ratio
new_scale = np.array([width, height, width, height])
boxes = (boxes[:, 0:4]/new_scale)
bbox_labels[:,1:5] = boxes
bbox_labels = bbox_labels.tolist()
img_height = height
img_width = width
ratio_h = img_height/320
ratio_w = img_width/320
if ratio_h > 1 and ratio_w > 1:
expand_ratio = 1
else:
expand_ratio = 1/(min(ratio_h,ratio_w))
height = int(img_height * expand_ratio)
width = int(img_width * expand_ratio)
h_off = math.floor(np.random.uniform(0, height - img_height))
w_off = math.floor(np.random.uniform(0, width - img_width))
expand_bbox = bbox(-w_off / img_width, -h_off / img_height,
(width - w_off) / img_width,
(height - h_off) / img_height)
expand_img = np.ones((height, width, 3))
expand_img = np.uint8(expand_img * np.squeeze(cfg.img_mean))
expand_img = Image.fromarray(expand_img)
expand_img.paste(img, (int(w_off), int(h_off)))
bbox_labels = transform_labels(bbox_labels, expand_bbox)
return expand_img, bbox_labels, width, height
return img, bbox_labels.tolist(), img_width, img_height
def precrop(img, bbox_labels):
img_height, img_width = img.size[1], img.size[0]
scale = np.array([img_width, img_height, img_width, img_height])
bbox_labels = np.array(bbox_labels)
bbox_labels[:, 1:5] = bbox_labels[:, 1:5] * scale
if img_width-20 > img_height:
height_max, height_min = img_width + 20, img_width - 20
new_height = np.random.randint(height_min, height_max)
expand_img = np.ones((new_height, img_width, 3))
expand_img = np.uint8(expand_img * np.squeeze(cfg.img_mean))
expand_img = Image.fromarray(expand_img)
expand_img.paste(img, (int(0), int(0)))
img_height, img_width = new_height, img_width
img = expand_img
elif img_height-20 > img_width:
width_max, width_min = img_height + 20, img_height - 20
new_width = np.random.randint(width_min, width_max)
expand_img = np.ones((img_height, new_width, 3))
expand_img = np.uint8(expand_img * np.squeeze(cfg.img_mean))
expand_img = Image.fromarray(expand_img)
expand_img.paste(img, (int(0), int(0)))
img_height, img_width = img_height, new_width
img = expand_img
scale = np.array([img_width, img_height, img_width, img_height])
bbox_labels[:, 1:5] = bbox_labels[:, 1:5] / scale
return img, bbox_labels.tolist(), img_width, img_height
def preprocess(img, bbox_labels, mode, image_path):
img_width, img_height = img.size
sampled_labels = bbox_labels
if mode == 'train':
if cfg.apply_distort:
img = distort_image(img)
if cfg.apply_expand:
img, bbox_labels, img_width, img_height = expand_image(
img, bbox_labels, img_width, img_height)
batch_sampler = []
prob = np.random.uniform(0., 1.)
if prob > cfg.data_anchor_sampling_prob and cfg.anchor_sampling:
scale_array = np.array(cfg.ANCHOR_SIZES)#[16, 32, 64, 128, 256, 512])
'''
batch_sampler.append(
sampler(1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.6, 0.0, True))
sampled_bbox = generate_batch_random_samples(
batch_sampler, bbox_labels, img_width, img_height, scale_array,
cfg.resize_width, cfg.resize_height)
'''
img = np.array(img)
img, sampled_labels = anchor_crop_image_sampling(
img, bbox_labels, scale_array, img_width, img_height)
'''
if len(sampled_bbox) > 0:
idx = int(np.random.uniform(0, len(sampled_bbox)))
img, sampled_labels = crop_image_sampling(
img, bbox_labels, sampled_bbox[idx], img_width, img_height,
cfg.resize_width, cfg.resize_height, cfg.min_face_size)
'''
img = img.astype('uint8')
img = Image.fromarray(img)
else:
batch_sampler.append(sampler(1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
sampled_bbox = generate_batch_samples(
batch_sampler, bbox_labels, img_width, img_height)
img = | np.array(img) | numpy.array |
from PIL import Image
import numpy as np
def get_size_and_grad(height_len, width_len):
correct = True
flag = 0
while correct:
if flag > 0:
print("размеру мозаики должен быть делителем для ширины и высоты изображения, "
"а градация серого должна быть меньше 128")
print('Введите размер мозаики и градацию серого через пробел: ')
flag += 1
array = input().split(' ')
array = [int(x) for x in array]
if height_len % array[0] == 0 and width_len % array[0] == 0 and array[1] < 128:
correct = False
            print('The input is valid; look for the result in the folder.')
return(array[0], array[1])
def search_grey(i, j, array, size, grad):
sum = int(np.sum(array[i:i + size, j:j + size, 0]) + np.sum(array[i:i + size, j:j + size, 1])
+ np.sum(array[i:i + size, j:j + size, 2])) / 3
total_grey = int(sum // (size * size))
array[i:i + size, j:j + size, 0] = int(total_grey // grad) * grad
array[i:i + size, j:j + size, 1] = int(total_grey // grad) * grad
array[i:i + size, j:j + size, 2] = int(total_grey // grad) * grad
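# Worked example of the averaging/quantisation above (added; the numbers are illustrative):
# a tile whose mean grey level is 137 is floored to the band 120 when grad = 30, because
# (137 // 30) * 30 = 120.
def _demo_block_quantisation():
    block = np.full((4, 4, 3), 137.0)      # one 4x4 mosaic tile, mid-grey in all channels
    grad = 30
    mean_grey = block.mean()               # average over pixels and channels
    return int(mean_grey // grad) * grad   # -> 120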
img = Image.open("img2.jpg")
pixel_array = | np.array(img) | numpy.array |
# Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
#
# THIS IS NOT THE ORIGINAL VERSION OF THE FILE.
#
# Last modified 2021-12-02
import logging
import numpy as np
logger = logging.getLogger(__name__)
class Optimizer(object):
def __init__(self, theta, step_size):
#self.theta = theta
self.dim = len(theta)
self.t = 0
def update(self, theta, globalg):
logger.info(self.t)
self.t += 1
step = self._compute_step(globalg)
ratio = np.linalg.norm(step) / np.linalg.norm(theta)
return ratio, theta + step
def _compute_step(self, globalg):
raise NotImplementedError
class SimpleSGD(Optimizer):
def __init__(self, theta, stepsize):
Optimizer.__init__(self, theta, stepsize)
self.stepsize = stepsize
def _compute_step(self, globalg):
step = -self.stepsize * globalg
return step
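# Minimal usage sketch (added; not part of the original ES code): minimise f(theta) =
# ||theta||**2 with plain SGD. The gradient is 2*theta, so every update multiplies theta by
# (1 - 2*stepsize); `ratio` reports the relative step size taken.
def _demo_simple_sgd(steps=50):
    theta = np.array([1.0, -2.0], dtype=np.float32)
    opt = SimpleSGD(theta, stepsize=0.1)
    for _ in range(steps):
        ratio, theta = opt.update(theta, 2.0 * theta)
    return theta   # close to [0, 0]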
class SGD(Optimizer):
def __init__(self, theta, stepsize, momentum=0.9):
Optimizer.__init__(self, theta, stepsize)
self.v = np.zeros(self.dim, dtype=np.float32)
self.stepsize, self.momentum = stepsize, momentum
def _compute_step(self, globalg):
self.v = self.momentum * self.v + (1. - self.momentum) * globalg
step = -self.stepsize * self.v
return step
class Adam(Optimizer):
def __init__(self, theta, stepsize, beta1=0.9, beta2=0.999, epsilon=1e-08):
Optimizer.__init__(self, theta, stepsize)
self.stepsize = stepsize
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
def reset(self):
self.m = | np.zeros(self.dim, dtype=np.float32) | numpy.zeros |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
class TimeSeries:
def __init__(self, feature_range: tuple = (0, 1)):
self.feature_range = feature_range
self.scaler = MinMaxScaler(feature_range=feature_range)
self.dataset = []
def scaled(self):
return self.scaler.fit_transform(self.dataset)
def unscaled(self, dataset):
return self.scaler.inverse_transform(dataset)
def create_indexed_dataset(self, look_back=1, index_feature_range=(0.05, 0.95)):
scaler = MinMaxScaler(feature_range=index_feature_range)
dataset = self.scaled()
x = [dataset[i:i + look_back] for i in range(0, len(dataset) - look_back)]
y = [dataset[i] for i in range(look_back, len(dataset))]
n = len(y)
t = np.fromiter(range(n), np.float64).reshape((n, 1))
t = scaler.fit_transform(t)
x = np.array(x)
x = x.reshape((x.shape[0], x.shape[1]))
return np.hstack((t, x)).reshape((n, look_back + 1, 1)), np.array(y)
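    # Shape sketch (added; assumes a univariate series was already assigned to
    # self.dataset, e.g. ts.dataset = np.arange(100.0).reshape(-1, 1)): with n rows and
    # look_back = 3, create_indexed_dataset returns x of shape (n - 3, 4, 1) -- the extra
    # leading feature of each window is the scaled time index -- and y of shape (n - 3, 1).
    def _demo_indexed_shapes(self, look_back=3):
        x, y = self.create_indexed_dataset(look_back)
        return x.shape, y.shape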
def create_dataset(self, look_back=1):
dataset = self.scaled()
x = [dataset[i:i + look_back] for i in range(0, len(dataset) - look_back)]
y = [dataset[i] for i in range(look_back, len(dataset))]
return | np.array(x) | numpy.array |
import numpy as np
import pandas as pd
#import random
import scipy as sc
import scipy.stats as stats
from scipy.special import factorial,digamma
import numdifftools as nd
from scipy.optimize import minimize
from joblib import Parallel, delayed
##############################################################################################################
########################################### Functions #####################################################
def Sorting_and_Sequencing(Simulation):
    # Takes as input the number of bins (Simulation.bins), the diversity (Simulation.diversity) and
    # size (Simulation.size) of the sorted library, the total number of reads to allocate
    # (Simulation.reads), the post-sorting amplification step (Simulation.ratio_amplification),
    # whether the library is balanced (Simulation.bias_library), the underlying protein
    # distribution (gamma or lognormal), the fluorescence bounds of the sorter
    # (Simulation.partitioning), and the parameters of that distribution.
    # Returns the (Simulation.diversity x Simulation.bins) matrix resulting from the sequencing
    # and the sorting vector Nj (number of cells sorted in each bin).
global Sij
def sorting_protein_matrix_populate(i,j):
if Simulation.distribution=='lognormal':
element_matrix=stats.norm.cdf(Simulation.partitioning[j+1],loc=Simulation.theta1[i], scale=Simulation.theta2[i])-stats.norm.cdf(Simulation.partitioning[j],loc=Simulation.theta1[i], scale=Simulation.theta2[i])
else:
element_matrix=stats.gamma.cdf(Simulation.partitioning[j+1],a=Simulation.theta1[i], scale=Simulation.theta2[i])-stats.gamma.cdf(Simulation.partitioning[j],a=Simulation.theta1[i], scale=Simulation.theta2[i])
return(element_matrix)
#### STEP 1 - Draw the ratio p_concentration
if Simulation.bias_library==True:
params=np.ones(Simulation.diversity)
Dir=[random.gammavariate(a,1) for a in params]
Dir=[v/sum(Dir) for v in Dir]
# Sample from the Simulation.diversity simplex to get ratios
#p_concentration=np.ones(Simulation.diversity)/Simulation.diversity
p_concentration=Dir
else:
p_concentration=[1/Simulation.diversity]*Simulation.diversity
#### STEP 2 - Draw the sample sizes= of each genetic construct
Ni=np.random.multinomial(Simulation.size, p_concentration, size=1)[0]
#### STEP 3 - Compute binning
## Compute ratios qji
Qij=np.fromfunction(sorting_protein_matrix_populate, (Simulation.diversity, Simulation.bins), dtype=int)
## Compute Nij
Nij=Qij* Ni[:, np.newaxis]
Nij=np.floor(Nij) #Convert to Integer numbers
#### STEP 4 - PCR amplification
Nij_amplified=np.multiply(Nij,Simulation.ratio_amplification)
#### STEP 5 - Compute Reads allocation
N=np.sum(Nij)
Nj=np.sum(Nij, axis=0)
    READS=np.floor(Nj*Simulation.reads/N) # Allocate reads with respect to the number of cells sorted in each bin
#### STEP 6 - DNA sampling
Sij=np.zeros((Simulation.diversity,Simulation.bins))
#Compute ratios& Multinomial sampling
for j in range(Simulation.bins):
if np.sum(Nij_amplified,axis=0)[j]!=0:
Concentration_vector=Nij_amplified[:,j]/np.sum(Nij_amplified,axis=0)[j]
else:
Concentration_vector=np.zeros(Simulation.diversity)
Sij[:,j]=np.random.multinomial(READS[j],Concentration_vector,size=1)
return(Sij,Nj)
def Sorting(Simulation):
    # Takes as input the number of bins (Simulation.bins), the diversity (Simulation.diversity) and
    # size (Simulation.size) of the sorted library, the post-sorting amplification step
    # (Simulation.ratio_amplification), whether the library is balanced (Simulation.bias_library),
    # the underlying protein distribution (gamma or lognormal), the fluorescence bounds of the
    # sorter (Simulation.partitioning), and the parameters of that distribution.
    # Returns the (Simulation.diversity x Simulation.bins) matrix resulting from the sorting step.
global Sij
#### STEP 1 - Draw the ratio p_concentration
def sorting_protein_matrix_populate(i,j):
if Simulation.distribution=='lognormal':
element_matrix=stats.norm.cdf(Simulation.partitioning[j+1],loc=Simulation.theta1[i], scale=Simulation.theta2[i])-stats.norm.cdf(Simulation.partitioning[j],loc=Simulation.theta1[i], scale=Simulation.theta2[i])
else:
element_matrix=stats.gamma.cdf(Simulation.partitioning[j+1],a=Simulation.theta1[i], scale=Simulation.theta2[i])-stats.gamma.cdf(Simulation.partitioning[j],a=Simulation.theta1[i], scale=Simulation.theta2[i])
return(element_matrix)
if Simulation.bias_library==True:
params=np.ones(Simulation.diversity)
Dir=[random.gammavariate(a,1) for a in params]
Dir=[v/sum(Dir) for v in Dir]
# Sample from the 30,000 simplex to get ratios
#p_concentration=np.ones(Simulation.diversity)/Simulation.diversity
p_concentration=Dir
else:
p_concentration=[1/Simulation.diversity]*Simulation.diversity
#### STEP 2 - Draw the sample sizes= of each genetic construct
Ni=np.random.multinomial(Simulation.size, p_concentration, size=1)[0]
#Ni=Ni[0]
#### STEP 3 - Compute binning
## Compute ratios qji
Qij=np.fromfunction(sorting_protein_matrix_populate, (Simulation.diversity, Simulation.bins), dtype=int)
## Compute Nij
Nij=Qij* Ni[:, np.newaxis]
Nij=np.floor(Nij) #Convert to Integer numbers
return(Nij)
def Sequencing(Simulation,Nij):
    # Takes as input the sorting matrix Nij together with the simulation settings (number of bins,
    # diversity, total reads to allocate, and the post-sorting amplification step).
    # Returns the (Simulation.diversity x Simulation.bins) sequencing matrix Sij and the sorting
    # vector Nj (number of cells sorted in each bin).
#### STEP 4 - PCR amplification
Nij_amplified=np.multiply(Nij,Simulation.ratio_amplification)
#### STEP 5 - Compute Reads allocation
N=np.sum(Nij)
Nj=np.sum(Nij, axis=0)
    READS=np.floor(Nj*Simulation.reads/N) # Allocate reads with respect to the number of cells sorted in each bin
#### STEP 6 - DNA sampling
Sij= | np.zeros((Simulation.diversity,Simulation.bins)) | numpy.zeros |
import typing
import numpy as np
import tensorflow as tf
import src.estimation.configuration as configs
import src.utils.plots as plots
from src.acceptance.base import hand_orientation, joint_relation_errors, vectors_angle
from src.acceptance.gesture_acceptance_result import GestureAcceptanceResult
from src.detection.plots import image_plot
from src.system.database.reader import UsecaseDatabaseReader
from src.system.hand_position_estimator import HandPositionEstimator
from src.utils.camera import Camera
class GestureRecognizer:
def __init__(self, error_thresh: int, orientation_thresh: int, database_subdir: str, camera_name: str,
plot_result=True, plot_feedback=False, plot_orientation=True):
self.jre_thresh = error_thresh
self.orientation_thresh = orientation_thresh
self.plot_result = plot_result
self.plot_feedback = plot_feedback
self.plot_orientation = plot_orientation
self.camera = Camera(camera_name)
config = configs.PredictCustomDataset()
self.estimator = HandPositionEstimator(self.camera, config=config)
self.database_reader = UsecaseDatabaseReader()
self.database_reader.load_from_subdir(database_subdir)
self.gesture_database = self.database_reader.hand_poses
def start(self, image_generator, generator_includes_labels=False) -> \
typing.Generator[GestureAcceptanceResult, None, None]:
"""
Starts gesture recognition. It uses images supplied by
image_generator.
Parameters
----------
image_generator : generator
The source of images.
generator_includes_labels : bool
Whether the generator also returns labels.
Returns
-------
Generator[GestureAcceptanceResult]
Yields instances of the GestureAcceptanceResult class.
"""
image_idx = 0
norm, mean = None, None
# Prepare figure for live plotting, but only if we are really going to plot.
if self.plot_result:
if self.plot_feedback:
fig, ax = plots.plot_skeleton_with_jre_subplots()
else:
fig, ax = image_plot()
for image_array in image_generator:
# If the generator also returns labels, expand the tuple
if generator_includes_labels:
image_array, gesture_label = image_array
if tf.rank(image_array) == 4:
image_array = image_array[0]
joints_uvz = self.estimator.estimate_from_image(image_array)
# Detection failed, continue to next image
if joints_uvz is None:
continue
joints_xyz = self.camera.pixel_to_world(joints_uvz)
acceptance_result = self.accept_gesture(joints_xyz)
if generator_includes_labels:
acceptance_result.expected_gesture_label = gesture_label.numpy()
# plot the hand position with gesture label
image_subregion = self.estimator.get_cropped_image()
joints_subregion = self.estimator.convert_to_cropped_coords(joints_uvz)
if self.plot_result:
gesture_label = self._get_gesture_label(acceptance_result)
if self.plot_feedback:
# get JREs
jres = acceptance_result.joints_jre[:, acceptance_result.predicted_gesture_idx]
if self.plot_orientation:
norm, mean = self._get_orientation_vectors_in_2d(acceptance_result)
plots.plot_skeleton_with_jre_live(
fig, ax, image_subregion, joints_subregion, jres,
label=gesture_label, norm_vec=norm, mean_vec=mean)
else:
plots.plot_skeleton_with_label_live(fig, ax, image_subregion, joints_subregion, gesture_label)
image_idx += 1
yield acceptance_result
def accept_gesture(self, keypoints: np.ndarray) -> GestureAcceptanceResult:
"""
Compares given keypoints to the ones stored in the database
and decides whether the hand satisfies some of the defined gestures.
Basically performs gesture recognition from the hand's skeleton.
Parameters
----------
keypoints ndarray of 21 keypoints, shape (batch_size, joints, coords)
"""
result = GestureAcceptanceResult()
result.joints_jre = joint_relation_errors(keypoints, self.gesture_database)
aggregated_errors = | np.sum(result.joints_jre, axis=-1) | numpy.sum |
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import json
import ngraph as ng
from test.ngraph.util import get_runtime, run_op_node
from ngraph.impl import Function, NodeVector
from ngraph.exceptions import UserInputError
@pytest.mark.parametrize('dtype', [np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64])
@pytest.config.gpu_skip(reason='Not implemented')
def test_simple_computation_on_ndarrays(dtype):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name='A')
parameter_b = ng.parameter(shape, dtype=dtype, name='B')
parameter_c = ng.parameter(shape, dtype=dtype, name='C')
model = (parameter_a + parameter_b) * parameter_c
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
value_a = np.array([[1, 2], [3, 4]], dtype=dtype)
value_b = np.array([[5, 6], [7, 8]], dtype=dtype)
value_c = np.array([[9, 10], [11, 12]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[54, 80], [110, 144]], dtype=dtype))
value_a = np.array([[13, 14], [15, 16]], dtype=dtype)
value_b = np.array([[17, 18], [19, 20]], dtype=dtype)
value_c = np.array([[21, 22], [23, 24]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[630, 704], [782, 864]], dtype=dtype))
def test_function_call():
runtime = get_runtime()
dtype = int
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name='A')
parameter_b = ng.parameter(shape, dtype=dtype, name='B')
parameter_c = ng.parameter(shape, dtype=dtype, name='C')
parameter_list = [parameter_a, parameter_b, parameter_c]
ops = ((parameter_a + parameter_b) * parameter_c)
func = Function(NodeVector([ops]), parameter_list, 'addmul')
fc = ng.function_call(func, NodeVector(parameter_list))
computation = runtime.computation(fc, parameter_a, parameter_b, parameter_c)
value_a = np.array([[1, 2], [3, 4]], dtype=dtype)
value_b = | np.array([[5, 6], [7, 8]], dtype=dtype) | numpy.array |
# The observation model contains the diagonals (stored as vectors) of the observation
# matrices for each possible sensor reading
# The last of these vectors contains the probabilities for the sensor to produce nothing
import numpy as np
import matplotlib.pyplot as plt
import random
import models.StateModel
class ObservationModel:
def __init__(self, stateModel):
self.__stateModel = stateModel
self.__rows, self.__cols, self.__head = stateModel.get_grid_dimensions()
self.__dim = self.__rows * self.__cols * self.__head #possible states
self.__num_readings = self.__rows * self.__cols + 1 #sensor readings
self.__vectors = | np.ones(shape=(self.__num_readings, self.__dim)) | numpy.ones |
import numpy as np
import subprocess
import fileinput
import argparse
import random
import shutil
import math
import cv2
import os
import sys
global counter
def createNumpyMatrix(geometricVertices):
"""Parse the strings from the obj file and convert them into numpy matrix of floats to perform math efficiently"""
vertices = []
for line in geometricVertices:
# convert the string to floats for x,y,z coordinates
elements = list(map(lambda x: float(x), line.split()[1:]))
vertices.append(elements)
# convert to 3 x numPoints matrix
vertices = np.asarray(vertices)
vertices = vertices.T
#print(vertices.shape)
return vertices
def getCenterOfMass(geometricVertices):
# com will be a 3x1 vector
com = np.average(geometricVertices, axis=1)
com = com.reshape(3,1)
return com
def centerAndScaleObject(geometricVertices, com, resize, meshIsAreaLight):
"""Translate the object vertices so that they are centered around the origin"""
geometricVertices = geometricVertices - com
stdev = np.std(geometricVertices, axis=1) / float(resize)
stdev = stdev.reshape(3,1)
if not meshIsAreaLight:
# do not scale the area light mesh object
geometricVertices = geometricVertices / stdev
return geometricVertices
def getRotationMatrix(angleX=0.0, angleY=0.0, angleZ=0.0):
if angleX == 0.0 and angleY == 0.0 and angleZ == 0.0:
angleX = round(random.uniform(0, 2*math.pi), 2)
angleY = round(random.uniform(0, 2*math.pi), 2)
angleZ = round(random.uniform(0, 2*math.pi), 2)
Rx = np.array([[1, 0, 0], [0, math.cos(angleX), -math.sin(angleX)], [0, math.sin(angleX), math.cos(angleX)]], dtype=np.float)
Ry = np.array([[math.cos(angleY), 0, math.sin(angleY)], [0, 1, 0], [-math.sin(angleY), 0, math.cos(angleY)]], dtype=np.float)
Rz = np.array([[math.cos(angleZ), -math.sin(angleZ), 0], [math.sin(angleZ), math.cos(angleZ), 0], [0, 0, 1]], dtype=np.float)
R = np.matmul(np.matmul(Rx, Ry), Rz)
#R = np.identity(3)
return R
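# Sanity sketch (added; the angles are arbitrary): the composition of the three axis
# rotations is orthonormal with determinant +1, so applying it to the vertex matrix
# preserves lengths and the object's shape.
def _demo_rotation_is_orthonormal():
    R = getRotationMatrix(0.3, 1.1, 2.0)
    assert np.allclose(np.matmul(R, R.T), np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)
    return R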
def rotateObject(geometricVertices, rotationMatrix):
"""Perform matrix multiplication - Rx to get the vertex coordinates after rotation"""
rotatedGeometricVertices = np.matmul(rotationMatrix, geometricVertices)
return rotatedGeometricVertices
def getAxisAlignedBoundingBox(geometricVertices):
mins = np.amin(geometricVertices, axis=1)
maxs = | np.amax(geometricVertices, axis=1) | numpy.amax |
import numpy as np
from numpy import linalg as la
EX = [[102.38590308, 114.03596477, 120.20542635]]
EXY = [[15875.05715902, 16286.31198753, 16212.65198293],
[16286.31198753, 17917.16620909, 18175.70219696],
[16212.65198293, 18175.70219696, 19657.62259017]]
a = EXY - np.dot(np.array(EX).T, np.array(EX))
print(a)
u, sigma, vt = | la.svd(a) | numpy.linalg.svd |
"""
Visualize Genetic Algorithm to find the shortest path for the travelling salesman problem.
Visit my tutorial website for more: https://mofanpy.com/tutorials/
"""
import matplotlib.pyplot as plt
import numpy as np
N_CITIES = 20 # DNA size
CROSS_RATE = 0.1
MUTATE_RATE = 0.02
POP_SIZE = 500
N_GENERATIONS = 500
class GA(object):
def __init__(self, DNA_size, cross_rate, mutation_rate, pop_size, ):
self.DNA_size = DNA_size
self.cross_rate = cross_rate
self.mutate_rate = mutation_rate
self.pop_size = pop_size
self.pop = np.vstack([np.random.permutation(DNA_size) for _ in range(pop_size)])
def translateDNA(self, DNA, city_position): # get cities' coord in order
line_x = np.empty_like(DNA, dtype=np.float64)
line_y = np.empty_like(DNA, dtype=np.float64)
for i, d in enumerate(DNA):
city_coord = city_position[d]
line_x[i, :] = city_coord[:, 0]
line_y[i, :] = city_coord[:, 1]
return line_x, line_y
def get_fitness(self, line_x, line_y):
total_distance = | np.empty((line_x.shape[0],), dtype=np.float64) | numpy.empty |
import numpy as np
import py.test
import random
from weldnumpy import weldarray, erf as welderf
import scipy.special as ss
'''
TODO0: Decompose heavily repeated stuff, like the assert blocks and so on.
TODO: New tests:
- reduce ufuncs: at least the supported ones.
- use np.add.reduce syntax for the reduce ufuncs.
- getitem: lists and ndarrays + ints.
- error based tests: nan; underflow/overflow; unsupported types [true] * [...] etc;
- long computational graphs - that segfault or take too long; will require implicit evaluation
when the nested ops get too many.
- edge/failing cases: out = ndarray for op involving weldarrays.
- update elements of an array in a loop etc. --> setitem test.
- setitem + views tests.
'''
UNARY_OPS = [np.exp, np.log, np.sqrt]
# TODO: Add wa.erf - doesn't use the ufunc functionality of numpy so not doing it for
# now.
BINARY_OPS = [np.add, np.subtract, np.multiply, np.divide]
REDUCE_UFUNCS = [np.add.reduce, np.multiply.reduce]
# FIXME: weld mergers dont support non-commutative ops --> need to find a workaround for this.
# REDUCE_UFUNCS = [np.add.reduce, np.subtract.reduce, np.multiply.reduce, np.divide.reduce]
TYPES = ['float32', 'float64', 'int32', 'int64']
NUM_ELS = 10
# TODO: Create test with all other ufuncs.
def random_arrays(num, dtype):
'''
Generates random Weld array, and numpy array of the given num elements.
'''
# np.random does not support specifying dtype, so this is a weird
# way to support both float/int random numbers
test = np.zeros((num), dtype=dtype)
test[:] = np.random.randn(*test.shape)
test = np.abs(test)
# at least add 1 so no 0's (o.w. divide errors)
random_add = np.random.randint(1, high=10, size=test.shape)
test = test + random_add
test = test.astype(dtype)
np_test = np.copy(test)
w = weldarray(test, verbose=False)
return np_test, w
def given_arrays(l, dtype):
'''
@l: list.
returns a np array and a weldarray.
'''
test = np.array(l, dtype=dtype)
np_test = np.copy(test)
w = weldarray(test)
return np_test, w
def test_unary_elemwise():
'''
Tests all the unary ops in UNARY_OPS.
FIXME: For now, unary ops seem to only be supported on floats.
'''
for op in UNARY_OPS:
for dtype in TYPES:
# int still not supported for the unary ops in Weld.
if "int" in dtype:
continue
np_test, w = random_arrays(NUM_ELS, dtype)
w2 = op(w)
np_result = op(np_test)
w2_eval = w2.evaluate()
assert np.allclose(w2, np_result)
assert np.array_equal(w2_eval, np_result)
def test_binary_elemwise():
'''
'''
for op in BINARY_OPS:
for dtype in TYPES:
np_test, w = random_arrays(NUM_ELS, dtype)
np_test2, w2 = random_arrays(NUM_ELS, dtype)
w3 = op(w, w2)
weld_result = w3.evaluate()
np_result = op(np_test, np_test2)
# Need array equal to keep matching types for weldarray, otherwise
# allclose tries to subtract floats from ints.
assert np.array_equal(weld_result, np_result)
def test_multiple_array_creation():
'''
Minor edge case but it fails right now.
---would probably be fixed after we get rid of the loop fusion at the numpy
level.
'''
np_test, w = random_arrays(NUM_ELS, 'float32')
w = weldarray(w) # creating array again.
w2 = np.exp(w)
weld_result = w2.evaluate()
np_result = np.exp(np_test)
assert np.allclose(weld_result, np_result)
def test_array_indexing():
'''
    Need to decide: If a weldarray item is accessed - should we evaluate the
whole array (for expected behaviour to match numpy) or not?
'''
pass
def test_numpy_operations():
'''
    Test operations that aren't implemented yet - they should be passed on to
    numpy's implementation, and return weldarrays.
'''
np_test, w = random_arrays(NUM_ELS, 'float32')
np_result = np.sin(np_test)
w2 = np.sin(w)
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_type_conversion():
'''
After evaluating, the dtype of the returned array must be the same as
before.
'''
for t in TYPES:
_, w = random_arrays(NUM_ELS, t)
_, w2 = random_arrays(NUM_ELS, t)
w2 = np.add(w, w2)
weld_result = w2.evaluate()
assert weld_result.dtype == t
def test_concat():
'''
Test concatenation of arrays - either Weld - Weld, or Weld - Numpy etc.
'''
pass
def test_views_basic():
'''
Taking views into a 1d weldarray should return a weldarray view of the
correct data without any copying.
'''
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
assert isinstance(w2, weldarray)
def test_views_update_child():
'''
    Updates both the parent and the child to put more strain.
'''
def asserts(w, n, w2, n2):
assert np.allclose(w[2:5], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
NUM_ELS = 10
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
# unary part
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
asserts(w, n, w2, n2)
# binary part
n3, w3 = random_arrays(3, 'float32')
n2 = np.add(n2, n3, out=n2)
w2 = np.add(w2, w3, out=w2)
w2.evaluate()
asserts(w, n, w2, n2)
w2 += 5.0
n2 += 5.0
w2.evaluate()
asserts(w, n, w2, n2)
def test_views_update_parent():
'''
Create a view, then update the parent in place. The change should be
    reflected in the view-child as well.
'''
def asserts(w, n, w2, n2):
assert np.allclose(w[2:4], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:4]
n2 = n[2:4]
w = np.exp(w, out=w)
n = np.exp(n, out=n)
w2.evaluate()
print(w2)
print(w[2:4])
# w2 should have been updated too.
asserts(w, n, w2, n2)
n3, w3 = random_arrays(NUM_ELS, 'float32')
w = np.add(w, w3, out=w)
n = np.add(n, n3, out=n)
asserts(w, n, w2, n2)
assert np.allclose(w3, n3)
# check scalars
w += 5.0
n += 5.0
w.evaluate()
asserts(w, n, w2, n2)
def test_views_update_mix():
'''
'''
n, w = random_arrays(10, 'float32')
# Let's add more complexity. Before messing with child views etc, first
# register an op with the parent as well.
n = np.sqrt(n)
w = np.sqrt(w)
# get the child views
w2 = w[2:5]
n2 = n[2:5]
    # updating the values in place is still reflected correctly.
w = np.log(w, out=w)
n = np.log(n, out=n)
# evaluating this causes the internal representation to change. So can't
# rely on w.weldobj.context[w.name] anymore.
w.evaluate()
# print("w2 before exp: ", w2)
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
def test_views_mix2():
'''
update parent/child, binary/unary ops.
'''
NUM_ELS = 10
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
n3, w3 = random_arrays(NUM_ELS, 'float32')
w = np.add(w, w3, out=w)
n = np.add(n, n3, out=n)
assert np.allclose(w[2:5], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
# now update the child
def test_views_grandparents_update_mix():
'''
Similar to above. Ensure consistency of views of views etc.
'''
n, w = random_arrays(10, 'float32')
# Let's add more complexity. Before messing with child views etc, first
# register an op with the parent as well.
# TODO: uncomment.
n = np.sqrt(n)
w = np.sqrt(w)
# get the child views
w2 = w[2:9]
n2 = n[2:9]
w3 = w2[2:4]
n3 = n2[2:4]
assert np.allclose(w3.evaluate(), n3)
    # updating the values in place is still reflected correctly.
w = np.log(w, out=w)
n = np.log(n, out=n)
# evaluating this causes the internal representation to change. So can't
# rely on w.weldobj.context[w.name] anymore.
w.evaluate()
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
# w2.evaluate()
w3 = np.sqrt(w3, out=w3)
n3 = np.sqrt(n3, out=n3)
assert np.allclose(w[2:9], w2)
assert np.allclose(w2, n2)
assert np.allclose(w3, n3)
assert np.allclose(w, n)
assert np.allclose(w2[2:4], w3)
def test_views_check_old():
'''
Old views should still be valid etc.
'''
pass
def test_views_mess():
'''
More complicated versions of the views test.
'''
# parent arrays
NUM_ELS = 100
num_views = 10
n, w = random_arrays(NUM_ELS, 'float32')
# in order to avoid sqrt running into bad values
w += 1000.00
n += 1000.00
weld_views = []
np_views = []
weld_views2 = []
np_views2 = []
for i in range(num_views):
nums = random.sample(range(0,NUM_ELS), 2)
start = min(nums)
end = max(nums)
# FIXME: Need to add correct behaviour in this case.
if start == end:
continue
weld_views.append(w[start:end])
np_views.append(n[start:end])
np.sqrt(weld_views[i], out=weld_views[i])
np.sqrt(np_views[i], out=np_views[i])
np.log(weld_views[i], out=weld_views[i])
np.log(np_views[i], out=np_views[i])
np.exp(weld_views[i], out=weld_views[i])
np.exp(np_views[i], out=np_views[i])
# add some binary ops.
n2, w2 = random_arrays(len(np_views[i]), 'float32')
weld_views[i] = np.add(weld_views[i], w2, out=weld_views[i])
np_views[i] = np.add(np_views[i], n2, out=np_views[i])
# weld_views[i].evaluate()
a = np.log(weld_views[i])
b = np.log(np_views[i])
assert np.allclose(a, b)
w = np.sqrt(w, out=w)
n = np.sqrt(n, out=n)
assert np.allclose(n, w)
assert np.array_equal(w.evaluate(), n)
# TODO: Add stuff with grandchildren, and so on.
for i in range(num_views):
assert np.array_equal(np_views[i], weld_views[i].evaluate())
assert np.allclose(np_views[i], weld_views[i])
def test_views_overlap():
'''
Two overlapping views of the same array. Updating one must result in the
other being updated too.
'''
NUM_ELS = 10
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
# TODO: uncomment
w3 = w[4:7]
n3 = n[4:7]
# w4, n4 are non overlapping views. Values should never change
w4 = w[7:9]
n4 = n[7:9]
# w5, n5 are contained within w2, n2.
w5 = w[3:4]
n5 = n[3:4]
# unary part
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
assert np.allclose(w5, n5)
assert np.allclose(w4, n4)
assert np.allclose(w3, n3)
print("starting binary part!")
# binary part:
# now update the child with binary op
n3, w3 = random_arrays(3, 'float32')
# n3, w3 = given_arrays([1.0, 1.0, 1.0], 'float32')
n2 = np.add(n2, n3, out=n2)
print('going to do np.add on w2,w3, out=w2')
w2 = np.add(w2, w3, out=w2)
# assert np.allclose(w[2:5], w2)
assert np.allclose(w, n)
assert np.allclose(w2.evaluate(), n2)
print('w5: ', w5)
print(n5)
assert np.allclose(w5, n5)
assert np.allclose(w4, n4)
assert np.allclose(w3, n3)
w2 += 5.0
n2 += 5.0
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w, n)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w5, n5)
assert np.allclose(w4, n4)
assert np.allclose(w3, n3)
def test_mix_np_weld_ops():
'''
Weld Ops + Numpy Ops - before executing any of the numpy ops, the
    registered weld ops must be evaluated.
'''
np_test, w = random_arrays(NUM_ELS, 'float32')
np_test = np.exp(np_test)
np_result = np.sin(np_test)
w2 = np.exp(w)
w2 = np.sin(w2)
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_scalars():
'''
Special case of broadcasting rules - the scalar is applied to all the
    weldarray members.
'''
t = "int32"
print("t = ", t)
n, w = random_arrays(NUM_ELS, t)
n2 = n + 2
w2 = w + 2
w2 = w2.evaluate()
assert np.allclose(w2, n2)
# test by combining it with binary op.
n, w = random_arrays(NUM_ELS, t)
w += 10
n += 10
n2, w2 = random_arrays(NUM_ELS, t)
w = np.add(w, w2)
n = np.add(n, n2)
assert np.allclose(w, n)
t = "float32"
print("t = ", t)
np_test, w = random_arrays(NUM_ELS, t)
np_result = np_test + 2.00
w2 = w + 2.00
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_stale_add():
'''
Registers op for weldarray w2, and then add it to w1. Works trivially
because updating a weldobject with another weldobject just needs to get the
naming right.
'''
n1, w1 = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
w2 = np.exp(w2)
n2 = np.exp(n2)
w1 = np.add(w1, w2)
n1 = np.add(n1, n2)
w1 = w1.evaluate()
assert np.allclose(w1, n1)
def test_cycle():
'''
This was a problem when I was using let statements to hold intermediate
weld code. (because of my naming scheme)
'''
n1, w1 = given_arrays([1.0, 2.0], 'float32')
n2, w2 = given_arrays([3.0, 3.0], 'float32')
# w3 depends on w1.
w3 = np.add(w1, w2)
n3 = np.add(n1, n2)
# changing this to some other variable lets us pass the test.
w1 = np.exp(w1)
n1 = np.exp(n1)
w1 = np.add(w1,w3)
n1 = np.add(n1, n3)
assert np.allclose(w1.evaluate(), n1)
assert np.allclose(w3.evaluate(), n3)
def test_self_assignment():
n1, w1 = given_arrays([1.0, 2.0], 'float32')
n2, w2 = given_arrays([2.0, 1.0], 'float32')
w1 = np.exp(w1)
n1 = np.exp(n1)
assert np.allclose(w1.evaluate(), n1)
w1 = w1 + w2
n1 = n1 + n2
assert np.allclose(w1.evaluate(), n1)
def test_reuse_array():
'''
    a = np.add(b, c)
Ensure that despite sharing underlying memory of ndarrays, future ops on a
and b should not affect each other as calculations are performed based on
the weldobject which isn't shared between the two.
'''
n1, w1 = given_arrays([1.0, 2.0], 'float32')
n2, w2 = given_arrays([2.0, 1.0], 'float32')
w3 = np.add(w1, w2)
n3 = np.add(n1, n2)
w1 = np.log(w1)
n1 = np.log(n1)
w3 = np.exp(w3)
n3 = np.exp(n3)
w1 = w1 + w3
n1 = n1 + n3
w1_result = w1.evaluate()
assert np.allclose(w1_result, n1)
w3_result = w3.evaluate()
assert np.allclose(w3_result, n3)
def test_fancy_indexing():
'''
TODO: Needs more complicated tests that mix different indexing strategies,
but since fancy indexing creates a new array - it shouldn't have any
problems dealing with further stuff.
'''
_, w = random_arrays(NUM_ELS, 'float64')
b = w > 0.50
w2 = w[b]
assert isinstance(w2, weldarray)
assert id(w) != id(w2)
def test_mixing_types():
'''
mixing f32 with f64, or i32 with f64.
Weld doesn't seem to support this right now, so pass it on to np.
'''
n1, w1 = random_arrays(2, 'float64')
n2, w2 = random_arrays(2, 'float32')
w3 = w1 + w2
n3 = n1 + n2
assert np.array_equal(n3, w3.evaluate())
def test_inplace_assignment():
'''
With the output optimization, this should be quite efficient for weld.
'''
n, w = random_arrays(100, 'float32')
n2, w2 = random_arrays(100, 'float32')
orig_addr = id(w)
for i in range(100):
n += n2
w += w2
# Ensures that the stuff above happened in place.
assert id(w) == orig_addr
w3 = w.evaluate()
assert np.allclose(n, w)
def test_nested_weld_expr():
'''
map(zip(map(...))) kind of really long nested expressions.
Add a timeout - it shouldn't take literally forever as it does now.
'''
pass
def test_getitem_evaluate():
'''
    Should evaluate stuff before returning from getitem.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
n += n2
w += w2
assert n[0] == w[0]
def test_implicit_evaluate():
n, w = random_arrays(2, 'float32')
n2, w2 = random_arrays(2, 'float32')
w3 = w+w2
n3 = n+n2
print(w3)
w3 = w3.evaluate()
w3 = w3.evaluate()
assert np.allclose(w3, n3)
def test_setitem_basic():
'''
set an arbitrary item in the array after registering ops on it.
'''
# TODO: run this on all types.
n, w = random_arrays(NUM_ELS, 'float32')
n[0] = 5.0
w[0] = 5.0
assert np.allclose(n, w)
n[0] += 10.0
w[0] += 10.0
assert np.allclose(n, w)
n[2] -= 5.0
w[2] -= 5.0
assert np.allclose(n, w)
def test_setitem_slice():
'''
'''
n, w = random_arrays(NUM_ELS, 'float32')
n[0:2] = [5.0, 2.0]
w[0:2] = [5.0, 2.0]
assert np.allclose(n, w)
n[4:6] += 10.0
w[4:6] += 10.0
assert np.allclose(n, w)
def test_setitem_strides():
'''
TODO: make more complicated versions which do multiple types of changes on strides at once.
TODO2: need to support different strides.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n[0:2:1] = [5.0, 2.0]
w[0:2:1] = [5.0, 2.0]
print('w: ', w)
print('n: ', n)
assert np.allclose(n, w)
n[5:8:1] += 10.0
w[5:8:1] += 10.0
assert np.allclose(n, w)
def test_setitem_list():
'''
'''
n, w = random_arrays(NUM_ELS, 'float32')
a = [0, 3]
n[a] = [5.0, 13.0]
w[a] = [5.0, 13.0]
print('n: ', n)
print('w: ', w)
assert np.allclose(n, w)
def test_setitem_weird_indexing():
'''
try to confuse the weldarray with different indexing patterns.
'''
pass
def test_setitem_mix():
'''
Mix all setitem stuff / and other ops.
'''
n, w = random_arrays(NUM_ELS, 'float32')
    n = np.sqrt(n)
"""
Many of these tests use the minimal test/data/gdc.bed file which has just
enough complexity to be useful in testing corner cases. When reading through
the tests, it's useful to have that file open to understand what's happening.
"""
import os
import metaseq
import multiprocessing
from metaseq.array_helpers import ArgumentError
import numpy as np
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
nan = np.nan
inf = np.inf
gs = {}
for kind in ['bed', 'bam', 'bigbed', 'bigwig']:
gs[kind] = metaseq.genomic_signal(metaseq.example_filename('gdc.%s' % kind), kind)
PROCESSES = int(os.environ.get("METASEQ_PROCESSES", multiprocessing.cpu_count()))
def test_tointerval():
assert metaseq.helpers.tointerval("chr2L:1-10[-]").strand == '-'
assert metaseq.helpers.tointerval("chr2L:1-10[+]").strand == '+'
assert metaseq.helpers.tointerval("chr2L:1-10").strand == '.'
def test_local_count():
def check(kind, coord, expected, stranded):
try:
result = gs[kind].local_count(coord, stranded=stranded)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert result == expected, (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, expected, stranded in (
('chr2L:1-80', 3, False), # easy case
('chr2L:1000-3000', 0, False), # above upper boundary
('chr2L:1-9', 0, False), # below lower boundary
('chr2L:71-73[-]', 2, False), # unstranded = 2
('chr2L:71-73[-]', 1, True), # stranded = 1
('chr2L:70-71', 2, False), # pathological corner case
# ('chr2L:75-76', 0, False), # pathological corner case
):
yield check, kind, coord, expected, stranded
def test_local_coverage_stranded():
def check(kind, coord, expected):
try:
result = gs[kind].local_coverage(coord)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
('chr2L:1-20[-]',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.])[::-1],
# note reverse------------------------------------------------------------------------^^^^^^
),
),
('chr2L:68-76[-]',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 2., 2., 2., 2., 2., 0.])[::-1],
# note reverse----------------------------^^^^^^
),
),
):
yield check, kind, coord, expected
def test_local_coverage_shifted():
def check(kind, coord, shift_width, expected):
try:
result = gs[kind].local_coverage(coord, shift_width=shift_width)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, shift_width, expected in (
('chr2L:1-20', -2,
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.]),
),
),
# this one is complex, because the minus-strand read shifts left,
# and the plus-strand shifts right.
('chr2L:68-76', 1,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 1., 1., 2., 2., 2., 1., 1.]),
),
),
# shift the reads all the way out of the window...
('chr2L:68-76', 10,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 0., 0., 0., 0., 0., 0.]),
),
),
):
yield check, kind, coord, shift_width, expected
def test_local_coverage_read_strand():
"""
checks stranded full binning
excludes bigwig since strand doesn't make sense for that format.
"""
def check(kind, coord, read_strand, expected):
try:
result = gs[kind].local_coverage(coord, read_strand=read_strand)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, read_strand, expected in (
('chr2L:1-20', '+',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.]),
),
),
('chr2L:1-20', '-',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
),
),
):
yield check, kind, coord, read_strand, expected
def test_local_coverage_fragment_size():
def check(kind, coord, fragment_size, expected):
try:
result = gs[kind].local_coverage(coord, fragment_size=fragment_size)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, fragment_size, expected in (
('chr2L:1-20', 7,
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0.]),
),
),
('chr2L:68-76', 6,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 1., 2., 2., 2., 2., 2., 1.]),
),
),
('chr2L:68-76', 1,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 1., 0., 0., 0., 1., 0.]),
),
),
):
yield check, kind, coord, fragment_size, expected
def test_local_coverage_score():
def check(kind, coord, expected):
try:
result = gs[kind].local_coverage(coord, use_score=True)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bigbed', 'bed']:
for coord, expected in (
('chr2L:1-20',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 255., 255., 255., 255., 255., 0., 0., 0., 0., 0.]),
),
),
('chr2L:68-76',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 510., 510., 510., 510., 510., 0.]),
),
),
):
yield check, kind, coord, expected
def test_local_coverage_full():
"""generator of tests for local coverage
ensures that all formats are consistent in their results when retrieving
the full un-binned data.
"""
def check(kind, coord, processes, expected):
try:
result = gs[kind].local_coverage(coord, processes=processes)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
('chr2L:1-20',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.]),
),
),
('chr2L:68-76',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 2., 2., 2., 2., 2., 0.]),
),
),
('chr2L:568-576',
(
np.array([568, 569, 570, 571, 572, 573, 574, 575]),
np.array([0., 0., 0., 0., 0., 0., 0., 0.]),
),
),
):
for processes in [None, PROCESSES]:
yield check, kind, coord, processes, expected
def test_local_coverage_binned():
"""generator of tests for local coverage
ensures that all formats are consistent in their results when retrieving
binned data.
"""
def check(kind, coord, processes, expected):
if kind == 'bigwig':
result = gs[kind].local_coverage(coord, bins=8, method='get_as_array', processes=processes)
else:
try:
result = gs[kind].local_coverage(coord, bins=8, processes=processes)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
try:
assert np.allclose(result[0], expected[0]) and np.allclose(result[1], expected[1])
except:
print (kind, coord, result, expected)
raise
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
('chr2L:1-20',
(
np.array([ 1., 3.57142857, 6.14285714, 8.71428571, 11.28571429, 13.85714286, 16.42857143, 19.]),
np.array([ 0., 0., 0., 0., 1., 1., 0., 0. ]),
),
),
('chr2L:68-76',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 2., 2., 2., 2., 2., 0.]),
),
),
):
for processes in [None, PROCESSES]:
yield check, kind, coord, processes, expected
def test_array_binned():
def check(kind, coord, processes, expected):
if kind == 'bigwig':
result = gs[kind].array(coord, bins=8, method='get_as_array', processes=processes)
else:
try:
result = gs[kind].array(coord, bins=8, processes=processes)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
try:
assert np.allclose(result, expected)
except:
print (kind, coord, result, expected)
raise
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
(['chr2L:1-20'],
np.array([[0., 0., 0., 0., 1., 1., 0., 0. ]]),
),
(['chr2L:1-20', 'chr2L:1-20[-]'],
np.array([[0., 0., 0., 0., 1., 1., 0., 0. ],
[0., 0., 1., 1., 0., 0., 0., 0. ]]),
),
(['chr2L:68-76'],
            np.array([[0., 0., 2., 2., 2., 2., 2., 0.]]),
# coding: utf-8
# In[1]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Config the matplotlib backend as plotting inline in IPython
get_ipython().magic('matplotlib inline')
# In[2]:
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
# In[3]:
num_classes = 10
np.random.seed(133)
"""
Testing code.
Updated BSM February 2017
"""
import sys
import os
import numpy as np
import pytest
from pytest import approx
from numpy.testing import assert_allclose
from scipy.spatial.distance import cdist
from pykrige import kriging_tools as kt
from pykrige import core
from pykrige import variogram_models
from pykrige.ok import OrdinaryKriging
from pykrige.uk import UniversalKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk3d import UniversalKriging3D
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
allclose_pars = {"rtol": 1e-05, "atol": 1e-08}
@pytest.fixture
def validation_ref():
data = np.genfromtxt(os.path.join(BASE_DIR, "test_data/test_data.txt"))
ok_test_answer, ok_test_gridx, ok_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test1_answer.asc"), footer=2
)
uk_test_answer, uk_test_gridx, uk_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test2_answer.asc"), footer=2
)
return (
data,
(ok_test_answer, ok_test_gridx, ok_test_gridy),
(uk_test_answer, uk_test_gridx, uk_test_gridy),
)
@pytest.fixture
def sample_data_2d():
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 6.0, 1.0)
gridx_2 = np.arange(0.0, 5.5, 0.5)
gridy = np.arange(0.0, 5.5, 0.5)
xi, yi = np.meshgrid(gridx, gridy)
mask = np.array(xi == yi)
return data, (gridx, gridy, gridx_2), mask
@pytest.fixture
def sample_data_3d():
data = np.array(
[
[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7],
]
)
gridx = np.arange(0.0, 0.6, 0.05)
gridy = np.arange(0.0, 0.6, 0.01)
gridz = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(gridz, gridy, gridx, indexing="ij")
mask = np.array((xi == yi) & (yi == zi))
return data, (gridx, gridy, gridz), mask
def test_core_adjust_for_anisotropy():
X = np.array([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0])
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 2.0, 0.0]), **allclose_pars)
def test_core_adjust_for_anisotropy_3d():
    # this is a bad example, as the X matrix is symmetric
# and insensitive to transpositions
X = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]).T
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [90.0, 0.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([1.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 0.0, 2.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, -2.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 90.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 2.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([2.0, 0.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 0.0, 90.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, 0.0, 2.0]), **allclose_pars)
def test_core_make_variogram_parameter_list():
# test of first case - variogram_model_parameters is None
# function should return None unaffected
result = core._make_variogram_parameter_list("linear", None)
assert result is None
# tests for second case - variogram_model_parameters is dict
with pytest.raises(KeyError):
core._make_variogram_parameter_list("linear", {"tacos": 1.0, "burritos": 2.0})
result = core._make_variogram_parameter_list(
"linear", {"slope": 1.0, "nugget": 0.0}
)
assert result == [1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("power", {"frijoles": 1.0})
result = core._make_variogram_parameter_list(
"power", {"scale": 2.0, "exponent": 1.0, "nugget": 0.0}
)
assert result == [2.0, 1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("exponential", {"tacos": 1.0})
with pytest.raises(KeyError):
core._make_variogram_parameter_list(
"exponential", {"range": 1.0, "nugget": 1.0}
)
result = core._make_variogram_parameter_list(
"exponential", {"sill": 5.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list(
"exponential", {"psill": 4.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
with pytest.raises(TypeError):
core._make_variogram_parameter_list("custom", {"junk": 1.0})
with pytest.raises(ValueError):
core._make_variogram_parameter_list("blarg", {"junk": 1.0})
# tests for third case - variogram_model_parameters is list
with pytest.raises(ValueError):
core._make_variogram_parameter_list("linear", [1.0, 2.0, 3.0])
result = core._make_variogram_parameter_list("linear", [1.0, 2.0])
assert result == [1.0, 2.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("power", [1.0, 2.0])
result = core._make_variogram_parameter_list("power", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("exponential", [1.0, 2.0, 3.0, 4.0])
result = core._make_variogram_parameter_list("exponential", [5.0, 2.0, 1.0])
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list("custom", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("junk", [1.0, 1.0, 1.0])
# test for last case - make sure function handles incorrect
# variogram_model_parameters type appropriately
with pytest.raises(TypeError):
core._make_variogram_parameter_list("linear", "tacos")
def test_core_initialize_variogram_model(validation_ref):
data, _, _ = validation_ref
# Note the variogram_function argument is not a string in real life...
# core._initialize_variogram_model also checks the length of input
# lists, which is redundant now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
# core._initialize_variogram_model does also check coordinate type,
# this is NOT redundant
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0, 0.0, 0.0],
"spherical",
6,
False,
"tacos",
)
x = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack((x, y)).T, z, "linear", [0.0, 0.0], "linear", 6, False, "euclidean"
)
assert_allclose(lags, np.array([1.0, 2.0, 3.0]))
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
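    # Worked aside (not from the original test): with collinear samples z = [1, 2, 3, 4]
    # spaced one unit apart, every pair separated by lag h differs by exactly h, so the
    # experimental semivariance is gamma(h) = 0.5 * mean((z_i - z_j)**2) = 0.5 * h**2,
    # which gives the expected values 0.5, 2.0 and 4.5 for h = 1, 2, 3.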
def test_core_initialize_variogram_model_3d(sample_data_3d):
data, _, _ = sample_data_3d
# Note the variogram_function argument is not a string in real life...
# again, these checks in core._initialize_variogram_model are redundant
# now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0, 0.0],
"linear",
6,
False,
"geographic",
)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack(
(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
)
).T,
np.array([1.0, 2.0, 3.0, 4.0]),
"linear",
[0.0, 0.0],
"linear",
3,
False,
"euclidean",
)
assert_allclose(
lags, np.array([np.sqrt(3.0), 2.0 * np.sqrt(3.0), 3.0 * np.sqrt(3.0)])
)
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_calculate_variogram_model():
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
False,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
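    # Worked aside (not from the original test): an ordinary least-squares line through
    # (1, 2.05), (2, 2.95), (3, 4.05), (4, 4.95) has slope 4.9 / 5.0 = 0.98 and intercept
    # 3.5 - 0.98 * 2.5 = 1.05, which is consistent with the expected
    # [slope, nugget] = [0.98, 1.05] asserted above (to within the 1% tolerance).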
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
True,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.8284271, 5.1961524, 8.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 1.5, 0.0]), 0.001, 0.001)
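    # Aside (not from the original test): the semivariances in the previous fit are exactly
    # lags**1.5 (1, 2**1.5 = 2.8284, 3**1.5 = 5.1961, 4**1.5 = 8), so a power model with
    # scale = 1.0, exponent = 1.5 and nugget = 0.0 reproduces them exactly.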
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 1.4142, 1.7321, 2.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 0.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.2642, 1.7293, 1.9004, 1.9634]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([0.5769, 1.4872, 1.9065, 1.9914]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([3.33060952, 3.85063879, 3.96667301, 3.99256374]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.60487044, 3.85968813, 3.99694817, 3.99998564]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
def test_core_krige():
# Example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.22], [43.8, 24.6, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([18.8, 67.9]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([43.8, 24.6]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_core_krige_3d():
# Adapted from example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.0, 1.22], [43.8, 24.6, 1.0, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([18.8, 67.9, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([43.8, 24.6, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_non_exact():
# custom data for this test
data = np.array(
[
[0.0, 0.0, 0.47],
[1.5, 1.5, 0.56],
[3, 3, 0.74],
[4.5, 4.5, 1.47],
]
)
# construct grid points so diagonal
# is identical to input points
gridx = np.arange(0.0, 4.51, 1.5)
gridy = np.arange(0.0, 4.51, 1.5)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
ok_non_exact = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
exact_values=False,
)
z_non_exact, ss_non_exact = ok_non_exact.execute(
"grid", gridx, gridy, backend="vectorized"
)
in_values = np.diag(z)
    # test that the kriged field at the input locations is identical
    # to the inputs themselves with exact_values == True
assert_allclose(in_values, data[:, 2])
    # test that the kriged field at the input locations differs
    # from the inputs themselves with exact_values == False
assert ~np.allclose(in_values, data[:, 2])
# test that off diagonal values are the same
# by filling with dummy value and comparing
# each entry in array
np.fill_diagonal(z, 0.0)
np.fill_diagonal(z_non_exact, 0.0)
assert_allclose(z, z_non_exact)
def test_ok(validation_ref):
# Test to compare OK results to those obtained using KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, (ok_test_answer, gridx, gridy), _ = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, ok_test_answer)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, ok_test_answer)
def test_ok_update_variogram_model(validation_ref):
data, (ok_test_answer, gridx, gridy), _ = validation_ref
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
with pytest.raises(ValueError):
ok.update_variogram_model("blurg")
ok.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
    # TODO: check that the new parameters are equal to the set parameters
assert variogram_model != ok.variogram_model
assert not np.array_equal(variogram_parameters, ok.variogram_model_parameters)
assert anisotropy_scaling != ok.anisotropy_scaling
assert anisotropy_angle != ok.anisotropy_angle
def test_ok_get_variogram_points(validation_ref):
# Test to compare the variogram of OK results to those obtained using
# KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (ok_test_answer, gridx, gridy) = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
)
# Get the variogram points from the UniversalKriging instance
lags, calculated_variogram = ok.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_ok_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], exact_values="blurg")
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
with pytest.raises(ValueError):
ok.execute("blurg", gridx, gridy)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok_non_exact.execute(
"masked", gridx, gridy, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(ValueError):
ok.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
backend="vectorized",
)
z, ss = ok.execute("points", gridx[0], gridy[0], backend="vectorized")
assert z.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
ok.execute(
"points", np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]), backend="loop"
)
z, ss = ok.execute("points", gridx[0], gridy[0], backend="loop")
assert z.shape == (1,)
assert ss.shape == (1,)
def test_cython_ok(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
z1, ss1 = ok.execute("grid", gridx, gridy, backend="loop")
z2, ss2 = ok.execute("grid", gridx, gridy, backend="C")
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
z2, ss2 = ok_non_exact.execute("grid", gridx, gridy, backend="C")
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
closest_points = 4
z1, ss1 = ok.execute(
"grid", gridx, gridy, backend="loop", n_closest_points=closest_points
)
z2, ss2 = ok.execute(
"grid", gridx, gridy, backend="C", n_closest_points=closest_points
)
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
z1, ss1 = ok_non_exact.execute(
"grid", gridx, gridy, backend="loop", n_closest_points=closest_points
)
z2, ss2 = ok_non_exact.execute(
"grid", gridx, gridy, backend="C", n_closest_points=closest_points
)
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
def test_uk(validation_ref):
# Test to compare UK with linear drift to results from KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, _, (uk_test_answer, gridx, gridy) = validation_ref
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
drift_terms=["regional_linear"],
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, uk_test_answer)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, uk_test_answer)
def test_uk_update_variogram_model(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], drift_terms=["external_Z"])
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
drift_terms=["external_Z"],
external_drift=np.array([0]),
)
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], drift_terms=["point_log"])
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = uk.variogram_model
variogram_parameters = uk.variogram_model_parameters
anisotropy_scaling = uk.anisotropy_scaling
anisotropy_angle = uk.anisotropy_angle
with pytest.raises(ValueError):
uk.update_variogram_model("blurg")
uk.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
# TODO: check that the new parameters are equal to the expected ones
assert variogram_model != uk.variogram_model
assert not np.array_equal(variogram_parameters, uk.variogram_model_parameters)
assert anisotropy_scaling != uk.anisotropy_scaling
assert anisotropy_angle != uk.anisotropy_angle
def test_uk_get_variogram_points(validation_ref):
# Test to compare the variogram of UK with linear drift to results from
# KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (uk_test_answer, gridx, gridy) = validation_ref
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
drift_terms=["regional_linear"],
)
# Get the variogram points from the UniversalKriging instance
lags, calculated_variogram = uk.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_uk_calculate_data_point_zscalars(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=np.arange(0.0, 5.0, 1.0),
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=dem_y,
)
assert_allclose(uk.z_scalars, data[:, 0])
xi, yi = np.meshgrid(np.arange(0.0, 5.3, 0.1), gridy)
with pytest.raises(ValueError):
uk._calculate_data_point_zscalars(xi, yi)
xi, yi = np.meshgrid(np.arange(0.0, 5.0, 0.1), gridy)
z_scalars = uk._calculate_data_point_zscalars(xi, yi)
assert_allclose(z_scalars[0, :], np.arange(0.0, 5.0, 0.1))
def test_uk_execute_single_point():
# Test data and answer from lecture notes by <NAME>, UCLA Stats
data = np.array(
[
[61.0, 139.0, 477.0],
[63.0, 140.0, 696.0],
[64.0, 129.0, 227.0],
[68.0, 128.0, 646.0],
[71.0, 140.0, 606.0],
[73.0, 141.0, 791.0],
[75.0, 128.0, 783.0],
]
)
point = (65.0, 137.0)
z_answer = 567.54
ss_answer = 9.044
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[10.0, 9.99, 0.0],
drift_terms=["regional_linear"],
)
z, ss = uk.execute(
"points", np.array([point[0]]), np.array([point[1]]), backend="vectorized"
)
assert z_answer == approx(z[0], rel=0.1)
assert ss_answer == approx(ss[0], rel=0.1)
z, ss = uk.execute(
"points", np.array([61.0]), np.array([139.0]), backend="vectorized"
)
assert z[0] == approx(477.0, rel=1e-3)
assert ss[0] == approx(0.0, rel=1e-3)
z, ss = uk.execute("points", np.array([61.0]), np.array([139.0]), backend="loop")
assert z[0] == approx(477.0, rel=1e-3)
assert ss[0] == approx(0.0, rel=1e-3)
def test_uk_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
exact_values="blurg",
)
    uk_non_exact = UniversalKriging(
        data[:, 0],
        data[:, 1],
        data[:, 2],
        variogram_model="linear",
        drift_terms=["regional_linear"],
        exact_values=False,
    )
with pytest.raises(ValueError):
uk.execute("blurg", gridx, gridy)
with pytest.raises(ValueError):
uk.execute("grid", gridx, gridy, backend="mrow")
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
uk.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
uk.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref.T, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk_non_exact.execute(
"masked", gridx, gridy, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(ValueError):
uk.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
backend="vectorized",
)
z, ss = uk.execute("points", gridx[0], gridy[0], backend="vectorized")
assert z.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
uk.execute(
"points", np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]), backend="loop"
)
z, ss = uk.execute("points", gridx[0], gridy[0], backend="loop")
assert z.shape == (1,)
assert ss.shape == (1,)
def test_ok_uk_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_ok, ss_ok = ok.execute("grid", gridx, gridy, backend="vectorized")
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
uk_non_exact = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
exact_values=False,
)
z_uk, ss_uk = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_uk, ss_uk = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_ok, ss_ok = ok.execute("grid", gridx, gridy, backend="loop")
z_uk, ss_uk = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_uk, ss_uk = uk_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
def test_ok_backends_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_ok_v, ss_ok_v = ok.execute("grid", gridx, gridy, backend="vectorized")
z_ok_l, ss_ok_l = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok_v, z_ok_l)
assert_allclose(ss_ok_v, ss_ok_l)
def test_uk_backends_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_uk_v, ss_uk_v = uk.execute("grid", gridx, gridy, backend="vectorized")
z_uk_l, ss_uk_l = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_uk_v, z_uk_l)
assert_allclose(ss_uk_v, ss_uk_l)
def test_kriging_tools(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
z_write, ss_write = ok.execute("grid", gridx, gridy)
kt.write_asc_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=1,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
z_write, ss_write = ok.execute("masked", gridx, gridy, mask=mask_ref)
kt.write_asc_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=1,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert np.ma.allclose(
z_write,
np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True,
rtol=0.01,
atol=0.01,
)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
z_write, ss_write = ok.execute("grid", gridx_2, gridy)
kt.write_asc_grid(
gridx_2,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=2,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx_2, x_read)
assert_allclose(gridy, y_read)
os.remove(os.path.join(BASE_DIR, "test_data/temp.asc"))
# http://doc.pytest.org/en/latest/skipping.html#id1
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_uk_three_primary_drifts(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
well = np.array([[1.1, 1.1, -1.0]])
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "external_Z", "point_log"],
point_drift=well,
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=dem_y,
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert z.shape == (gridy.shape[0], gridx.shape[0])
assert ss.shape == (gridy.shape[0], gridx.shape[0])
assert np.all(np.isfinite(z))
assert not np.all(np.isnan(z))
assert np.all(np.isfinite(ss))
assert not np.all(np.isnan(ss))
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert z.shape == (gridy.shape[0], gridx.shape[0])
assert ss.shape == (gridy.shape[0], gridx.shape[0])
assert np.all(np.isfinite(z))
assert not np.all(np.isnan(z))
assert np.all(np.isfinite(ss))
assert not np.all(np.isnan(ss))
def test_uk_specified_drift(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
xg, yg = np.meshgrid(gridx, gridy)
well = np.array([[1.1, 1.1, -1.0]])
point_log = (
well[0, 2]
* np.log(np.sqrt((xg - well[0, 0]) ** 2.0 + (yg - well[0, 1]) ** 2.0))
* -1.0
)
if np.any(np.isinf(point_log)):
point_log[np.isinf(point_log)] = -100.0 * well[0, 2] * -1.0
point_log_data = (
well[0, 2]
* np.log(
np.sqrt((data[:, 0] - well[0, 0]) ** 2.0 + (data[:, 1] - well[0, 1]) ** 2.0)
)
* -1.0
)
if np.any(np.isinf(point_log_data)):
point_log_data[np.isinf(point_log_data)] = -100.0 * well[0, 2] * -1.0
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
)
with pytest.raises(TypeError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=data[:, 0],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:2, 0]],
)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1]],
)
with pytest.raises(ValueError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=[gridx, gridy])
with pytest.raises(TypeError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=gridx)
with pytest.raises(ValueError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=[xg])
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[xg, yg]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[point_log_data],
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[point_log]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1], point_log_data],
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[xg, yg, point_log]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
def test_uk_functional_drift(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
well = np.array([[1.1, 1.1, -1.0]])
func_x = lambda x, y: x # noqa
func_y = lambda x, y: y # noqa
def func_well(x, y):
return -well[0, 2] * np.log(
np.sqrt((x - well[0, 0]) ** 2.0 + (y - well[0, 1]) ** 2.0)
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
)
with pytest.raises(TypeError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=func_x,
)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_well],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y, func_well],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
def test_uk_with_external_drift(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
dem, demx, demy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test3_dem.asc")
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="spherical",
variogram_parameters=[500.0, 3000.0, 0.0],
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=demx,
external_drift_y=demy,
verbose=False,
)
answer, gridx, gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test3_answer.asc")
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, answer, **allclose_pars)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, answer, **allclose_pars)
def test_force_exact():
data = np.array([[1.0, 1.0, 2.0], [2.0, 2.0, 1.5], [3.0, 3.0, 1.0]])
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 1.0],
)
z, ss = ok.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="vectorized")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = ok.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="vectorized"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="vectorized"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert not np.any(np.isclose(ss, 0))
z, ss = ok.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="vectorized",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = ok.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="loop")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = ok.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="loop"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="loop"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert not np.any(np.isclose(ss, 0))
z, ss = ok.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="loop",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
z, ss = uk.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="vectorized")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = uk.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="vectorized"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="vectorized"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert not (np.any(np.isclose(ss, 0)))
z, ss = uk.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="vectorized",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = uk.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="loop")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = uk.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="loop"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="loop"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert not np.any(np.isclose(ss, 0))
z, ss = uk.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="loop",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([1.0, 1.0]),
variogram_models.linear_variogram_model,
[1.0, 1.0],
"euclidean",
)
assert z == approx(2.0)
assert ss == approx(0.0)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([1.0, 2.0]),
variogram_models.linear_variogram_model,
[1.0, 1.0],
"euclidean",
)
assert ss != approx(0.0)
data = np.zeros((50, 3))
x, y = np.meshgrid(np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(x) * np.ravel(y)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[100.0, 1.0],
)
z, ss = ok.execute(
"grid",
np.arange(0.0, 10.0, 1.0),
np.arange(0.0, 10.0, 2.0),
backend="vectorized",
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = ok.execute(
"grid",
np.arange(0.5, 10.0, 1.0),
np.arange(0.5, 10.0, 2.0),
backend="vectorized",
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0), backend="loop"
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = ok.execute(
"grid", np.arange(0.5, 10.0, 1.0), np.arange(0.5, 10.0, 2.0), backend="loop"
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[100.0, 1.0],
)
z, ss = uk.execute(
"grid",
np.arange(0.0, 10.0, 1.0),
np.arange(0.0, 10.0, 2.0),
backend="vectorized",
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
# -*- coding: utf-8 -*-
# Copyright (c) 2019 The HERA Team
# Licensed under the 2-clause BSD License
from __future__ import print_function, division, absolute_import
from time import time
import numpy as np
import tensorflow as tf
import h5py
import random
from sklearn.metrics import confusion_matrix
from scipy import ndimage
from copy import copy
def transpose(X):
"""
Transpose for use in the map functions.
"""
return X.T
def normalize(X):
"""
Normalization for the log amplitude required in the folding process.
"""
sh = np.shape(X)
absX = np.abs(X)
absX = np.where(absX <= 0.0, (1e-8) * np.random.randn(sh[0], sh[1]), absX)
LOGabsX = np.nan_to_num(np.log10(absX))
return np.nan_to_num((LOGabsX - np.nanmean(LOGabsX)) / np.nanstd(np.abs(LOGabsX)))
def normphs(X):
"""
Normalization for the phase in the folding process.
"""
sh = np.shape(X)
return np.array(np.sin(np.angle(X)))
def tfnormalize(X):
"""
Skip connection layer normalization.
"""
sh = np.shape(X)
# NOTE: the layer-normalized tensor is computed but not returned; the input passes through unchanged.
X_norm = tf.contrib.layers.layer_norm(X, trainable=False)
return X
def foldl(data, ch_fold=16, padding=2):
"""
Folding function for carving up waterfall visibility flags for prediction in the FCN.
"""
sh = np.shape(data)
# Carve the (time, frequency) waterfall into ch_fold spectral windows and reflection-pad each one.
_data = data.T.reshape(ch_fold, sh[1] // ch_fold, -1)
_DATA = store_iterator(map(transpose, _data))
_DATApad = store_iterator(
map(
np.pad,
_DATA,
len(_DATA) * [((padding + 2, padding + 2), (padding, padding))],
len(_DATA) * ["reflect"],
)
)
return _DATApad
def pad(data, padding=2):
"""
Padding function applied to folded spectral windows.
Reflection is default padding.
"""
sh = np.shape(data)
t_pad = 16
data_pad = np.pad(
data, pad_width=((t_pad + 2, t_pad + 2), (t_pad, t_pad)), mode="reflect"
)
return data_pad
def unpad(data, diff=4, padding=(2, 2)):
"""
Unpadding function for recovering flag predictions.
`padding` is a (time, frequency) tuple matching the padding applied when folding.
"""
sh = np.shape(data)
return data[padding[0] : sh[0] - padding[0], padding[1] : sh[1] - padding[1]]
def store_iterator(it):
a = [x for x in it]
return np.array(a)
def fold(data, ch_fold=16, padding=2):
"""
Folding function for carving waterfall visibilities with additional normalized log
and phase channels.
Input: (Batch, Time, Frequency)
Output: (Batch*FoldFactor, Time, Reduced Frequency, Channels)
"""
sh = np.shape(data)
_data = data.T.reshape(ch_fold, int(sh[1] / ch_fold), -1)
_DATA = store_iterator(map(transpose, _data))
_DATApad = store_iterator(map(pad, _DATA))
DATA = np.stack(
(
store_iterator(map(normalize, _DATApad)),
store_iterator(map(normphs, _DATApad)),
np.mod(store_iterator(map(normphs, _DATApad)), np.pi),
),
axis=-1,
)
return DATA
def unfoldl(data_fold, ch_fold=16, padding=2):
"""
Unfolding function for recombining the carved label (flag) frequency windows back into a complete
waterfall visibility.
Input: (Batch*FoldFactor, Time, Reduced Frequency, Channels)
Output: (Batch, Time, Frequency)
"""
sh = np.shape(data_fold)
data_unpad = data_fold[
:, (padding + 2) : (sh[1] - (padding + 2)), padding : sh[2] - padding
]
ch_fold, ntimes, dfreqs = np.shape(data_unpad)
data_ = np.transpose(data_unpad, (0, 2, 1))
_data = data_.reshape(ch_fold * dfreqs, ntimes).T
return _data
def stacked_layer(
input_layer,
num_filter_layers,
kt,
kf,
activation,
stride,
pool,
bnorm=True,
name="None",
dropout=None,
maxpool=True,
mode=True,
):
"""
Creates a 3x stacked layer of convolutional layers. Each layer uses the same kernel size.
Batch-normalized output is the default and is recommended for faster convergence,
although not every architecture may require it.
Input: Tensor Variable (Batch*FoldFactor, Time, Reduced Frequency, Input Filter Layers)
Output: Tensor Variable (Batch*FoldFactor, Time/2, Reduced Frequency/2, num_filter_layers)
"""
conva = tf.layers.conv2d(
inputs=input_layer,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padding="same",
activation=activation,
)
if kt - 2 < 0:
kt = 3
if dropout is not None:
convb = tf.layers.dropout(
tf.layers.conv2d(
inputs=conva,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padding="same",
activation=activation,
),
rate=dropout,
)
else:
convb = tf.layers.conv2d(
inputs=conva,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padding="same",
activation=activation,
)
shb = convb.get_shape().as_list()
convc = tf.layers.conv2d(
inputs=convb,
filters=num_filter_layers,
kernel_size=(1, 1),
padding="same",
activation=activation,
)
if bnorm:
bnorm_conv = tf.layers.batch_normalization(
convc, scale=True, center=True, training=mode, fused=True
)
else:
bnorm_conv = convc
if maxpool:
pool = tf.layers.max_pooling2d(
inputs=bnorm_conv, pool_size=pool, strides=stride
)
elif maxpool is None:
pool = bnorm_conv
else:
pool = tf.layers.average_pooling2d(
inputs=bnorm_conv, pool_size=pool, strides=stride
)
return pool
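# Usage sketch for stacked_layer (assumes TensorFlow 1.x graph mode, as used in the rest
# of this module; the input shape mirrors one folded spectral window with 3 channels).
def _stacked_layer_sketch():
    x_in = tf.placeholder(tf.float32, shape=[None, 96, 96, 3])
    # One downsampling block: 3x stacked convolutions, batch norm, then 2x2 max pooling,
    # which halves the time and frequency dimensions.
    block = stacked_layer(x_in, num_filter_layers=32, kt=3, kf=3,
                          activation=tf.nn.relu, stride=2, pool=2,
                          bnorm=True, dropout=0.5, maxpool=True, mode=True)
    return block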
def batch_accuracy(labels, predictions):
"""
Returns the RFI class accuracy.
"""
labels = tf.cast(labels, dtype=tf.int64)
predictions = tf.cast(predictions, dtype=tf.int64)
correct = tf.reduce_sum(
tf.cast(tf.equal(tf.add(labels, predictions), 2), dtype=tf.int64)
)
total = tf.reduce_sum(labels)
return tf.divide(correct, total)
def accuracy(labels, predictions):
"""
Numpy version of RFI class accuracy.
"""
correct = 1.0 * np.sum((labels + predictions) == 2)
total = 1.0 * np.sum(labels == 1)
print("correct", correct)
print("total", total)
try:
return correct / total
except BaseException:
return 1.0
def MCC(tp, tn, fp, fn):
"""
Calculates the Matthews Correlation Coefficient.
"""
if tp == 0 and fn == 0:
return tp * tn - fp * fn
else:
return (tp * tn - fp * fn) / np.sqrt(
(1.0 * (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
)
def f1(tp, tn, fp, fn):
"""
Calculates the F1 Score.
"""
precision = tp / (1.0 * (tp + fp))
recall = tp / (1.0 * (tp + fn))
return 2.0 * precision * recall / (precision + recall)
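# Worked example for MCC and F1 (illustrative counts). With tp=90, tn=940, fp=60, fn=10:
#   precision = 90/150 = 0.6, recall = 90/100 = 0.9  ->  F1 = 2*0.6*0.9/1.5 = 0.72
#   MCC = (90*940 - 60*10) / sqrt(150*100*1000*950) ~= 0.70
def _score_sketch():
    tp, tn, fp, fn = 90.0, 940.0, 60.0, 10.0
    return MCC(tp, tn, fp, fn), f1(tp, tn, fp, fn)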
def SNRvsTPR(data, true_flags, flags):
"""
Calculates the signal-to-noise ratio versus true positive rate (recall).
"""
SNR = np.linspace(0.0, 4.0, 30)
snr_tprs = []
data_ = np.copy(data)
flags_ = np.copy(flags)
true_flags_ = np.copy(true_flags)
for snr_ in SNR:
snr_map = np.log10(data_ * flags_ / np.std(data_ * np.logical_not(true_flags)))
snr_inds = snr_map < snr_
confuse_mat = confusion_matrix(
true_flags_[snr_inds].astype(int).reshape(-1),
flags_[snr_inds].astype(int).reshape(-1),
)
if np.size(confuse_mat)
from __future__ import print_function, division
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
from emnist import extract_training_samples
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Model
from keras.optimizers import Adam
from sklearn.utils import shuffle
class DCGAN():
def __init__(self,
n_xin=40000,
n_xout=10000,
mia_attacks=None,
use_advreg=False):
""" Builds a DCGAN model with adversarial attacks
:param n_xin, n_xout Sizes of the training (member) and out-of-distribution (non-member) data
:param mia_attacks List with the following possible values: ["logan", "dist", "featuremap"]
:param use_advreg Build with adversarial regularization or without
"""
if mia_attacks is None:
mia_attacks = []
# Input shape
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
self.use_advreg = use_advreg
self.mia_attacks = mia_attacks
np.random.seed(0)
# Load the EMNIST data
(self.x_in, y_in), (self.x_out, y_out) = self.load_emnist_data(n_xin=n_xin, n_xout=n_xout)
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
self.featuremap_model, self.discriminator, self.critic_model_with_advreg, self.advreg_model = self.build_discriminator(
optimizer)
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator)
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def get_xin(self):
""" Gets the members
"""
return self.x_in
def get_xout(self):
""" Gets the non-members
"""
return self.x_out
def load_emnist_data(self,
n_xin,
n_xout):
"""
Load x_in (members) and x_out (non-members) from the EMNIST digits set
@:param n_xin: Size of the X_in dataset
@:param n_xout: Size of the X_out dataset
@:return (x_in, y_in), (x_out, y_out)
"""
def normalize(data):
return np.reshape((data.astype(np.float32) - 127.5) / 127.5, (-1, 28, 28, 1))
# Load and normalize the training data
(x_train, y_train) = extract_training_samples('digits')
x_train = normalize(x_train)
# Shuffle for some randomness
x_train, y_train = shuffle(x_train, y_train)
assert (n_xin + n_xout < len(x_train)) # No overflow, sizes have to be assured
# Split into x_in and x_out
x_in, y_in = x_train[:n_xin], y_train[:n_xin]
x_out, y_out = x_train[n_xin:n_xin + n_xout], y_train[n_xin:n_xin + n_xout]
return (x_in, y_in), (x_out, y_out)
def wasserstein_loss(self, y_true, y_pred):
return -K.mean(y_true * y_pred)
def build_advreg(self, input_shape):
""" Build the model for the adversarial regularizer
"""
advreg_in = Input(input_shape)
l0 = Dense(units=500)(advreg_in)
l1 = Dropout(0.2)(l0)
l2 = Dense(units=250)(l1)
l3 = Dropout(0.2)(l2)
l4 = Dense(units=10)(l3)
advreg_out = Dense(units=1, activation="linear")(l4)
return Model(advreg_in, advreg_out)
def build_generator(self):
input_data = Input((self.latent_dim,))
l0 = Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim)(input_data)
l1 = Reshape((7, 7, 128))(l0)
l2 = UpSampling2D()(l1)
l3 = Conv2D(128, kernel_size=3, padding="same")(l2)
l4 = BatchNormalization(momentum=0.8)(l3)
l5 = Activation("relu")(l4)
l6 = UpSampling2D()(l5)
l7 = Conv2D(64, kernel_size=3, padding="same")(l6)
l8 = BatchNormalization(momentum=0.8)(l7)
l9 = Activation("relu")(l8)
l10 = Conv2D(self.channels, kernel_size=3, padding="same")(l9)
output = Activation("tanh")(l10)
return Model(input_data, output)
def build_discriminator(self, optimizer):
dropout = 0.25
img_shape = (28, 28, 1)
critic_in = Input(img_shape)
l0 = Conv2D(16, kernel_size=3, strides=2, input_shape=img_shape, padding="same")(critic_in)
l1 = LeakyReLU(alpha=0.2)(l0)
l2 = Dropout(dropout)(l1)
l3 = Conv2D(32, kernel_size=3, strides=2, padding="same")(l2)
l4 = ZeroPadding2D(padding=((0, 1), (0, 1)))(l3)
l5 = BatchNormalization(momentum=0.8)(l4)
l6 = LeakyReLU(alpha=0.2)(l5)
l7 = Dropout(dropout)(l6)
l8 = Conv2D(64, kernel_size=3, strides=2, padding="same")(l7)
l9 = BatchNormalization(momentum=0.8)(l8)
l10 = LeakyReLU(alpha=0.2)(l9)
l11 = Dropout(dropout)(l10)
l12 = Conv2D(128, kernel_size=3, strides=1, padding="same")(l11)
l13 = BatchNormalization(momentum=0.8)(l12)
l14 = LeakyReLU(alpha=0.2)(l13)
l15 = Dropout(dropout)(l14)
featuremaps = Flatten()(l15)
critic_out = Dense(1, activation="sigmoid", name="critic_out")(featuremaps)
""" Build the critic WITHOUT the adversarial regularization
"""
critic_model_without_advreg = Model(inputs=[critic_in], outputs=[critic_out])
critic_model_without_advreg.compile(optimizer=optimizer,
metrics=["accuracy"],
loss='binary_crossentropy')
""" Build the adversarial regularizer
If no adversarial regularization is required, disable it in the training function /!\
"""
featuremap_model = Model(inputs=[critic_in], outputs=[featuremaps])
advreg = self.build_advreg(input_shape=(2048,))
mia_pred = advreg(featuremap_model(critic_in))
naming_layer = Lambda(lambda x: x, name='mia_pred')
mia_pred = naming_layer(mia_pred)
advreg_model = Model(inputs=[critic_in], outputs=[mia_pred])
# Do not train the critic when updating the adversarial regularizer
featuremap_model.trainable = False
# NOTE: this local loss is defined but never used (the compile call below uses the
# Wasserstein loss); it also shadows the `advreg` model built above.
def advreg(y_true, y_pred):
return 2 * K.binary_crossentropy(y_true, y_pred)
advreg_model.compile(optimizer=optimizer,
metrics=["accuracy"],
loss=self.wasserstein_loss)
""" Build the critic WITH the adversarial regularization
"""
critic_model_with_advreg = Model(inputs=[critic_in], outputs=[critic_out, mia_pred])
advreg_model.trainable = False
# NOTE: these per-output losses are defined but unused; the compile call below maps
# both outputs to the Wasserstein loss.
def critic_out(y_true, y_pred):
return K.binary_crossentropy(y_true, y_pred)
def mia_pred(y_true, y_pred):
return K.binary_crossentropy(y_true, y_pred)
critic_model_with_advreg.compile(optimizer=optimizer,
metrics=["accuracy"],
loss={
"critic_out": self.wasserstein_loss,
"mia_pred": self.wasserstein_loss
})
return featuremap_model, critic_model_without_advreg, critic_model_with_advreg, advreg_model
def train(self, epochs, batch_size=128, save_interval=50):
logan_precisions, featuremap_precisions = [], []
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half of images
idx = np.random.randint(0, len(self.x_in), batch_size)
imgs = self.x_in[idx]
idx_out = np.random.randint(0, len(self.x_out), batch_size)
imgs_out = self.x_out[idx_out]
# Sample noise and generate a batch of new images
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylops.utils import dottest
from pylops.signalprocessing import Interp, Bilinear
par1 = {'ny': 21, 'nx': 11, 'nt':20, 'imag': 0,
'dtype':'float64', 'kind': 'nearest'} # real, nearest
par2 = {'ny': 21, 'nx': 11, 'nt':20, 'imag': 1j,
'dtype':'complex128', 'kind': 'nearest'} # complex, nearest
par3 = {'ny': 21, 'nx': 11, 'nt': 20, 'imag': 0,
'dtype': 'float64', 'kind': 'linear'} # real, linear
par4 = {'ny': 21, 'nx': 11, 'nt': 20, 'imag': 1j,
'dtype': 'complex128', 'kind': 'linear'} # complex, linear
par5 = {'ny': 21, 'nx': 11, 'nt': 20, 'imag': 0,
'dtype': 'float64', 'kind': 'sinc'} # real, sinc
par6 = {'ny': 21, 'nx': 11, 'nt': 20, 'imag': 1j,
'dtype': 'complex128', 'kind': 'sinc'} # complex, sinc
# subsampling factor
perc_subsampling = 0.4
def test_sincinterp():
"""Check accuracy of sinc interpolation of subsampled version of input
signal
"""
nt = 81
dt = 0.004
t = np.arange(nt) * dt
ntsub = 10
dtsub = dt / ntsub
tsub = np.arange(nt * ntsub) * dtsub
tsub = tsub[:np.where(tsub == t[-1])[0][0] + 1]
x = np.sin(2 * np.pi * 10 * t) + \
0.4 * np.sin(2 * np.pi * 20 * t) - \
2 * np.sin(2 * np.pi * 5 * t)
xsub = np.sin(2 * np.pi * 10 * tsub) + \
0.4 * np.sin(2 * np.pi * 20 * tsub) - \
2 * np.sin(2 * np.pi * 5 * tsub)
iava = tsub[20:-20] / (dtsub * ntsub) # exclude edges
SI1op, iava = Interp(nt, iava, kind='sinc', dtype='float64')
y = SI1op * x
print(np.max(np.abs(xsub[20:-20] - y)))
assert_array_almost_equal(xsub[20:-20], y, decimal=1)
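# Minimal usage sketch outside the tests (assumes the same PyLops Interp API exercised
# above; sizes and indices are illustrative). Builds a linear-interpolation operator at
# fractional indices and applies it forward and adjoint.
def _interp_usage_sketch():
    nx = 64
    x = np.arange(nx, dtype='float64')
    iava = np.arange(5.0, nx - 5, 2.5)
    Op, iava_corrected = Interp(nx, iava, kind='linear', dtype='float64')
    y = Op * x        # signal interpolated at the fractional locations
    xadj = Op.H * y   # adjoint spreads the samples back onto the regular grid
    return y, xadj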
@pytest.mark.parametrize("par", [(par1), (par2), (par3),
(par4), (par5), (par6)])
def test_Interp_1dsignal(par):
"""Dot-test and forward for Interp operator for 1d signal
"""
np.random.seed(1)
x = np.random.normal(0, 1, par['nx']) + \
par['imag'] * np.random.normal(0, 1, par['nx'])
Nsub = int(np.round(par['nx'] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par['nx']))[:Nsub])
# fixed indices
Iop, _ = Interp(par['nx'], iava, kind=par['kind'], dtype=par['dtype'])
assert dottest(Iop, Nsub, par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
# decimal indices
Idecop, _ = Interp(par['nx'], iava + 0.3, kind=par['kind'],
dtype=par['dtype'])
assert dottest(Idecop, Nsub, par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
# repeated indices
with pytest.raises(ValueError):
iava_rep = iava.copy()
iava_rep[-2] = 0
iava_rep[-1] = 0
_, _ = Interp(par['nx'], iava_rep + 0.3,
kind=par['kind'], dtype=par['dtype'])
# forward
y = Iop * x
ydec = Idecop * x
assert_array_almost_equal(y, x[iava])
if par['kind'] == 'nearest':
assert_array_almost_equal(ydec, x[iava])
@pytest.mark.parametrize("par", [(par1), (par2), (par3),
(par4), (par5), (par6)])
def test_Interp_2dsignal(par):
"""Dot-test and forward for Restriction operator for 2d signal
"""
np.random.seed(1)
x = np.random.normal(0, 1, (par['nx'], par['nt'])) + \
par['imag'] * np.random.normal(0, 1, (par['nx'], par['nt']))
# 1st direction
Nsub = int(np.round(par['nx'] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par['nx']))[:Nsub])
# fixed indices
Iop, _ = Interp(par['nx']*par['nt'], iava,
dims=(par['nx'], par['nt']), dir=0,
kind=par['kind'], dtype=par['dtype'])
assert dottest(Iop, Nsub*par['nt'], par['nx']*par['nt'],
complexflag=0 if par['imag'] == 0 else 3)
# decimal indices
Idecop, _ = Interp(par['nx'] * par['nt'], iava + 0.3,
dims=(par['nx'], par['nt']), dir=0,
kind=par['kind'], dtype=par['dtype'])
# repeated indices
with pytest.raises(ValueError):
iava_rep = iava.copy()
iava_rep[-2] = 0
iava_rep[-1] = 0
_, _ = Interp(par['nx'] * par['nt'], iava_rep + 0.3,
dims=(par['nx'], par['nt']), dir=0,
kind=par['kind'], dtype=par['dtype'])
y = (Iop * x.ravel()).reshape(Nsub, par['nt'])
ydec = (Idecop * x.ravel()).reshape(Nsub, par['nt'])
assert_array_almost_equal(y, x[iava])
if par['kind'] == 'nearest':
assert_array_almost_equal(ydec, x[iava])
# 2nd direction
Nsub = int(np.round(par['nt'] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par['nt']))[:Nsub])
# fixed indices
Iop, _ = Interp(par['nx'] * par['nt'], iava,
dims=(par['nx'], par['nt']), dir=1,
kind=par['kind'], dtype=par['dtype'])
assert dottest(Iop, par['nx'] * Nsub, par['nx'] * par['nt'],
complexflag=0 if par['imag'] == 0 else 3)
# decimal indices
Idecop, _ = Interp(par['nx'] * par['nt'], iava + 0.3,
dims=(par['nx'], par['nt']), dir=1,
kind=par['kind'], dtype=par['dtype'])
assert dottest(Idecop, par['nx'] * Nsub, par['nx'] * par['nt'],
complexflag=0 if par['imag'] == 0 else 3)
y = (Iop * x.ravel()).reshape(par['nx'], Nsub)
ydec = (Idecop * x.ravel()).reshape(par['nx'], Nsub)
assert_array_almost_equal(y, x[:, iava])
if par['kind'] == 'nearest':
assert_array_almost_equal(ydec, x[:, iava])
@pytest.mark.parametrize("par", [(par1), (par2), (par3),
(par4), (par5), (par6)])
def test_Interp_3dsignal(par):
"""Dot-test and forward for Interp operator for 3d signal
"""
np.random.seed(1)
x = np.random.normal(0, 1, (par['ny'], par['nx'], par['nt'])) + \
par['imag'] * np.random.normal(0, 1, (par['ny'], par['nx'], par['nt']))
# 1st direction
Nsub = int(np.round(par['ny'] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par['ny']))[:Nsub])
# fixed indices
Iop, _ = Interp(par['ny']*par['nx']*par['nt'], iava,
dims=(par['ny'], par['nx'], par['nt']), dir=0,
kind=par['kind'], dtype=par['dtype'])
assert dottest(Iop, Nsub*par['nx']*par['nt'],
par['ny']*par['nx']*par['nt'],
complexflag=0 if par['imag'] == 0 else 3)
# decimal indices
Idecop, _ = Interp(par['ny'] * par['nx'] * par['nt'], iava + 0.3,
dims=(par['ny'], par['nx'], par['nt']), dir=0,
kind=par['kind'], dtype=par['dtype'])
assert dottest(Idecop, Nsub * par['nx'] * par['nt'],
par['ny'] * par['nx'] * par['nt'],
complexflag=0 if par['imag'] == 0 else 3)
# repeated indices
with pytest.raises(ValueError):
iava_rep = iava.copy()
iava_rep[-2] = 0
iava_rep[-1] = 0
_, _ = Interp(par['ny'] * par['nx'] * par['nt'], iava_rep + 0.3,
dims=(par['ny'], par['nx'], par['nt']), dir=0,
kind=par['kind'], dtype=par['dtype'])
y = (Iop * x.ravel()).reshape(Nsub, par['nx'], par['nt'])
ydec = (Idecop * x.ravel()).reshape(Nsub, par['nx'], par['nt'])
assert_array_almost_equal(y, x[iava])
if par['kind'] == 'nearest':
assert_array_almost_equal(ydec, x[iava])
# 2nd direction
Nsub = int(np.round(par['nx'] * perc_subsampling))