import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
# Reading in all data files at once
import glob
path_normal = '/projects/p30137/ageller/testing/EBLSST/add_m5/output_files'
allFiles_normal = glob.glob(path_normal + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/output_files'
allFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/obsDist/output_files'
allFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
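# Three sets of simulation outputs are read: the baseline ("normal") run, the "fast" run,
# and the "obsDist" run (presumably different observing-strategy configurations of the
# same EBLSST pipeline).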
N_totalnormal_array = []
N_totalobservablenormal_array = []
N_totalrecoverablenormal_array = []
N_totalnormal_array_03 = []
N_totalobservablenormal_array_03 = []
N_totalrecoverablenormal_array_03 = []
N_totalnormal_array_1 = []
N_totalobservablenormal_array_1 = []
N_totalrecoverablenormal_array_1 = []
N_totalnormal_array_10 = []
N_totalobservablenormal_array_10 = []
N_totalrecoverablenormal_array_10 = []
N_totalnormal_array_30 = []
N_totalobservablenormal_array_30 = []
N_totalrecoverablenormal_array_30 = []
N_totalnormal_array_100 = []
N_totalobservablenormal_array_100 = []
N_totalrecoverablenormal_array_100 = []
N_totalnormal_array_1000 = []
N_totalobservablenormal_array_1000 = []
N_totalrecoverablenormal_array_1000 = []
N_totalnormal22_array = []
N_totalobservablenormal22_array = []
N_totalrecoverablenormal22_array = []
N_totalnormal22_array_03 = []
N_totalobservablenormal22_array_03 = []
N_totalrecoverablenormal22_array_03 = []
N_totalnormal22_array_1 = []
N_totalobservablenormal22_array_1 = []
N_totalrecoverablenormal22_array_1 = []
N_totalnormal22_array_10 = []
N_totalobservablenormal22_array_10 = []
N_totalrecoverablenormal22_array_10 = []
N_totalnormal22_array_30 = []
N_totalobservablenormal22_array_30 = []
N_totalrecoverablenormal22_array_30 = []
N_totalnormal22_array_100 = []
N_totalobservablenormal22_array_100 = []
N_totalrecoverablenormal22_array_100 = []
N_totalnormal22_array_1000 = []
N_totalobservablenormal22_array_1000 = []
N_totalrecoverablenormal22_array_1000 = []
N_totalnormal195_array = []
N_totalobservablenormal195_array = []
N_totalrecoverablenormal195_array = []
N_totalnormal195_array_03 = []
N_totalobservablenormal195_array_03 = []
N_totalrecoverablenormal195_array_03 = []
N_totalnormal195_array_1 = []
N_totalobservablenormal195_array_1 = []
N_totalrecoverablenormal195_array_1 = []
N_totalnormal195_array_10 = []
N_totalobservablenormal195_array_10 = []
N_totalrecoverablenormal195_array_10 = []
N_totalnormal195_array_30 = []
N_totalobservablenormal195_array_30 = []
N_totalrecoverablenormal195_array_30 = []
N_totalnormal195_array_100 = []
N_totalobservablenormal195_array_100 = []
N_totalrecoverablenormal195_array_100 = []
N_totalnormal195_array_1000 = []
N_totalobservablenormal195_array_1000 = []
N_totalrecoverablenormal195_array_1000 = []
N_totalfast_array = []
N_totalobservablefast_array = []
N_totalrecoverablefast_array = []
N_totalfast_array_03 = []
N_totalobservablefast_array_03 = []
N_totalrecoverablefast_array_03 = []
N_totalfast_array_1 = []
N_totalobservablefast_array_1 = []
N_totalrecoverablefast_array_1 = []
N_totalfast_array_10 = []
N_totalobservablefast_array_10 = []
N_totalrecoverablefast_array_10 = []
N_totalfast_array_30 = []
N_totalobservablefast_array_30 = []
N_totalrecoverablefast_array_30 = []
N_totalfast_array_100 = []
N_totalobservablefast_array_100 = []
N_totalrecoverablefast_array_100 = []
N_totalfast_array_1000 = []
N_totalobservablefast_array_1000 = []
N_totalrecoverablefast_array_1000 = []
N_totalfast22_array = []
N_totalobservablefast22_array = []
N_totalrecoverablefast22_array = []
N_totalfast22_array_03 = []
N_totalobservablefast22_array_03 = []
N_totalrecoverablefast22_array_03 = []
N_totalfast22_array_1 = []
N_totalobservablefast22_array_1 = []
N_totalrecoverablefast22_array_1 = []
N_totalfast22_array_10 = []
N_totalobservablefast22_array_10 = []
N_totalrecoverablefast22_array_10 = []
N_totalfast22_array_30 = []
N_totalobservablefast22_array_30 = []
N_totalrecoverablefast22_array_30 = []
N_totalfast22_array_100 = []
N_totalobservablefast22_array_100 = []
N_totalrecoverablefast22_array_100 = []
N_totalfast22_array_1000 = []
N_totalobservablefast22_array_1000 = []
N_totalrecoverablefast22_array_1000 = []
N_totalfast195_array = []
N_totalobservablefast195_array = []
N_totalrecoverablefast195_array = []
N_totalfast195_array_03 = []
N_totalobservablefast195_array_03 = []
N_totalrecoverablefast195_array_03 = []
N_totalfast195_array_1 = []
N_totalobservablefast195_array_1 = []
N_totalrecoverablefast195_array_1 = []
N_totalfast195_array_10 = []
N_totalobservablefast195_array_10 = []
N_totalrecoverablefast195_array_10 = []
N_totalfast195_array_30 = []
N_totalobservablefast195_array_30 = []
N_totalrecoverablefast195_array_30 = []
N_totalfast195_array_100 = []
N_totalobservablefast195_array_100 = []
N_totalrecoverablefast195_array_100 = []
N_totalfast195_array_1000 = []
N_totalobservablefast195_array_1000 = []
N_totalrecoverablefast195_array_1000 = []
N_totalobsDist_array = []
N_totalobservableobsDist_array = []
N_totalrecoverableobsDist_array = []
N_totalobsDist_array_03 = []
N_totalobservableobsDist_array_03 = []
N_totalrecoverableobsDist_array_03 = []
N_totalobsDist_array_1 = []
N_totalobservableobsDist_array_1 = []
N_totalrecoverableobsDist_array_1 = []
N_totalobsDist_array_10 = []
N_totalobservableobsDist_array_10 = []
N_totalrecoverableobsDist_array_10 = []
N_totalobsDist_array_30 = []
N_totalobservableobsDist_array_30 = []
N_totalrecoverableobsDist_array_30 = []
N_totalobsDist_array_100 = []
N_totalobservableobsDist_array_100 = []
N_totalrecoverableobsDist_array_100 = []
N_totalobsDist_array_1000 = []
N_totalobservableobsDist_array_1000 = []
N_totalrecoverableobsDist_array_1000 = []
N_totalobsDist22_array = []
N_totalobservableobsDist22_array = []
N_totalrecoverableobsDist22_array = []
N_totalobsDist22_array_03 = []
N_totalobservableobsDist22_array_03 = []
N_totalrecoverableobsDist22_array_03 = []
N_totalobsDist22_array_1 = []
N_totalobservableobsDist22_array_1 = []
N_totalrecoverableobsDist22_array_1 = []
N_totalobsDist22_array_10 = []
N_totalobservableobsDist22_array_10 = []
N_totalrecoverableobsDist22_array_10 = []
N_totalobsDist22_array_30 = []
N_totalobservableobsDist22_array_30 = []
N_totalrecoverableobsDist22_array_30 = []
N_totalobsDist22_array_100 = []
N_totalobservableobsDist22_array_100 = []
N_totalrecoverableobsDist22_array_100 = []
N_totalobsDist22_array_1000 = []
N_totalobservableobsDist22_array_1000 = []
N_totalrecoverableobsDist22_array_1000 = []
N_totalobsDist195_array = []
N_totalobservableobsDist195_array = []
N_totalrecoverableobsDist195_array = []
N_totalobsDist195_array_03 = []
N_totalobservableobsDist195_array_03 = []
N_totalrecoverableobsDist195_array_03 = []
N_totalobsDist195_array_1 = []
N_totalobservableobsDist195_array_1 = []
N_totalrecoverableobsDist195_array_1 = []
N_totalobsDist195_array_10 = []
N_totalobservableobsDist195_array_10 = []
N_totalrecoverableobsDist195_array_10 = []
N_totalobsDist195_array_30 = []
N_totalobservableobsDist195_array_30 = []
N_totalrecoverableobsDist195_array_30 = []
N_totalobsDist195_array_100 = []
N_totalobservableobsDist195_array_100 = []
N_totalrecoverableobsDist195_array_100 = []
N_totalobsDist195_array_1000 = []
N_totalobservableobsDist195_array_1000 = []
N_totalrecoverableobsDist195_array_1000 = []
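# The lists above accumulate one value per input file, indexed by scenario
# ("normal", "fast", "obsDist"; the "22"/"195" infixes mark the appMagMean <= 22 and
# <= 19.5 magnitude cuts) and by period cut (no suffix = all periods, then _03, _1,
# _10, _30, _100, _1000 for P <= 0.3 ... 1000 days).
#
# Illustrative aside (an assumption, not part of the original pipeline and not used
# below): the same bookkeeping could be held in a single dictionary keyed by
# (scenario, magnitude cut, period cut) instead of separate lists, e.g.
from collections import defaultdict
counts_sketch = {kind: defaultdict(list) for kind in ("total", "observable", "recoverable")}
# inside the loops below one could then write, e.g.,
# counts_sketch["observable"][("normal", "all", 10)].append(N_obs10)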
def fitRagfb():
    x = [0.05, 0.1, 1, 8, 15] # estimated bin midpoints, following https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
    y = [0.20, 0.35, 0.50, 0.70, 0.75]
    init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
    fitter = fitting.LevMarLSQFitter()
    fit = fitter(init, x, y)
    return fit
fbFit= fitRagfb()
mbins = np.arange(0,10, 0.1, dtype='float')
cutP = 0.10 #condition on recoverability/tolerance
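# fbFit is a power-law fit of binary fraction versus primary mass (the points in
# fitRagfb appear to be Raghavan-style multiplicity estimates); e.g. fbFit(1.0) returns
# the fitted binary fraction near 1 Msun. mbins are the primary-mass bins used for the
# m1 histograms below, and cutP = 0.10 is the 10% relative-period tolerance that
# defines a "recoverable" period.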
for filenormal_ in sorted(allFiles_normal):
filename = filenormal_[60:]
fileid = filename.replace('output_file.csv', '')  # strip the output_file.csv suffix to get the file id
print("I'm starting " + fileid)
datnormal = pd.read_csv(filenormal_, sep = ',', header=2)
PeriodIn = datnormal['p'] # input period -- 'p' in data file
##########################################################
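# The second read grabs the one-line header (header=0, nrows=1) to get the TRILEGAL
# star count for this field; the m1 histogram weighted by the mass-dependent binary
# fraction fbFit gives fb, and N_mult = N_tri*fb is the expected number of binaries
# used to scale the raw simulated counts below.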
datnormal1 = pd.read_csv(filenormal_, sep = ',', header=0, nrows=1)
N_tri = datnormal1["NstarsTRILEGAL"][0]
#print("N_tri = ", N_tri)
Nall = len(PeriodIn)
m1hAll0, m1b = np.histogram(datnormal["m1"], bins=mbins)
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/Nall*fbFit(m1val))
N_mult = N_tri*fb
##########################################################
if len(PeriodIn) == 0:
continue
if N_tri == 0:
continue
else:
PeriodOut = datnormal['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean = datnormal['appMagMean'] # mean apparent magnitude; used for cuts at the default limit (24), at 22, and at 19.5 (SNR = 10, roughly Kepler's bright range, brighter than LSST's nominal limit)
observable = datnormal.loc[PeriodOut != -999].index
observable_03 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999)].index
observable_1 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999)].index
observable_10 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999)].index
observable_30 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999)].index
observable_100 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999)].index
observable_1000 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999)].index
observable_22 = datnormal.loc[(PeriodOut != -999) & (appMagMean <= 22.)].index
observable_03_22 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1_22 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_10_22 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_30_22 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_100_22 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1000_22 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_195 = datnormal.loc[(PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_03_195 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1_195 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_10_195 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_30_195 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_100_195 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1000_195 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
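# fullP, halfP, and twiceP are the relative differences between the recovered LSM
# period and the true period, half the true period, and twice the true period; a
# binary counts as "recoverable" if any of the three is within cutP (10%), so half-
# and double-period aliases are accepted as recoveries.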
fullP = abs(PeriodOut - PeriodIn)/PeriodIn
halfP = abs(PeriodOut - 0.5*PeriodIn)/(0.5*PeriodIn)
twiceP = abs(PeriodOut - 2*PeriodIn)/(2*PeriodIn)
recoverable = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_03 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_10 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_30 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_100 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1000 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_22 = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_03_22 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1_22 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_10_22 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_30_22 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_100_22 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1000_22 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_195 = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_03_195 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1_195 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_10_195 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_30_195 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_100_195 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1000_195 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
P03 = datnormal.loc[PeriodIn <= 0.3].index
P1 = datnormal.loc[PeriodIn <= 1].index
P10 = datnormal.loc[PeriodIn <= 10].index
P30 = datnormal.loc[PeriodIn <= 30].index
P100 = datnormal.loc[PeriodIn <= 100].index
P1000 = datnormal.loc[PeriodIn <= 1000].index
P_22 = datnormal.loc[appMagMean <= 22.].index
P03_22 = datnormal.loc[(PeriodIn <= 0.3) & (appMagMean <= 22.)].index
P1_22 = datnormal.loc[(PeriodIn <= 1) & (appMagMean <= 22.)].index
P10_22 = datnormal.loc[(PeriodIn <= 10) & (appMagMean <= 22.)].index
P30_22 = datnormal.loc[(PeriodIn <= 30) & (appMagMean <= 22.)].index
P100_22 = datnormal.loc[(PeriodIn <= 100) & (appMagMean <= 22.)].index
P1000_22 = datnormal.loc[(PeriodIn <= 1000) & (appMagMean <= 22.)].index
P_195 = datnormal.loc[appMagMean <= 19.5].index
P03_195 = datnormal.loc[(PeriodIn <= 0.3) & (appMagMean <= 19.5)].index
P1_195 = datnormal.loc[(PeriodIn <= 1) & (appMagMean <= 19.5)].index
P10_195 = datnormal.loc[(PeriodIn <= 10) & (appMagMean <= 19.5)].index
P30_195 = datnormal.loc[(PeriodIn <= 30) & (appMagMean <= 19.5)].index
P100_195 = datnormal.loc[(PeriodIn <= 100) & (appMagMean <= 19.5)].index
P1000_195 = datnormal.loc[(PeriodIn <= 1000) & (appMagMean <= 19.5)].index
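# Convert raw counts of simulated systems into expected numbers of LSST binaries:
# each category's count is the fraction of the simulated sample in that category
# times N_mult (= N_tri * fb) for this field.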
N_all = (len(PeriodIn)/len(PeriodIn))*N_mult
N_all03 = (len(P03)/len(PeriodIn))*N_mult
N_all1 = (len(P1)/len(PeriodIn))*N_mult
N_all10 = (len(P10)/len(PeriodIn))*N_mult
N_all30 = (len(P30)/len(PeriodIn))*N_mult
N_all100 = (len(P100)/len(PeriodIn))*N_mult
N_all1000 = (len(P1000)/len(PeriodIn))*N_mult
N_all_22 = (len(P_22)/len(PeriodIn))*N_mult
N_all03_22 = (len(P03_22)/len(PeriodIn))*N_mult
N_all1_22 = (len(P1_22)/len(PeriodIn))*N_mult
N_all10_22 = (len(P10_22)/len(PeriodIn))*N_mult
N_all30_22 = (len(P30_22)/len(PeriodIn))*N_mult
N_all100_22 = (len(P100_22)/len(PeriodIn))*N_mult
N_all1000_22 = (len(P1000_22)/len(PeriodIn))*N_mult
N_all_195 = (len(P_195)/len(PeriodIn))*N_mult
N_all03_195 = (len(P03_195)/len(PeriodIn))*N_mult
N_all1_195 = (len(P1_195)/len(PeriodIn))*N_mult
N_all10_195 = (len(P10_195)/len(PeriodIn))*N_mult
N_all30_195 = (len(P30_195)/len(PeriodIn))*N_mult
N_all100_195 = (len(P100_195)/len(PeriodIn))*N_mult
N_all1000_195 = (len(P1000_195)/len(PeriodIn))*N_mult
N_obs = (len(observable)/len(PeriodIn))*N_mult
N_obs03 = (len(observable_03)/len(PeriodIn))*N_mult
N_obs1 = (len(observable_1)/len(PeriodIn))*N_mult
N_obs10 = (len(observable_10)/len(PeriodIn))*N_mult
N_obs30 = (len(observable_30)/len(PeriodIn))*N_mult
N_obs100 = (len(observable_100)/len(PeriodIn))*N_mult
N_obs1000 = (len(observable_1000)/len(PeriodIn))*N_mult
N_obs_22 = (len(observable_22)/len(PeriodIn))*N_mult
N_obs03_22 = (len(observable_03_22)/len(PeriodIn))*N_mult
N_obs1_22 = (len(observable_1_22)/len(PeriodIn))*N_mult
N_obs10_22 = (len(observable_10_22)/len(PeriodIn))*N_mult
N_obs30_22 = (len(observable_30_22)/len(PeriodIn))*N_mult
N_obs100_22 = (len(observable_100_22)/len(PeriodIn))*N_mult
N_obs1000_22 = (len(observable_1000_22)/len(PeriodIn))*N_mult
N_obs_195 = (len(observable_195)/len(PeriodIn))*N_mult
N_obs03_195 = (len(observable_03_195)/len(PeriodIn))*N_mult
N_obs1_195 = (len(observable_1_195)/len(PeriodIn))*N_mult
N_obs10_195 = (len(observable_10_195)/len(PeriodIn))*N_mult
N_obs30_195 = (len(observable_30_195)/len(PeriodIn))*N_mult
N_obs100_195 = (len(observable_100_195)/len(PeriodIn))*N_mult
N_obs1000_195 = (len(observable_1000_195)/len(PeriodIn))*N_mult
N_rec = (len(recoverable)/len(PeriodIn))*N_mult
N_rec03 = (len(recoverable_03)/len(PeriodIn))*N_mult
N_rec1 = (len(recoverable_1)/len(PeriodIn))*N_mult
N_rec10 = (len(recoverable_10)/len(PeriodIn))*N_mult
N_rec30 = (len(recoverable_30)/len(PeriodIn))*N_mult
N_rec100 = (len(recoverable_100)/len(PeriodIn))*N_mult
N_rec1000 = (len(recoverable_1000)/len(PeriodIn))*N_mult
N_rec_22 = (len(recoverable_22)/len(PeriodIn))*N_mult
N_rec03_22 = (len(recoverable_03_22)/len(PeriodIn))*N_mult
N_rec1_22 = (len(recoverable_1_22)/len(PeriodIn))*N_mult
N_rec10_22 = (len(recoverable_10_22)/len(PeriodIn))*N_mult
N_rec30_22 = (len(recoverable_30_22)/len(PeriodIn))*N_mult
N_rec100_22 = (len(recoverable_100_22)/len(PeriodIn))*N_mult
N_rec1000_22 = (len(recoverable_1000_22)/len(PeriodIn))*N_mult
N_rec_195 = (len(recoverable_195)/len(PeriodIn))*N_mult
N_rec03_195 = (len(recoverable_03_195)/len(PeriodIn))*N_mult
N_rec1_195 = (len(recoverable_1_195)/len(PeriodIn))*N_mult
N_rec10_195 = (len(recoverable_10_195)/len(PeriodIn))*N_mult
N_rec30_195 = (len(recoverable_30_195)/len(PeriodIn))*N_mult
N_rec100_195 = (len(recoverable_100_195)/len(PeriodIn))*N_mult
N_rec1000_195 = (len(recoverable_1000_195)/len(PeriodIn))*N_mult
N_totalnormal_array.append(float(N_all))
N_totalobservablenormal_array.append(float(N_obs))
N_totalrecoverablenormal_array.append(float(N_rec))
N_totalnormal_array_03.append(float(N_all03))
N_totalobservablenormal_array_03.append(float(N_obs03))
N_totalrecoverablenormal_array_03.append(float(N_rec03))
N_totalnormal_array_1.append(float(N_all1))
N_totalobservablenormal_array_1.append(float(N_obs1))
N_totalrecoverablenormal_array_1.append(float(N_rec1))
N_totalnormal_array_10.append(float(N_all10))
N_totalobservablenormal_array_10.append(float(N_obs10))
N_totalrecoverablenormal_array_10.append(float(N_rec10))
N_totalnormal_array_30.append(float(N_all30))
N_totalobservablenormal_array_30.append(float(N_obs30))
N_totalrecoverablenormal_array_30.append(float(N_rec30))
N_totalnormal_array_100.append(float(N_all100))
N_totalobservablenormal_array_100.append(float(N_obs100))
N_totalrecoverablenormal_array_100.append(float(N_rec100))
N_totalnormal_array_1000.append(float(N_all1000))
N_totalobservablenormal_array_1000.append(float(N_obs1000))
N_totalrecoverablenormal_array_1000.append(float(N_rec1000))
N_totalnormal22_array.append(float(N_all_22))
N_totalobservablenormal22_array.append(float(N_obs_22))
N_totalrecoverablenormal22_array.append(float(N_rec_22))
N_totalnormal22_array_03.append(float(N_all03_22))
N_totalobservablenormal22_array_03.append(float(N_obs03_22))
N_totalrecoverablenormal22_array_03.append(float(N_rec03_22))
N_totalnormal22_array_1.append(float(N_all1_22))
N_totalobservablenormal22_array_1.append(float(N_obs1_22))
N_totalrecoverablenormal22_array_1.append(float(N_rec1_22))
N_totalnormal22_array_10.append(float(N_all10_22))
N_totalobservablenormal22_array_10.append(float(N_obs10_22))
N_totalrecoverablenormal22_array_10.append(float(N_rec10_22))
N_totalnormal22_array_30.append(float(N_all30_22))
N_totalobservablenormal22_array_30.append(float(N_obs30_22))
N_totalrecoverablenormal22_array_30.append(float(N_rec30_22))
N_totalnormal22_array_100.append(float(N_all100_22))
N_totalobservablenormal22_array_100.append(float(N_obs100_22))
N_totalrecoverablenormal22_array_100.append(float(N_rec100_22))
N_totalnormal22_array_1000.append(float(N_all1000_22))
N_totalobservablenormal22_array_1000.append(float(N_obs1000_22))
N_totalrecoverablenormal22_array_1000.append(float(N_rec1000_22))
N_totalnormal195_array.append(float(N_all_195))
N_totalobservablenormal195_array.append(float(N_obs_195))
N_totalrecoverablenormal195_array.append(float(N_rec_195))
N_totalnormal195_array_03.append(float(N_all03_195))
N_totalobservablenormal195_array_03.append(float(N_obs03_195))
N_totalrecoverablenormal195_array_03.append(float(N_rec03_195))
N_totalnormal195_array_1.append(float(N_all1_195))
N_totalobservablenormal195_array_1.append(float(N_obs1_195))
N_totalrecoverablenormal195_array_1.append(float(N_rec1_195))
N_totalnormal195_array_10.append(float(N_all10_195))
N_totalobservablenormal195_array_10.append(float(N_obs10_195))
N_totalrecoverablenormal195_array_10.append(float(N_rec10_195))
N_totalnormal195_array_30.append(float(N_all30_195))
N_totalobservablenormal195_array_30.append(float(N_obs30_195))
N_totalrecoverablenormal195_array_30.append(float(N_rec30_195))
N_totalnormal195_array_100.append(float(N_all100_195))
N_totalobservablenormal195_array_100.append(float(N_obs100_195))
N_totalrecoverablenormal195_array_100.append(float(N_rec100_195))
N_totalnormal195_array_1000.append(float(N_all1000_195))
N_totalobservablenormal195_array_1000.append(float(N_obs1000_195))
N_totalrecoverablenormal195_array_1000.append(float(N_rec1000_195))
N_totalnormal = np.sum(N_totalnormal_array)
N_totalnormal_03 = np.sum(N_totalnormal_array_03)
N_totalnormal_1 = np.sum(N_totalnormal_array_1)
N_totalnormal_10 = np.sum(N_totalnormal_array_10)
N_totalnormal_30 = np.sum(N_totalnormal_array_30)
N_totalnormal_100 = np.sum(N_totalnormal_array_100)
N_totalnormal_1000 = np.sum(N_totalnormal_array_1000)
N_totalobservablenormal = np.sum(N_totalobservablenormal_array)
N_totalobservablenormal_03 = np.sum(N_totalobservablenormal_array_03)
N_totalobservablenormal_1 = np.sum(N_totalobservablenormal_array_1)
N_totalobservablenormal_10 = np.sum(N_totalobservablenormal_array_10)
N_totalobservablenormal_30 = np.sum(N_totalobservablenormal_array_30)
N_totalobservablenormal_100 = np.sum(N_totalobservablenormal_array_100)
N_totalobservablenormal_1000 = np.sum(N_totalobservablenormal_array_1000)
N_totalrecoverablenormal = np.sum(N_totalrecoverablenormal_array)
N_totalrecoverablenormal_03 = np.sum(N_totalrecoverablenormal_array_03)
N_totalrecoverablenormal_1 = np.sum(N_totalrecoverablenormal_array_1)
N_totalrecoverablenormal_10 = np.sum(N_totalrecoverablenormal_array_10)
N_totalrecoverablenormal_30 = np.sum(N_totalrecoverablenormal_array_30)
N_totalrecoverablenormal_100 = np.sum(N_totalrecoverablenormal_array_100)
N_totalrecoverablenormal_1000 = np.sum(N_totalrecoverablenormal_array_1000)
N_totalnormal22 = np.sum(N_totalnormal22_array)
N_totalnormal22_03 = np.sum(N_totalnormal22_array_03)
N_totalnormal22_1 = np.sum(N_totalnormal22_array_1)
N_totalnormal22_10 = np.sum(N_totalnormal22_array_10)
N_totalnormal22_30 = np.sum(N_totalnormal22_array_30)
N_totalnormal22_100 = np.sum(N_totalnormal22_array_100)
N_totalnormal22_1000 = np.sum(N_totalnormal22_array_1000)
N_totalobservablenormal22 = np.sum(N_totalobservablenormal22_array)
N_totalobservablenormal22_03 = np.sum(N_totalobservablenormal22_array_03)
N_totalobservablenormal22_1 = np.sum(N_totalobservablenormal22_array_1)
N_totalobservablenormal22_10 = np.sum(N_totalobservablenormal22_array_10)
N_totalobservablenormal22_30 = np.sum(N_totalobservablenormal22_array_30)
N_totalobservablenormal22_100 = np.sum(N_totalobservablenormal22_array_100)
N_totalobservablenormal22_1000 = np.sum(N_totalobservablenormal22_array_1000)
N_totalrecoverablenormal22 = np.sum(N_totalrecoverablenormal22_array)
N_totalrecoverablenormal22_03 = np.sum(N_totalrecoverablenormal22_array_03)
N_totalrecoverablenormal22_1 = np.sum(N_totalrecoverablenormal22_array_1)
N_totalrecoverablenormal22_10 = np.sum(N_totalrecoverablenormal22_array_10)
N_totalrecoverablenormal22_30 = np.sum(N_totalrecoverablenormal22_array_30)
N_totalrecoverablenormal22_100 = np.sum(N_totalrecoverablenormal22_array_100)
N_totalrecoverablenormal22_1000 = np.sum(N_totalrecoverablenormal22_array_1000)
N_totalnormal195 = np.sum(N_totalnormal195_array)
N_totalnormal195_03 = np.sum(N_totalnormal195_array_03)
N_totalnormal195_1 = np.sum(N_totalnormal195_array_1)
N_totalnormal195_10 = np.sum(N_totalnormal195_array_10)
N_totalnormal195_30 = np.sum(N_totalnormal195_array_30)
N_totalnormal195_100 = np.sum(N_totalnormal195_array_100)
N_totalnormal195_1000 = np.sum(N_totalnormal195_array_1000)
N_totalobservablenormal195 = np.sum(N_totalobservablenormal195_array)
N_totalobservablenormal195_03 = np.sum(N_totalobservablenormal195_array_03)
N_totalobservablenormal195_1 = np.sum(N_totalobservablenormal195_array_1)
N_totalobservablenormal195_10 = np.sum(N_totalobservablenormal195_array_10)
N_totalobservablenormal195_30 = np.sum(N_totalobservablenormal195_array_30)
N_totalobservablenormal195_100 = np.sum(N_totalobservablenormal195_array_100)
N_totalobservablenormal195_1000 = np.sum(N_totalobservablenormal195_array_1000)
N_totalrecoverablenormal195 = np.sum(N_totalrecoverablenormal195_array)
N_totalrecoverablenormal195_03 = np.sum(N_totalrecoverablenormal195_array_03)
N_totalrecoverablenormal195_1 = np.sum(N_totalrecoverablenormal195_array_1)
N_totalrecoverablenormal195_10 = np.sum(N_totalrecoverablenormal195_array_10)
N_totalrecoverablenormal195_30 = np.sum(N_totalrecoverablenormal195_array_30)
N_totalrecoverablenormal195_100 = np.sum(N_totalrecoverablenormal195_array_100)
N_totalrecoverablenormal195_1000 = np.sum(N_totalrecoverablenormal195_array_1000)
wholerecoverypercent_normal = (N_totalrecoverablenormal/N_totalobservablenormal)*100
wholerecoverypercent_normal_03 = (N_totalrecoverablenormal_03/N_totalobservablenormal_03)*100
wholerecoverypercent_normal_1 = (N_totalrecoverablenormal_1/N_totalobservablenormal_1)*100
wholerecoverypercent_normal_10 = (N_totalrecoverablenormal_10/N_totalobservablenormal_10)*100
wholerecoverypercent_normal_30 = (N_totalrecoverablenormal_30/N_totalobservablenormal_30)*100
wholerecoverypercent_normal_100 = (N_totalrecoverablenormal_100/N_totalobservablenormal_100)*100
wholerecoverypercent_normal_1000 = (N_totalrecoverablenormal_1000/N_totalobservablenormal_1000)*100
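# The sigma values below are Poisson-style uncertainties: sqrt(N_recoverable) divided
# by the observable (or, for the "overall" versions, total) count, expressed in percent.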
sigmanormal = ((N_totalrecoverablenormal**(1/2))/N_totalobservablenormal)*100
sigmanormal_03 = ((N_totalrecoverablenormal_03**(1/2))/N_totalobservablenormal_03)*100
sigmanormal_1 = ((N_totalrecoverablenormal_1**(1/2))/N_totalobservablenormal_1)*100
sigmanormal_10 = ((N_totalrecoverablenormal_10**(1/2))/N_totalobservablenormal_10)*100
sigmanormal_30 = ((N_totalrecoverablenormal_30**(1/2))/N_totalobservablenormal_30)*100
sigmanormal_100 = ((N_totalrecoverablenormal_100**(1/2))/N_totalobservablenormal_100)*100
sigmanormal_1000 = ((N_totalrecoverablenormal_1000**(1/2))/N_totalobservablenormal_1000)*100
overallrecoverypercent_normal = (N_totalrecoverablenormal/N_totalnormal)*100
overallrecoverypercent_normal_03 = (N_totalrecoverablenormal_03/N_totalnormal_03)*100
overallrecoverypercent_normal_1 = (N_totalrecoverablenormal_1/N_totalnormal_1)*100
overallrecoverypercent_normal_10 = (N_totalrecoverablenormal_10/N_totalnormal_10)*100
overallrecoverypercent_normal_30 = (N_totalrecoverablenormal_30/N_totalnormal_30)*100
overallrecoverypercent_normal_100 = (N_totalrecoverablenormal_100/N_totalnormal_100)*100
overallrecoverypercent_normal_1000 = (N_totalrecoverablenormal_1000/N_totalnormal_1000)*100
overallsigmanormal = ((N_totalrecoverablenormal**(1/2))/N_totalnormal)*100
overallsigmanormal_03 = ((N_totalrecoverablenormal_03**(1/2))/N_totalnormal_03)*100
overallsigmanormal_1 = ((N_totalrecoverablenormal_1**(1/2))/N_totalnormal_1)*100
overallsigmanormal_10 = ((N_totalrecoverablenormal_10**(1/2))/N_totalnormal_10)*100
overallsigmanormal_30 = ((N_totalrecoverablenormal_30**(1/2))/N_totalnormal_30)*100
overallsigmanormal_100 = ((N_totalrecoverablenormal_100**(1/2))/N_totalnormal_100)*100
overallsigmanormal_1000 = ((N_totalrecoverablenormal_1000**(1/2))/N_totalnormal_1000)*100
wholerecoverypercent_normal22 = (N_totalrecoverablenormal22/N_totalobservablenormal22)*100
wholerecoverypercent_normal22_03 = (N_totalrecoverablenormal22_03/N_totalobservablenormal22_03)*100
wholerecoverypercent_normal22_1 = (N_totalrecoverablenormal22_1/N_totalobservablenormal22_1)*100
wholerecoverypercent_normal22_10 = (N_totalrecoverablenormal22_10/N_totalobservablenormal22_10)*100
wholerecoverypercent_normal22_30 = (N_totalrecoverablenormal22_30/N_totalobservablenormal22_30)*100
wholerecoverypercent_normal22_100 = (N_totalrecoverablenormal22_100/N_totalobservablenormal22_100)*100
wholerecoverypercent_normal22_1000 = (N_totalrecoverablenormal22_1000/N_totalobservablenormal22_1000)*100
sigmanormal22 = ((N_totalrecoverablenormal22**(1/2))/N_totalobservablenormal22)*100
sigmanormal22_03 = ((N_totalrecoverablenormal22_03**(1/2))/N_totalobservablenormal22_03)*100
sigmanormal22_1 = ((N_totalrecoverablenormal22_1**(1/2))/N_totalobservablenormal22_1)*100
sigmanormal22_10 = ((N_totalrecoverablenormal22_10**(1/2))/N_totalobservablenormal22_10)*100
sigmanormal22_30 = ((N_totalrecoverablenormal22_30**(1/2))/N_totalobservablenormal22_30)*100
sigmanormal22_100 = ((N_totalrecoverablenormal22_100**(1/2))/N_totalobservablenormal22_100)*100
sigmanormal22_1000 = ((N_totalrecoverablenormal22_1000**(1/2))/N_totalobservablenormal22_1000)*100
overallrecoverypercent_normal22 = (N_totalrecoverablenormal22/N_totalnormal22)*100
overallrecoverypercent_normal22_03 = (N_totalrecoverablenormal22_03/N_totalnormal22_03)*100
overallrecoverypercent_normal22_1 = (N_totalrecoverablenormal22_1/N_totalnormal22_1)*100
overallrecoverypercent_normal22_10 = (N_totalrecoverablenormal22_10/N_totalnormal22_10)*100
overallrecoverypercent_normal22_30 = (N_totalrecoverablenormal22_30/N_totalnormal22_30)*100
overallrecoverypercent_normal22_100 = (N_totalrecoverablenormal22_100/N_totalnormal22_100)*100
overallrecoverypercent_normal22_1000 = (N_totalrecoverablenormal22_1000/N_totalnormal22_1000)*100
overallsigmanormal22 = ((N_totalrecoverablenormal22**(1/2))/N_totalnormal22)*100
overallsigmanormal22_03 = ((N_totalrecoverablenormal22_03**(1/2))/N_totalnormal22_03)*100
overallsigmanormal22_1 = ((N_totalrecoverablenormal22_1**(1/2))/N_totalnormal22_1)*100
overallsigmanormal22_10 = ((N_totalrecoverablenormal22_10**(1/2))/N_totalnormal22_10)*100
overallsigmanormal22_30 = ((N_totalrecoverablenormal22_30**(1/2))/N_totalnormal22_30)*100
overallsigmanormal22_100 = ((N_totalrecoverablenormal22_100**(1/2))/N_totalnormal22_100)*100
overallsigmanormal22_1000 = ((N_totalrecoverablenormal22_1000**(1/2))/N_totalnormal22_1000)*100
wholerecoverypercent_normal195 = (N_totalrecoverablenormal195/N_totalobservablenormal195)*100
wholerecoverypercent_normal195_03 = (N_totalrecoverablenormal195_03/N_totalobservablenormal195_03)*100
wholerecoverypercent_normal195_1 = (N_totalrecoverablenormal195_1/N_totalobservablenormal195_1)*100
wholerecoverypercent_normal195_10 = (N_totalrecoverablenormal195_10/N_totalobservablenormal195_10)*100
wholerecoverypercent_normal195_30 = (N_totalrecoverablenormal195_30/N_totalobservablenormal195_30)*100
wholerecoverypercent_normal195_100 = (N_totalrecoverablenormal195_100/N_totalobservablenormal195_100)*100
wholerecoverypercent_normal195_1000 = (N_totalrecoverablenormal195_1000/N_totalobservablenormal195_1000)*100
sigmanormal195 = ((N_totalrecoverablenormal195**(1/2))/N_totalobservablenormal195)*100
sigmanormal195_03 = ((N_totalrecoverablenormal195_03**(1/2))/N_totalobservablenormal195_03)*100
sigmanormal195_1 = ((N_totalrecoverablenormal195_1**(1/2))/N_totalobservablenormal195_1)*100
sigmanormal195_10 = ((N_totalrecoverablenormal195_10**(1/2))/N_totalobservablenormal195_10)*100
sigmanormal195_30 = ((N_totalrecoverablenormal195_30**(1/2))/N_totalobservablenormal195_30)*100
sigmanormal195_100 = ((N_totalrecoverablenormal195_100**(1/2))/N_totalobservablenormal195_100)*100
sigmanormal195_1000 = ((N_totalrecoverablenormal195_1000**(1/2))/N_totalobservablenormal195_1000)*100
overallrecoverypercent_normal195 = (N_totalrecoverablenormal195/N_totalnormal195)*100
overallrecoverypercent_normal195_03 = (N_totalrecoverablenormal195_03/N_totalnormal195_03)*100
overallrecoverypercent_normal195_1 = (N_totalrecoverablenormal195_1/N_totalnormal195_1)*100
overallrecoverypercent_normal195_10 = (N_totalrecoverablenormal195_10/N_totalnormal195_10)*100
overallrecoverypercent_normal195_30 = (N_totalrecoverablenormal195_30/N_totalnormal195_30)*100
overallrecoverypercent_normal195_100 = (N_totalrecoverablenormal195_100/N_totalnormal195_100)*100
overallrecoverypercent_normal195_1000 = (N_totalrecoverablenormal195_1000/N_totalnormal195_1000)*100
overallsigmanormal195 = ((N_totalrecoverablenormal195**(1/2))/N_totalnormal195)*100
overallsigmanormal195_03 = ((N_totalrecoverablenormal195_03**(1/2))/N_totalnormal195_03)*100
overallsigmanormal195_1 = ((N_totalrecoverablenormal195_1**(1/2))/N_totalnormal195_1)*100
overallsigmanormal195_10 = ((N_totalrecoverablenormal195_10**(1/2))/N_totalnormal195_10)*100
overallsigmanormal195_30 = ((N_totalrecoverablenormal195_30**(1/2))/N_totalnormal195_30)*100
overallsigmanormal195_100 = ((N_totalrecoverablenormal195_100**(1/2))/N_totalnormal195_100)*100
overallsigmanormal195_1000 = ((N_totalrecoverablenormal195_1000**(1/2))/N_totalnormal195_1000)*100
print("N_totalnormal = ", N_totalnormal, "and in log = ", np.log10(N_totalnormal), "**** N_totalobservablenormal = ", N_totalobservablenormal, "and in log = ", np.log10(N_totalobservablenormal), "**** N_totalrecoverablenormal = ", N_totalrecoverablenormal, "and in log = ", np.log10(N_totalrecoverablenormal))
print("N_totalnormal_03 = ", N_totalnormal_03, "and in log = ", np.log10(N_totalnormal_03), "**** N_totalobservablenormal_03 = ", N_totalobservablenormal_03, "and in log = ", np.log10(N_totalobservablenormal_03), "**** N_totalrecoverablenormal_03 = ", N_totalrecoverablenormal_03, "and in log = ", np.log10(N_totalrecoverablenormal_03))
print("N_totalnormal_1 = ", N_totalnormal_1, "and in log = ", np.log10(N_totalnormal_1), "**** N_totalobservablenormal_1 = ", N_totalobservablenormal_1, "and in log = ", np.log10(N_totalobservablenormal_1), "**** N_totalrecoverablenormal_1 = ", N_totalrecoverablenormal_1, "and in log = ", np.log10(N_totalrecoverablenormal_1))
print("N_totalnormal_10 = ", N_totalnormal_10, "and in log = ", np.log10(N_totalnormal_10), "**** N_totalobservablenormal_10 = ", N_totalobservablenormal_10, "and in log = ", np.log10(N_totalobservablenormal_10), "**** N_totalrecoverablenormal_10 = ", N_totalrecoverablenormal_10, "and in log = ", np.log10(N_totalrecoverablenormal_10))
print("N_totalnormal_30 = ", N_totalnormal_30, "and in log = ", np.log10(N_totalnormal_30), "**** N_totalobservablenormal_30 = ", N_totalobservablenormal_30, "and in log = ", np.log10(N_totalobservablenormal_30), "**** N_totalrecoverablenormal_30 = ", N_totalrecoverablenormal_30, "and in log = ", np.log10(N_totalrecoverablenormal_30))
print("N_totalnormal_100 = ", N_totalnormal_100, "and in log = ", np.log10(N_totalnormal_100), "**** N_totalobservablenormal_100 = ", N_totalobservablenormal_100, "and in log = ", np.log10(N_totalobservablenormal_100), "**** N_totalrecoverablenormal_100 = ", N_totalrecoverablenormal_100, "and in log = ", np.log10(N_totalrecoverablenormal_100))
print("N_totalnormal_1000 = ", N_totalnormal_1000, "and in log = ", np.log10(N_totalnormal_1000), "**** N_totalobservablenormal_1000 = ", N_totalobservablenormal_1000, "and in log = ", np.log10(N_totalobservablenormal_1000), "**** N_totalrecoverablenormal_1000 = ", N_totalrecoverablenormal_1000, "and in log = ", np.log10(N_totalrecoverablenormal_1000))
print("********************************")
print("wholerecoverypercent_normal = $", wholerecoverypercent_normal, "/pm", sigmanormal, "$")
print("wholerecoverypercent_normal_03 = $", wholerecoverypercent_normal_03, "/pm", sigmanormal_03, "$")
print("wholerecoverypercent_normal_1 = $", wholerecoverypercent_normal_1, "/pm", sigmanormal_1, "$")
print("wholerecoverypercent_normal_10 = $", wholerecoverypercent_normal_10, "/pm", sigmanormal_10, "$")
print("wholerecoverypercent_normal_30 = $", wholerecoverypercent_normal_30, "/pm", sigmanormal_30, "$")
print("wholerecoverypercent_normal_100 = $", wholerecoverypercent_normal_100, "/pm", sigmanormal_100, "$")
print("wholerecoverypercent_normal_1000 = $", wholerecoverypercent_normal_1000, "/pm", sigmanormal_1000, "$")
print("********************************")
print("overallrecoverypercent_normal = $", overallrecoverypercent_normal, "/pm", overallsigmanormal)
print("overallrecoverypercent_normal_03 = $", overallrecoverypercent_normal_03, "/pm", overallsigmanormal_03)
print("overallrecoverypercent_normal_1 = $", overallrecoverypercent_normal_1, "/pm", overallsigmanormal_1)
print("overallrecoverypercent_normal_10 = $", overallrecoverypercent_normal_10, "/pm", overallsigmanormal_10)
print("overallrecoverypercent_normal_30 = $", overallrecoverypercent_normal_30, "/pm", overallsigmanormal_30)
print("overallrecoverypercent_normal_100 = $", overallrecoverypercent_normal_100, "/pm", overallsigmanormal_100)
print("overallrecoverypercent_normal_1000 = $", overallrecoverypercent_normal_1000, "/pm", overallsigmanormal_1000)
print("################################")
print("N_totalnormal22 = ", N_totalnormal22, "and in log = ", np.log10(N_totalnormal22), "**** N_totalobservablenormal22 = ", N_totalobservablenormal22, "and in log = ", np.log10(N_totalobservablenormal22), "**** N_totalrecoverablenormal22 = ", N_totalrecoverablenormal22, "and in log = ", np.log10(N_totalrecoverablenormal22))
print("N_totalnormal22_03 = ", N_totalnormal22_03, "and in log = ", np.log10(N_totalnormal22_03), "**** N_totalobservablenormal22_03 = ", N_totalobservablenormal22_03, "and in log = ", np.log10(N_totalobservablenormal22_03), "**** N_totalrecoverablenormal22_03 = ", N_totalrecoverablenormal22_03, "and in log = ", np.log10(N_totalrecoverablenormal22_03))
print("N_totalnormal22_1 = ", N_totalnormal22_1, "and in log = ", np.log10(N_totalnormal22_1), "**** N_totalobservablenormal22_1 = ", N_totalobservablenormal22_1, "and in log = ", np.log10(N_totalobservablenormal22_1), "**** N_totalrecoverablenormal22_1 = ", N_totalrecoverablenormal22_1, "and in log = ", np.log10(N_totalrecoverablenormal22_1))
print("N_totalnormal22_10 = ", N_totalnormal22_10, "and in log = ", np.log10(N_totalnormal22_10), "**** N_totalobservablenormal22_10 = ", N_totalobservablenormal22_10, "and in log = ", np.log10(N_totalobservablenormal22_10), "**** N_totalrecoverablenormal22_10 = ", N_totalrecoverablenormal22_10, "and in log = ", np.log10(N_totalrecoverablenormal22_10))
print("N_totalnormal22_30 = ", N_totalnormal22_30, "and in log = ", np.log10(N_totalnormal22_30), "**** N_totalobservablenormal22_30 = ", N_totalobservablenormal22_30, "and in log = ", np.log10(N_totalobservablenormal22_30), "**** N_totalrecoverablenormal22_30 = ", N_totalrecoverablenormal22_30, "and in log = ", np.log10(N_totalrecoverablenormal22_30))
print("N_totalnormal22_100 = ", N_totalnormal22_100, "and in log = ", np.log10(N_totalnormal22_100), "**** N_totalobservablenormal22_100 = ", N_totalobservablenormal22_100, "and in log = ", np.log10(N_totalobservablenormal22_100), "**** N_totalrecoverablenormal22_100 = ", N_totalrecoverablenormal22_100, "and in log = ", np.log10(N_totalrecoverablenormal22_100))
print("N_totalnormal22_1000 = ", N_totalnormal22_1000, "and in log = ", np.log10(N_totalnormal22_1000), "**** N_totalobservablenormal22_1000 = ", N_totalobservablenormal22_1000, "and in log = ", np.log10(N_totalobservablenormal22_1000), "**** N_totalrecoverablenormal22_1000 = ", N_totalrecoverablenormal22_1000, "and in log = ", np.log10(N_totalrecoverablenormal22_1000))
print("********************************")
print("wholerecoverypercent_normal22 = $", wholerecoverypercent_normal22, "/pm", sigmanormal22, "$")
print("wholerecoverypercent_normal22_03 = $", wholerecoverypercent_normal22_03, "/pm", sigmanormal22_03, "$")
print("wholerecoverypercent_normal22_1 = $", wholerecoverypercent_normal22_1, "/pm", sigmanormal22_1, "$")
print("wholerecoverypercent_normal22_10 = $", wholerecoverypercent_normal22_10, "/pm", sigmanormal22_10, "$")
print("wholerecoverypercent_normal22_30 = $", wholerecoverypercent_normal22_30, "/pm", sigmanormal22_30, "$")
print("wholerecoverypercent_normal22_100 = $", wholerecoverypercent_normal22_100, "/pm", sigmanormal22_100, "$")
print("wholerecoverypercent_normal22_1000 = $", wholerecoverypercent_normal22_1000, "/pm", sigmanormal22_1000, "$")
print("********************************")
print("overallrecoverypercent_normal22 = $", overallrecoverypercent_normal22, "/pm", overallsigmanormal22, "$")
print("overallrecoverypercent_normal22_03 = $", overallrecoverypercent_normal22_03, "/pm", overallsigmanormal22_03, "$")
print("overallrecoverypercent_normal22_1 = $", overallrecoverypercent_normal22_1, "/pm", overallsigmanormal22_1, "$")
print("overallrecoverypercent_normal22_10 = $", overallrecoverypercent_normal22_10, "/pm", overallsigmanormal22_10, "$")
print("overallrecoverypercent_normal22_30 = $", overallrecoverypercent_normal22_30, "/pm", overallsigmanormal22_30, "$")
print("overallrecoverypercent_normal22_100 = $", overallrecoverypercent_normal22_100, "/pm", overallsigmanormal22_100, "$")
print("overallrecoverypercent_normal22_1000 = $", overallrecoverypercent_normal22_1000, "/pm", overallsigmanormal22_1000, "$")
print("###############################")
print("N_totalnormal195 = ", N_totalnormal195, "and in log = ", np.log10(N_totalnormal195), "**** N_totalobservablenormal195 = ", N_totalobservablenormal195, "and in log = ", np.log10(N_totalobservablenormal195), "**** N_totalrecoverablenormal195 = ", N_totalrecoverablenormal195, "and in log = ", np.log10(N_totalrecoverablenormal195))
print("N_totalnormal195_03 = ", N_totalnormal195_03, "and in log = ", np.log10(N_totalnormal195_03), "**** N_totalobservablenormal195_03 = ", N_totalobservablenormal195_03, "and in log = ", np.log10(N_totalobservablenormal195_03), "**** N_totalrecoverablenormal195_03 = ", N_totalrecoverablenormal195_03, "and in log = ", np.log10(N_totalrecoverablenormal195_03))
print("N_totalnormal195_1 = ", N_totalnormal195_1, "and in log = ", np.log10(N_totalnormal195_1), "**** N_totalobservablenormal195_1 = ", N_totalobservablenormal195_1, "and in log = ", np.log10(N_totalobservablenormal195_1), "**** N_totalrecoverablenormal195_1 = ", N_totalrecoverablenormal195_1, "and in log = ", np.log10(N_totalrecoverablenormal195_1))
print("N_totalnormal195_10 = ", N_totalnormal195_10, "and in log = ", np.log10(N_totalnormal195_10), "**** N_totalobservablenormal195_10 = ", N_totalobservablenormal195_10, "and in log = ", np.log10(N_totalobservablenormal195_10), "**** N_totalrecoverablenormal195_10 = ", N_totalrecoverablenormal195_10, "and in log = ", np.log10(N_totalrecoverablenormal195_10))
print("N_totalnormal195_30 = ", N_totalnormal195_30, "and in log = ", np.log10(N_totalnormal195_30), "**** N_totalobservablenormal195_30 = ", N_totalobservablenormal195_30, "and in log = ", np.log10(N_totalobservablenormal195_30), "**** N_totalrecoverablenormal195_30 = ", N_totalrecoverablenormal195_30, "and in log = ", np.log10(N_totalrecoverablenormal195_30))
print("N_totalnormal195_100 = ", N_totalnormal195_100, "and in log = ", np.log10(N_totalnormal195_100), "**** N_totalobservablenormal195_100 = ", N_totalobservablenormal195_100, "and in log = ", np.log10(N_totalobservablenormal195_100), "**** N_totalrecoverablenormal195_100 = ", N_totalrecoverablenormal195_100, "and in log = ", np.log10(N_totalrecoverablenormal195_100))
print("N_totalnormal195_1000 = ", N_totalnormal195_1000, "and in log = ", np.log10(N_totalnormal195_1000), "**** N_totalobservablenormal195_1000 = ", N_totalobservablenormal195_1000, "and in log = ", np.log10(N_totalobservablenormal195_1000), "**** N_totalrecoverablenormal195_1000 = ", N_totalrecoverablenormal195_1000, "and in log = ", np.log10(N_totalrecoverablenormal195_1000))
print("********************************")
print("wholerecoverypercent_normal195 = $", wholerecoverypercent_normal195, "/pm", sigmanormal195, "$")
print("wholerecoverypercent_normal195_03 = $", wholerecoverypercent_normal195_03, "/pm", sigmanormal195_03, "$")
print("wholerecoverypercent_normal195_1 = $", wholerecoverypercent_normal195_1, "/pm", sigmanormal195_1, "$")
print("wholerecoverypercent_normal195_10 = $", wholerecoverypercent_normal195_10, "/pm", sigmanormal195_10, "$")
print("wholerecoverypercent_normal195_30 = $", wholerecoverypercent_normal195_30, "/pm", sigmanormal195_30, "$")
print("wholerecoverypercent_normal195_100 = $", wholerecoverypercent_normal195_100, "/pm", sigmanormal195_100, "$")
print("wholerecoverypercent_normal195_1000 = $", wholerecoverypercent_normal195_1000, "/pm", sigmanormal195_1000, "$")
print("********************************")
print("overallrecoverypercent_normal195 = $", overallrecoverypercent_normal195, "/pm", overallsigmanormal195, "$")
print("overallrecoverypercent_normal195_03 = $", overallrecoverypercent_normal195_03, "/pm", overallsigmanormal195_03, "$")
print("overallrecoverypercent_normal195_1 = $", overallrecoverypercent_normal195_1, "/pm", overallsigmanormal195_1, "$")
print("overallrecoverypercent_normal195_10 = $", overallrecoverypercent_normal195_10, "/pm", overallsigmanormal195_10, "$")
print("overallrecoverypercent_normal195_30 = $", overallrecoverypercent_normal195_30, "/pm", overallsigmanormal195_30, "$")
print("overallrecoverypercent_normal195_100 = $", overallrecoverypercent_normal195_100, "/pm", overallsigmanormal195_100, "$")
print("overallrecoverypercent_normal195_1000 = $", overallrecoverypercent_normal195_1000, "/pm", overallsigmanormal195_1000, "$")
print("#############################")
print("binarypercent_22 = $", (N_totalnormal22/N_totalnormal)*100, "/pm", ((N_totalnormal22**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_195 = $", (N_totalnormal195/N_totalnormal)*100, "/pm", ((N_totalnormal195**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_03 = $", (N_totalnormal_03/N_totalnormal)*100, "/pm", ((N_totalnormal_03**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_1 = $", (N_totalnormal_1/N_totalnormal)*100, "/pm", ((N_totalnormal_1**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_10 = $", (N_totalnormal_10/N_totalnormal)*100, "/pm", ((N_totalnormal_10**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_30 = $", (N_totalnormal_30/N_totalnormal)*100, "/pm", ((N_totalnormal_30**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_100 = $", (N_totalnormal_100/N_totalnormal)*100, "/pm", ((N_totalnormal_100**(1/2))/N_totalnormal)*100, "$")
print("binarypercent_1000 = $", (N_totalnormal_1000/N_totalnormal)*100, "/pm", ((N_totalnormal_1000**(1/2))/N_totalnormal)*100, "$")
print("observablepercent_03 = $", (N_totalobservablenormal_03/N_totalnormal_03)*100, "/pm", ((N_totalobservablenormal_03**(1/2))/N_totalnormal_03)*100, "$")
print("observablepercent_1 = $", (N_totalobservablenormal_1/N_totalnormal_1)*100, "/pm", ((N_totalobservablenormal_1**(1/2))/N_totalnormal_1)*100, "$")
print("observablepercent_10 = $", (N_totalobservablenormal_10/N_totalnormal_10)*100, "/pm", ((N_totalobservablenormal_10**(1/2))/N_totalnormal_10)*100, "$")
print("observablepercent_30 = $", (N_totalobservablenormal_30/N_totalnormal_30)*100, "/pm", ((N_totalobservablenormal_30**(1/2))/N_totalnormal_30)*100, "$")
print("observablepercent_100 = $", (N_totalobservablenormal_100/N_totalnormal_100)*100, "/pm", ((N_totalobservablenormal_100**(1/2))/N_totalnormal_100)*100, "$")
print("observablepercent_1000 = $", (N_totalobservablenormal_1000/N_totalnormal_1000)*100, "/pm", ((N_totalobservablenormal_1000**(1/2))/N_totalnormal_1000)*100, "$")
print("observablepercent = $", (N_totalobservablenormal/N_totalnormal)*100, "/pm", ((N_totalobservablenormal**(1/2))/N_totalnormal)*100, "$")
print("observablepercent22 = $", (N_totalobservablenormal22/N_totalnormal22)*100, "/pm", ((N_totalobservablenormal22**(1/2))/N_totalnormal22)*100, "$")
print("observablepercent195 = $", (N_totalobservablenormal195/N_totalnormal195)*100, "/pm", ((N_totalobservablenormal195**(1/2))/N_totalnormal195)*100, "$")
for filefast_ in sorted(allFiles_fast):
filename = filefast_[69:] # when the file path no longer has /old in it, this will be filefast_[65:]
fileid = filename.replace('output_file.csv', '')  # strip the output_file.csv suffix to get the file id
print("I'm starting " + fileid)
datfast = pd.read_csv(filefast_, sep = ',', header=2)
PeriodIn = datfast['p'] # input period -- 'p' in data file
##########################################################
datfast1 = pd.read_csv(filefast_, sep = ',', header=0, nrows=1)
N_tri = datfast1["NstarsTRILEGAL"][0]
Nall = len(PeriodIn)
m1hAll0, m1b = np.histogram(datfast["m1"], bins=mbins)
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/Nall*fbFit(m1val))
N_mult = N_tri*fb
##########################################################
if len(PeriodIn) == 0:
continue
if N_tri == 0:
continue
else:
PeriodOut = datfast['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean = datfast['appMagMean'] # mean apparent magnitude; used for cuts at the default limit (24), at 22, and at 19.5 (SNR = 10, roughly Kepler's bright range, brighter than LSST's nominal limit)
observable = datfast.loc[PeriodOut != -999].index
observable_03 = datfast.loc[(PeriodIn <= 0.3) & (PeriodOut != -999)].index
observable_1 = datfast.loc[(PeriodIn <= 1) & (PeriodOut != -999)].index
observable_10 = datfast.loc[(PeriodIn <= 10) & (PeriodOut != -999)].index
observable_30 = datfast.loc[(PeriodIn <= 30) & (PeriodOut != -999)].index
observable_100 = datfast.loc[(PeriodIn <= 100) & (PeriodOut != -999)].index
observable_1000 = datfast.loc[(PeriodIn <= 1000) & (PeriodOut != -999)].index
observable_22 = datfast.loc[(PeriodOut != -999) & (appMagMean <= 22.)].index
observable_03_22 = datfast.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1_22 = datfast.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_10_22 = datfast.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_30_22 = datfast.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_100_22 = datfast.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1000_22 = datfast.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_195 = datfast.loc[(PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_03_195 = datfast.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1_195 = datfast.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_10_195 = datfast.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_30_195 = datfast.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_100_195 = datfast.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1000_195 = datfast.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
fullP = abs(PeriodOut - PeriodIn)/PeriodIn
halfP = abs(PeriodOut - 0.5*PeriodIn)/(0.5*PeriodIn)
twiceP = abs(PeriodOut - 2*PeriodIn)/(2*PeriodIn)
recoverable = datfast.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_03 = datfast.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1 = datfast.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_10 = datfast.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_30 = datfast.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_100 = datfast.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1000 = datfast.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_22 = datfast.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_03_22 = datfast.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1_22 = datfast.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_10_22 = datfast.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_30_22 = datfast.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_100_22 = datfast.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1000_22 = datfast.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_195 = datfast.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_03_195 = datfast.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1_195 = datfast.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_10_195 = datfast.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_30_195 = datfast.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_100_195 = datfast.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1000_195 = datfast.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
P03 = datfast.loc[PeriodIn <= 0.3].index
P1 = datfast.loc[PeriodIn <= 1].index
P10 = datfast.loc[PeriodIn <= 10].index
P30 = datfast.loc[PeriodIn <= 30].index
P100 = datfast.loc[PeriodIn <= 100].index
P1000 = datfast.loc[PeriodIn <= 1000].index
P_22 = datfast.loc[appMagMean <= 22.].index
P03_22 = datfast.loc[(PeriodIn <= 0.3) & (appMagMean <= 22.)].index
P1_22 = datfast.loc[(PeriodIn <= 1) & (appMagMean <= 22.)].index
P10_22 = datfast.loc[(PeriodIn <= 10) & (appMagMean <= 22.)].index
P30_22 = datfast.loc[(PeriodIn <= 30) & (appMagMean <= 22.)].index
P100_22 = datfast.loc[(PeriodIn <= 100) & (appMagMean <= 22.)].index
P1000_22 = datfast.loc[(PeriodIn <= 1000) & (appMagMean <= 22.)].index
P_195 = datfast.loc[appMagMean <= 19.5].index
P03_195 = datfast.loc[(PeriodIn <= 0.3) & (appMagMean <= 19.5)].index
P1_195 = datfast.loc[(PeriodIn <= 1) & (appMagMean <= 19.5)].index
P10_195 = datfast.loc[(PeriodIn <= 10) & (appMagMean <= 19.5)].index
P30_195 = datfast.loc[(PeriodIn <= 30) & (appMagMean <= 19.5)].index
P100_195 = datfast.loc[(PeriodIn <= 100) & (appMagMean <= 19.5)].index
P1000_195 = datfast.loc[(PeriodIn <= 1000) & (appMagMean <= 19.5)].index
N_all = (len(PeriodIn)/len(PeriodIn))*N_mult
N_all03 = (len(P03)/len(PeriodIn))*N_mult
N_all1 = (len(P1)/len(PeriodIn))*N_mult
N_all10 = (len(P10)/len(PeriodIn))*N_mult
N_all30 = (len(P30)/len(PeriodIn))*N_mult
N_all100 = (len(P100)/len(PeriodIn))*N_mult
N_all1000 = (len(P1000)/len(PeriodIn))*N_mult
N_all_22 = (len(P_22)/len(PeriodIn))*N_mult
N_all03_22 = (len(P03_22)/len(PeriodIn))*N_mult
N_all1_22 = (len(P1_22)/len(PeriodIn))*N_mult
N_all10_22 = (len(P10_22)/len(PeriodIn))*N_mult
N_all30_22 = (len(P30_22)/len(PeriodIn))*N_mult
N_all100_22 = (len(P100_22)/len(PeriodIn))*N_mult
N_all1000_22 = (len(P1000_22)/len(PeriodIn))*N_mult
N_all_195 = (len(P_195)/len(PeriodIn))*N_mult
N_all03_195 = (len(P03_195)/len(PeriodIn))*N_mult
N_all1_195 = (len(P1_195)/len(PeriodIn))*N_mult
N_all10_195 = (len(P10_195)/len(PeriodIn))*N_mult
N_all30_195 = (len(P30_195)/len(PeriodIn))*N_mult
N_all100_195 = (len(P100_195)/len(PeriodIn))*N_mult
N_all1000_195 = (len(P1000_195)/len(PeriodIn))*N_mult
N_obs = (len(observable)/len(PeriodIn))*N_mult
N_obs03 = (len(observable_03)/len(PeriodIn))*N_mult
N_obs1 = (len(observable_1)/len(PeriodIn))*N_mult
N_obs10 = (len(observable_10)/len(PeriodIn))*N_mult
N_obs30 = (len(observable_30)/len(PeriodIn))*N_mult
N_obs100 = (len(observable_100)/len(PeriodIn))*N_mult
N_obs1000 = (len(observable_1000)/len(PeriodIn))*N_mult
N_obs_22 = (len(observable_22)/len(PeriodIn))*N_mult
N_obs03_22 = (len(observable_03_22)/len(PeriodIn))*N_mult
N_obs1_22 = (len(observable_1_22)/len(PeriodIn))*N_mult
N_obs10_22 = (len(observable_10_22)/len(PeriodIn))*N_mult
N_obs30_22 = (len(observable_30_22)/len(PeriodIn))*N_mult
N_obs100_22 = (len(observable_100_22)/len(PeriodIn))*N_mult
N_obs1000_22 = (len(observable_1000_22)/len(PeriodIn))*N_mult
N_obs_195 = (len(observable_195)/len(PeriodIn))*N_mult
N_obs03_195 = (len(observable_03_195)/len(PeriodIn))*N_mult
N_obs1_195 = (len(observable_1_195)/len(PeriodIn))*N_mult
N_obs10_195 = (len(observable_10_195)/len(PeriodIn))*N_mult
N_obs30_195 = (len(observable_30_195)/len(PeriodIn))*N_mult
N_obs100_195 = (len(observable_100_195)/len(PeriodIn))*N_mult
N_obs1000_195 = (len(observable_1000_195)/len(PeriodIn))*N_mult
N_rec = (len(recoverable)/len(PeriodIn))*N_mult
N_rec03 = (len(recoverable_03)/len(PeriodIn))*N_mult
N_rec1 = (len(recoverable_1)/len(PeriodIn))*N_mult
N_rec10 = (len(recoverable_10)/len(PeriodIn))*N_mult
N_rec30 = (len(recoverable_30)/len(PeriodIn))*N_mult
N_rec100 = (len(recoverable_100)/len(PeriodIn))*N_mult
N_rec1000 = (len(recoverable_1000)/len(PeriodIn))*N_mult
N_rec_22 = (len(recoverable_22)/len(PeriodIn))*N_mult
N_rec03_22 = (len(recoverable_03_22)/len(PeriodIn))*N_mult
N_rec1_22 = (len(recoverable_1_22)/len(PeriodIn))*N_mult
N_rec10_22 = (len(recoverable_10_22)/len(PeriodIn))*N_mult
N_rec30_22 = (len(recoverable_30_22)/len(PeriodIn))*N_mult
N_rec100_22 = (len(recoverable_100_22)/len(PeriodIn))*N_mult
N_rec1000_22 = (len(recoverable_1000_22)/len(PeriodIn))*N_mult
N_rec_195 = (len(recoverable_195)/len(PeriodIn))*N_mult
N_rec03_195 = (len(recoverable_03_195)/len(PeriodIn))*N_mult
N_rec1_195 = (len(recoverable_1_195)/len(PeriodIn))*N_mult
N_rec10_195 = (len(recoverable_10_195)/len(PeriodIn))*N_mult
N_rec30_195 = (len(recoverable_30_195)/len(PeriodIn))*N_mult
N_rec100_195 = (len(recoverable_100_195)/len(PeriodIn))*N_mult
N_rec1000_195 = (len(recoverable_1000_195)/len(PeriodIn))*N_mult
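# accumulate this field's weighted counts; they are summed over all fields after the loop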
N_totalfast_array.append(float(N_all))
N_totalobservablefast_array.append(float(N_obs))
N_totalrecoverablefast_array.append(float(N_rec))
N_totalfast_array_03.append(float(N_all03))
N_totalobservablefast_array_03.append(float(N_obs03))
N_totalrecoverablefast_array_03.append(float(N_rec03))
N_totalfast_array_1.append(float(N_all1))
N_totalobservablefast_array_1.append(float(N_obs1))
N_totalrecoverablefast_array_1.append(float(N_rec1))
N_totalfast_array_10.append(float(N_all10))
N_totalobservablefast_array_10.append(float(N_obs10))
N_totalrecoverablefast_array_10.append(float(N_rec10))
N_totalfast_array_30.append(float(N_all30))
N_totalobservablefast_array_30.append(float(N_obs30))
N_totalrecoverablefast_array_30.append(float(N_rec30))
N_totalfast_array_100.append(float(N_all100))
N_totalobservablefast_array_100.append(float(N_obs100))
N_totalrecoverablefast_array_100.append(float(N_rec100))
N_totalfast_array_1000.append(float(N_all1000))
N_totalobservablefast_array_1000.append(float(N_obs1000))
N_totalrecoverablefast_array_1000.append(float(N_rec1000))
N_totalfast22_array.append(float(N_all_22))
N_totalobservablefast22_array.append(float(N_obs_22))
N_totalrecoverablefast22_array.append(float(N_rec_22))
N_totalfast22_array_03.append(float(N_all03_22))
N_totalobservablefast22_array_03.append(float(N_obs03_22))
N_totalrecoverablefast22_array_03.append(float(N_rec03_22))
N_totalfast22_array_1.append(float(N_all1_22))
N_totalobservablefast22_array_1.append(float(N_obs1_22))
N_totalrecoverablefast22_array_1.append(float(N_rec1_22))
N_totalfast22_array_10.append(float(N_all10_22))
N_totalobservablefast22_array_10.append(float(N_obs10_22))
N_totalrecoverablefast22_array_10.append(float(N_rec10_22))
N_totalfast22_array_30.append(float(N_all30_22))
N_totalobservablefast22_array_30.append(float(N_obs30_22))
N_totalrecoverablefast22_array_30.append(float(N_rec30_22))
N_totalfast22_array_100.append(float(N_all100_22))
N_totalobservablefast22_array_100.append(float(N_obs100_22))
N_totalrecoverablefast22_array_100.append(float(N_rec100_22))
N_totalfast22_array_1000.append(float(N_all1000_22))
N_totalobservablefast22_array_1000.append(float(N_obs1000_22))
N_totalrecoverablefast22_array_1000.append(float(N_rec1000_22))
N_totalfast195_array.append(float(N_all_195))
N_totalobservablefast195_array.append(float(N_obs_195))
N_totalrecoverablefast195_array.append(float(N_rec_195))
N_totalfast195_array_03.append(float(N_all03_195))
N_totalobservablefast195_array_03.append(float(N_obs03_195))
N_totalrecoverablefast195_array_03.append(float(N_rec03_195))
N_totalfast195_array_1.append(float(N_all1_195))
N_totalobservablefast195_array_1.append(float(N_obs1_195))
N_totalrecoverablefast195_array_1.append(float(N_rec1_195))
N_totalfast195_array_10.append(float(N_all10_195))
N_totalobservablefast195_array_10.append(float(N_obs10_195))
N_totalrecoverablefast195_array_10.append(float(N_rec10_195))
N_totalfast195_array_30.append(float(N_all30_195))
N_totalobservablefast195_array_30.append(float(N_obs30_195))
N_totalrecoverablefast195_array_30.append(float(N_rec30_195))
N_totalfast195_array_100.append(float(N_all100_195))
N_totalobservablefast195_array_100.append(float(N_obs100_195))
N_totalrecoverablefast195_array_100.append(float(N_rec100_195))
N_totalfast195_array_1000.append(float(N_all1000_195))
N_totalobservablefast195_array_1000.append(float(N_obs1000_195))
N_totalrecoverablefast195_array_1000.append(float(N_rec1000_195))
N_totalfast = np.sum(N_totalfast_array)
N_totalfast_03 = np.sum(N_totalfast_array_03)
N_totalfast_1 = np.sum(N_totalfast_array_1)
N_totalfast_10 = np.sum(N_totalfast_array_10)
N_totalfast_30 = np.sum(N_totalfast_array_30)
N_totalfast_100 = np.sum(N_totalfast_array_100)
N_totalfast_1000 = np.sum(N_totalfast_array_1000)
N_totalobservablefast = np.sum(N_totalobservablefast_array)
N_totalobservablefast_03 = np.sum(N_totalobservablefast_array_03)
N_totalobservablefast_1 = np.sum(N_totalobservablefast_array_1)
N_totalobservablefast_10 = np.sum(N_totalobservablefast_array_10)
N_totalobservablefast_30 = np.sum(N_totalobservablefast_array_30)
N_totalobservablefast_100 = np.sum(N_totalobservablefast_array_100)
N_totalobservablefast_1000 = np.sum(N_totalobservablefast_array_1000)
N_totalrecoverablefast = np.sum(N_totalrecoverablefast_array)
N_totalrecoverablefast_03 = np.sum(N_totalrecoverablefast_array_03)
N_totalrecoverablefast_1 = np.sum(N_totalrecoverablefast_array_1)
N_totalrecoverablefast_10 = np.sum(N_totalrecoverablefast_array_10)
N_totalrecoverablefast_30 = np.sum(N_totalrecoverablefast_array_30)
N_totalrecoverablefast_100 = np.sum(N_totalrecoverablefast_array_100)
N_totalrecoverablefast_1000 = np.sum(N_totalrecoverablefast_array_1000)
N_totalfast22 = np.sum(N_totalfast22_array)
N_totalfast22_03 = np.sum(N_totalfast22_array_03)
N_totalfast22_1 = np.sum(N_totalfast22_array_1)
N_totalfast22_10 = np.sum(N_totalfast22_array_10)
N_totalfast22_30 = np.sum(N_totalfast22_array_30)
N_totalfast22_100 = np.sum(N_totalfast22_array_100)
N_totalfast22_1000 = np.sum(N_totalfast22_array_1000)
N_totalobservablefast22 = np.sum(N_totalobservablefast22_array)
N_totalobservablefast22_03 = np.sum(N_totalobservablefast22_array_03)
N_totalobservablefast22_1 = np.sum(N_totalobservablefast22_array_1)
N_totalobservablefast22_10 = np.sum(N_totalobservablefast22_array_10)
N_totalobservablefast22_30 = np.sum(N_totalobservablefast22_array_30)
N_totalobservablefast22_100 = np.sum(N_totalobservablefast22_array_100)
N_totalobservablefast22_1000 = np.sum(N_totalobservablefast22_array_1000)
N_totalrecoverablefast22 = np.sum(N_totalrecoverablefast22_array)
N_totalrecoverablefast22_03 = np.sum(N_totalrecoverablefast22_array_03)
N_totalrecoverablefast22_1 = np.sum(N_totalrecoverablefast22_array_1)
N_totalrecoverablefast22_10 = np.sum(N_totalrecoverablefast22_array_10)
N_totalrecoverablefast22_30 = np.sum(N_totalrecoverablefast22_array_30)
N_totalrecoverablefast22_100 = np.sum(N_totalrecoverablefast22_array_100)
N_totalrecoverablefast22_1000 = np.sum(N_totalrecoverablefast22_array_1000)
N_totalfast195 = np.sum(N_totalfast195_array)
N_totalfast195_03 = np.sum(N_totalfast195_array_03)
N_totalfast195_1 = np.sum(N_totalfast195_array_1)
N_totalfast195_10 = np.sum(N_totalfast195_array_10)
N_totalfast195_30 = np.sum(N_totalfast195_array_30)
N_totalfast195_100 = np.sum(N_totalfast195_array_100)
N_totalfast195_1000 = np.sum(N_totalfast195_array_1000)
N_totalobservablefast195 = np.sum(N_totalobservablefast195_array)
N_totalobservablefast195_03 = np.sum(N_totalobservablefast195_array_03)
N_totalobservablefast195_1 = np.sum(N_totalobservablefast195_array_1)
N_totalobservablefast195_10 = np.sum(N_totalobservablefast195_array_10)
N_totalobservablefast195_30 = np.sum(N_totalobservablefast195_array_30)
N_totalobservablefast195_100 = np.sum(N_totalobservablefast195_array_100)
N_totalobservablefast195_1000 = np.sum(N_totalobservablefast195_array_1000)
N_totalrecoverablefast195 = np.sum(N_totalrecoverablefast195_array)
N_totalrecoverablefast195_03 = np.sum(N_totalrecoverablefast195_array_03)
N_totalrecoverablefast195_1 = np.sum(N_totalrecoverablefast195_array_1)
N_totalrecoverablefast195_10 = np.sum(N_totalrecoverablefast195_array_10)
N_totalrecoverablefast195_30 = np.sum(N_totalrecoverablefast195_array_30)
N_totalrecoverablefast195_100 = np.sum(N_totalrecoverablefast195_array_100)
N_totalrecoverablefast195_1000 = np.sum(N_totalrecoverablefast195_array_1000)
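# "whole" recovery percent = recoverable/observable; "overall" recovery percent = recoverable/all systems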
wholerecoverypercent_fast = (N_totalrecoverablefast/N_totalobservablefast)*100
wholerecoverypercent_fast_03 = (N_totalrecoverablefast_03/N_totalobservablefast_03)*100
wholerecoverypercent_fast_1 = (N_totalrecoverablefast_1/N_totalobservablefast_1)*100
wholerecoverypercent_fast_10 = (N_totalrecoverablefast_10/N_totalobservablefast_10)*100
wholerecoverypercent_fast_30 = (N_totalrecoverablefast_30/N_totalobservablefast_30)*100
wholerecoverypercent_fast_100 = (N_totalrecoverablefast_100/N_totalobservablefast_100)*100
wholerecoverypercent_fast_1000 = (N_totalrecoverablefast_1000/N_totalobservablefast_1000)*100
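# Poisson counting uncertainty on the recovered fraction: sqrt(N_rec)/N_obs * 100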
sigmafast = ((N_totalrecoverablefast**(1/2))/N_totalobservablefast)*100
sigmafast_03 = ((N_totalrecoverablefast_03**(1/2))/N_totalobservablefast_03)*100
sigmafast_1 = ((N_totalrecoverablefast_1**(1/2))/N_totalobservablefast_1)*100
sigmafast_10 = ((N_totalrecoverablefast_10**(1/2))/N_totalobservablefast_10)*100
sigmafast_30 = ((N_totalrecoverablefast_30**(1/2))/N_totalobservablefast_30)*100
sigmafast_100 = ((N_totalrecoverablefast_100**(1/2))/N_totalobservablefast_100)*100
sigmafast_1000 = ((N_totalrecoverablefast_1000**(1/2))/N_totalobservablefast_1000)*100
overallrecoverypercent_fast = (N_totalrecoverablefast/N_totalfast)*100
overallrecoverypercent_fast_03 = (N_totalrecoverablefast_03/N_totalfast_03)*100
overallrecoverypercent_fast_1 = (N_totalrecoverablefast_1/N_totalfast_1)*100
overallrecoverypercent_fast_10 = (N_totalrecoverablefast_10/N_totalfast_10)*100
overallrecoverypercent_fast_30 = (N_totalrecoverablefast_30/N_totalfast_30)*100
overallrecoverypercent_fast_100 = (N_totalrecoverablefast_100/N_totalfast_100)*100
overallrecoverypercent_fast_1000 = (N_totalrecoverablefast_1000/N_totalfast_1000)*100
overallsigmafast = ((N_totalrecoverablefast**(1/2))/N_totalfast)*100
overallsigmafast_03 = ((N_totalrecoverablefast_03**(1/2))/N_totalfast_03)*100
overallsigmafast_1 = ((N_totalrecoverablefast_1**(1/2))/N_totalfast_1)*100
overallsigmafast_10 = ((N_totalrecoverablefast_10**(1/2))/N_totalfast_10)*100
overallsigmafast_30 = ((N_totalrecoverablefast_30**(1/2))/N_totalfast_30)*100
overallsigmafast_100 = ((N_totalrecoverablefast_100**(1/2))/N_totalfast_100)*100
overallsigmafast_1000 = ((N_totalrecoverablefast_1000**(1/2))/N_totalfast_1000)*100
wholerecoverypercent_fast22 = (N_totalrecoverablefast22/N_totalobservablefast22)*100
wholerecoverypercent_fast22_03 = (N_totalrecoverablefast22_03/N_totalobservablefast22_03)*100
wholerecoverypercent_fast22_1 = (N_totalrecoverablefast22_1/N_totalobservablefast22_1)*100
wholerecoverypercent_fast22_10 = (N_totalrecoverablefast22_10/N_totalobservablefast22_10)*100
wholerecoverypercent_fast22_30 = (N_totalrecoverablefast22_30/N_totalobservablefast22_30)*100
wholerecoverypercent_fast22_100 = (N_totalrecoverablefast22_100/N_totalobservablefast22_100)*100
wholerecoverypercent_fast22_1000 = (N_totalrecoverablefast22_1000/N_totalobservablefast22_1000)*100
sigmafast22 = ((N_totalrecoverablefast22**(1/2))/N_totalobservablefast22)*100
sigmafast22_03 = ((N_totalrecoverablefast22_03**(1/2))/N_totalobservablefast22_03)*100
sigmafast22_1 = ((N_totalrecoverablefast22_1**(1/2))/N_totalobservablefast22_1)*100
sigmafast22_10 = ((N_totalrecoverablefast22_10**(1/2))/N_totalobservablefast22_10)*100
sigmafast22_30 = ((N_totalrecoverablefast22_30**(1/2))/N_totalobservablefast22_30)*100
sigmafast22_100 = ((N_totalrecoverablefast22_100**(1/2))/N_totalobservablefast22_100)*100
sigmafast22_1000 = ((N_totalrecoverablefast22_1000**(1/2))/N_totalobservablefast22_1000)*100
overallrecoverypercent_fast22 = (N_totalrecoverablefast22/N_totalfast22)*100
overallrecoverypercent_fast22_03 = (N_totalrecoverablefast22_03/N_totalfast22_03)*100
overallrecoverypercent_fast22_1 = (N_totalrecoverablefast22_1/N_totalfast22_1)*100
overallrecoverypercent_fast22_10 = (N_totalrecoverablefast22_10/N_totalfast22_10)*100
overallrecoverypercent_fast22_30 = (N_totalrecoverablefast22_30/N_totalfast22_30)*100
overallrecoverypercent_fast22_100 = (N_totalrecoverablefast22_100/N_totalfast22_100)*100
overallrecoverypercent_fast22_1000 = (N_totalrecoverablefast22_1000/N_totalfast22_1000)*100
overallsigmafast22 = ((N_totalrecoverablefast22**(1/2))/N_totalfast22)*100
overallsigmafast22_03 = ((N_totalrecoverablefast22_03**(1/2))/N_totalfast22_03)*100
overallsigmafast22_1 = ((N_totalrecoverablefast22_1**(1/2))/N_totalfast22_1)*100
overallsigmafast22_10 = ((N_totalrecoverablefast22_10**(1/2))/N_totalfast22_10)*100
overallsigmafast22_30 = ((N_totalrecoverablefast22_30**(1/2))/N_totalfast22_30)*100
overallsigmafast22_100 = ((N_totalrecoverablefast22_100**(1/2))/N_totalfast22_100)*100
overallsigmafast22_1000 = ((N_totalrecoverablefast22_1000**(1/2))/N_totalfast22_1000)*100
wholerecoverypercent_fast195 = (N_totalrecoverablefast195/N_totalobservablefast195)*100
wholerecoverypercent_fast195_03 = (N_totalrecoverablefast195_03/N_totalobservablefast195_03)*100
wholerecoverypercent_fast195_1 = (N_totalrecoverablefast195_1/N_totalobservablefast195_1)*100
wholerecoverypercent_fast195_10 = (N_totalrecoverablefast195_10/N_totalobservablefast195_10)*100
wholerecoverypercent_fast195_30 = (N_totalrecoverablefast195_30/N_totalobservablefast195_30)*100
wholerecoverypercent_fast195_100 = (N_totalrecoverablefast195_100/N_totalobservablefast195_100)*100
wholerecoverypercent_fast195_1000 = (N_totalrecoverablefast195_1000/N_totalobservablefast195_1000)*100
sigmafast195 = ((N_totalrecoverablefast195**(1/2))/N_totalobservablefast195)*100
sigmafast195_03 = ((N_totalrecoverablefast195_03**(1/2))/N_totalobservablefast195_03)*100
sigmafast195_1 = ((N_totalrecoverablefast195_1**(1/2))/N_totalobservablefast195_1)*100
sigmafast195_10 = ((N_totalrecoverablefast195_10**(1/2))/N_totalobservablefast195_10)*100
sigmafast195_30 = ((N_totalrecoverablefast195_30**(1/2))/N_totalobservablefast195_30)*100
sigmafast195_100 = ((N_totalrecoverablefast195_100**(1/2))/N_totalobservablefast195_100)*100
sigmafast195_1000 = ((N_totalrecoverablefast195_1000**(1/2))/N_totalobservablefast195_1000)*100
overallrecoverypercent_fast195 = (N_totalrecoverablefast195/N_totalfast195)*100
overallrecoverypercent_fast195_03 = (N_totalrecoverablefast195_03/N_totalfast195_03)*100
overallrecoverypercent_fast195_1 = (N_totalrecoverablefast195_1/N_totalfast195_1)*100
overallrecoverypercent_fast195_10 = (N_totalrecoverablefast195_10/N_totalfast195_10)*100
overallrecoverypercent_fast195_30 = (N_totalrecoverablefast195_30/N_totalfast195_30)*100
overallrecoverypercent_fast195_100 = (N_totalrecoverablefast195_100/N_totalfast195_100)*100
overallrecoverypercent_fast195_1000 = (N_totalrecoverablefast195_1000/N_totalfast195_1000)*100
overallsigmafast195 = ((N_totalrecoverablefast195**(1/2))/N_totalfast195)*100
overallsigmafast195_03 = ((N_totalrecoverablefast195_03**(1/2))/N_totalfast195_03)*100
overallsigmafast195_1 = ((N_totalrecoverablefast195_1**(1/2))/N_totalfast195_1)*100
overallsigmafast195_10 = ((N_totalrecoverablefast195_10**(1/2))/N_totalfast195_10)*100
overallsigmafast195_30 = ((N_totalrecoverablefast195_30**(1/2))/N_totalfast195_30)*100
overallsigmafast195_100 = ((N_totalrecoverablefast195_100**(1/2))/N_totalfast195_100)*100
overallsigmafast195_1000 = ((N_totalrecoverablefast195_1000**(1/2))/N_totalfast195_1000)*100
print("N_totalfast = ", N_totalfast, "and in log = ", np.log10(N_totalfast), "**** N_totalobservablefast = ", N_totalobservablefast, "and in log = ", np.log10(N_totalobservablefast), "**** N_totalrecoverablefast = ", N_totalrecoverablefast, "and in log = ", np.log10(N_totalrecoverablefast))
print("N_totalfast_03 = ", N_totalfast_03, "and in log = ", np.log10(N_totalfast_03), "**** N_totalobservablefast_03 = ", N_totalobservablefast_03, "and in log = ", np.log10(N_totalobservablefast_03), "**** N_totalrecoverablefast_03 = ", N_totalrecoverablefast_03, "and in log = ", np.log10(N_totalrecoverablefast_03))
print("N_totalfast_1 = ", N_totalfast_1, "and in log = ", np.log10(N_totalfast_1), "**** N_totalobservablefast_1 = ", N_totalobservablefast_1, "and in log = ", np.log10(N_totalobservablefast_1), "**** N_totalrecoverablefast_1 = ", N_totalrecoverablefast_1, "and in log = ", np.log10(N_totalrecoverablefast_1))
print("N_totalfast_10 = ", N_totalfast_10, "and in log = ", np.log10(N_totalfast_10), "**** N_totalobservablefast_10 = ", N_totalobservablefast_10, "and in log = ", np.log10(N_totalobservablefast_10), "**** N_totalrecoverablefast_10 = ", N_totalrecoverablefast_10, "and in log = ", np.log10(N_totalrecoverablefast_10))
print("N_totalfast_30 = ", N_totalfast_30, "and in log = ", np.log10(N_totalfast_30), "**** N_totalobservablefast_30 = ", N_totalobservablefast_30, "and in log = ", np.log10(N_totalobservablefast_30), "**** N_totalrecoverablefast_30 = ", N_totalrecoverablefast_30, "and in log = ", np.log10(N_totalrecoverablefast_30))
print("N_totalfast_100 = ", N_totalfast_100, "and in log = ", np.log10(N_totalfast_100), "**** N_totalobservablefast_100 = ", N_totalobservablefast_100, "and in log = ", np.log10(N_totalobservablefast_100), "**** N_totalrecoverablefast_100 = ", N_totalrecoverablefast_100, "and in log = ", np.log10(N_totalrecoverablefast_100))
print("N_totalfast_1000 = ", N_totalfast_1000, "and in log = ", np.log10(N_totalfast_1000), "**** N_totalobservablefast_1000 = ", N_totalobservablefast_1000, "and in log = ", np.log10(N_totalobservablefast_1000), "**** N_totalrecoverablefast_1000 = ", N_totalrecoverablefast_1000, "and in log = ", np.log10(N_totalrecoverablefast_1000))
print("********************************")
print("wholerecoverypercent_fast = $", wholerecoverypercent_fast, "/pm", sigmafast, "$")
print("wholerecoverypercent_fast_03 = $", wholerecoverypercent_fast_03, "/pm", sigmafast_03, "$")
print("wholerecoverypercent_fast_1 = $", wholerecoverypercent_fast_1, "/pm", sigmafast_1, "$")
print("wholerecoverypercent_fast_10 = $", wholerecoverypercent_fast_10, "/pm", sigmafast_10, "$")
print("wholerecoverypercent_fast_30 = $", wholerecoverypercent_fast_30, "/pm", sigmafast_30, "$")
print("wholerecoverypercent_fast_100 = $", wholerecoverypercent_fast_100, "/pm", sigmafast_100, "$")
print("wholerecoverypercent_fast_1000 = $", wholerecoverypercent_fast_1000, "/pm", sigmafast_1000, "$")
print("********************************")
print("overallrecoverypercent_fast = $", overallrecoverypercent_fast, "/pm", overallsigmafast, "$")
print("overallrecoverypercent_fast_03 = $", overallrecoverypercent_fast_03, "/pm", overallsigmafast_03, "$")
print("overallrecoverypercent_fast_1 = $", overallrecoverypercent_fast_1, "/pm", overallsigmafast_1, "$")
print("overallrecoverypercent_fast_10 = $", overallrecoverypercent_fast_10, "/pm", overallsigmafast_10, "$")
print("overallrecoverypercent_fast_30 = $", overallrecoverypercent_fast_30, "/pm", overallsigmafast_30, "$")
print("overallrecoverypercent_fast_100 = $", overallrecoverypercent_fast_100, "/pm", overallsigmafast_100, "$")
print("overallrecoverypercent_fast_1000 = $", overallrecoverypercent_fast_1000, "/pm", overallsigmafast_1000, "$")
print("################################")
print("N_totalfast22 = ", N_totalfast22, "and in log = ", np.log10(N_totalfast22), "**** N_totalobservablefast22 = ", N_totalobservablefast22, "and in log = ", np.log10(N_totalobservablefast22), "**** N_totalrecoverablefast22 = ", N_totalrecoverablefast22, "and in log = ", np.log10(N_totalrecoverablefast22))
print("N_totalfast22_03 = ", N_totalfast22_03, "and in log = ", np.log10(N_totalfast22_03), "**** N_totalobservablefast22_03 = ", N_totalobservablefast22_03, "and in log = ", np.log10(N_totalobservablefast22_03), "**** N_totalrecoverablefast22_03 = ", N_totalrecoverablefast22_03, "and in log = ", np.log10(N_totalrecoverablefast22_03))
print("N_totalfast22_1 = ", N_totalfast22_1, "and in log = ", np.log10(N_totalfast22_1), "**** N_totalobservablefast22_1 = ", N_totalobservablefast22_1, "and in log = ", np.log10(N_totalobservablefast22_1), "**** N_totalrecoverablefast22_1 = ", N_totalrecoverablefast22_1, "and in log = ", np.log10(N_totalrecoverablefast22_1))
print("N_totalfast22_10 = ", N_totalfast22_10, "and in log = ", np.log10(N_totalfast22_10), "**** N_totalobservablefast22_10 = ", N_totalobservablefast22_10, "and in log = ", np.log10(N_totalobservablefast22_10), "**** N_totalrecoverablefast22_10 = ", N_totalrecoverablefast22_10, "and in log = ", np.log10(N_totalrecoverablefast22_10))
print("N_totalfast22_30 = ", N_totalfast22_30, "and in log = ", np.log10(N_totalfast22_30), "**** N_totalobservablefast22_30 = ", N_totalobservablefast22_30, "and in log = ", np.log10(N_totalobservablefast22_30), "**** N_totalrecoverablefast22_30 = ", N_totalrecoverablefast22_30, "and in log = ", np.log10(N_totalrecoverablefast22_30))
print("N_totalfast22_100 = ", N_totalfast22_100, "and in log = ", np.log10(N_totalfast22_100), "**** N_totalobservablefast22_100 = ", N_totalobservablefast22_100, "and in log = ", np.log10(N_totalobservablefast22_100), "**** N_totalrecoverablefast22_100 = ", N_totalrecoverablefast22_100, "and in log = ", np.log10(N_totalrecoverablefast22_100))
print("N_totalfast22_1000 = ", N_totalfast22_1000, "and in log = ", np.log10(N_totalfast22_1000), "**** N_totalobservablefast22_1000 = ", N_totalobservablefast22_1000, "and in log = ", np.log10(N_totalobservablefast22_1000), "**** N_totalrecoverablefast22_1000 = ", N_totalrecoverablefast22_1000, "and in log = ", np.log10(N_totalrecoverablefast22_1000))
print("********************************")
print("wholerecoverypercent_fast22 = $", wholerecoverypercent_fast22, "/pm", sigmafast22, "$")
print("wholerecoverypercent_fast22_03 = $", wholerecoverypercent_fast22_03, "/pm", sigmafast22_03, "$")
print("wholerecoverypercent_fast22_1 = $", wholerecoverypercent_fast22_1, "/pm", sigmafast22_1, "$")
print("wholerecoverypercent_fast22_10 = $", wholerecoverypercent_fast22_10, "/pm", sigmafast22_10, "$")
print("wholerecoverypercent_fast22_30 = $", wholerecoverypercent_fast22_30, "/pm", sigmafast22_30, "$")
print("wholerecoverypercent_fast22_100 = $", wholerecoverypercent_fast22_100, "/pm", sigmafast22_100, "$")
print("wholerecoverypercent_fast22_1000 = $", wholerecoverypercent_fast22_1000, "/pm", sigmafast22_1000, "$")
print("********************************")
print("overallrecoverypercent_fast22 = $", overallrecoverypercent_fast22, "/pm", overallsigmafast22, "$")
print("overallrecoverypercent_fast22_03 = $", overallrecoverypercent_fast22_03, "/pm", overallsigmafast22_03, "$")
print("overallrecoverypercent_fast22_1 = $", overallrecoverypercent_fast22_1, "/pm", overallsigmafast22_1, "$")
print("overallrecoverypercent_fast22_10 = $", overallrecoverypercent_fast22_10, "/pm", overallsigmafast22_10, "$")
print("overallrecoverypercent_fast22_30 = $", overallrecoverypercent_fast22_30, "/pm", overallsigmafast22_30, "$")
print("overallrecoverypercent_fast22_100 = $", overallrecoverypercent_fast22_100, "/pm", overallsigmafast22_100, "$")
print("overallrecoverypercent_fast22_1000 = $", overallrecoverypercent_fast22_1000, "/pm", overallsigmafast22_1000, "$")
print("###############################")
print("N_totalfast195 = ", N_totalfast195, "and in log = ", np.log10(N_totalfast195), "**** N_totalobservablefast195 = ", N_totalobservablefast195, "and in log = ", np.log10(N_totalobservablefast195), "**** N_totalrecoverablefast195 = ", N_totalrecoverablefast195, "and in log = ", np.log10(N_totalrecoverablefast195))
print("N_totalfast195_03 = ", N_totalfast195_03, "and in log = ", np.log10(N_totalfast195_03), "**** N_totalobservablefast195_03 = ", N_totalobservablefast195_03, "and in log = ", np.log10(N_totalobservablefast195_03), "**** N_totalrecoverablefast195_03 = ", N_totalrecoverablefast195_03, "and in log = ", np.log10(N_totalrecoverablefast195_03))
print("N_totalfast195_1 = ", N_totalfast195_1, "and in log = ", np.log10(N_totalfast195_1), "**** N_totalobservablefast195_1 = ", N_totalobservablefast195_1, "and in log = ", np.log10(N_totalobservablefast195_1), "**** N_totalrecoverablefast195_1 = ", N_totalrecoverablefast195_1, "and in log = ", np.log10(N_totalrecoverablefast195_1))
print("N_totalfast195_10 = ", N_totalfast195_10, "and in log = ", np.log10(N_totalfast195_10), "**** N_totalobservablefast195_10 = ", N_totalobservablefast195_10, "and in log = ", np.log10(N_totalobservablefast195_10), "**** N_totalrecoverablefast195_10 = ", N_totalrecoverablefast195_10, "and in log = ", np.log10(N_totalrecoverablefast195_10))
print("N_totalfast195_30 = ", N_totalfast195_30, "and in log = ", np.log10(N_totalfast195_30), "**** N_totalobservablefast195_30 = ", N_totalobservablefast195_30, "and in log = ", np.log10(N_totalobservablefast195_30), "**** N_totalrecoverablefast195_30 = ", N_totalrecoverablefast195_30, "and in log = ", np.log10(N_totalrecoverablefast195_30))
print("N_totalfast195_100 = ", N_totalfast195_100, "and in log = ", np.log10(N_totalfast195_100), "**** N_totalobservablefast195_100 = ", N_totalobservablefast195_100, "and in log = ", np.log10(N_totalobservablefast195_100), "**** N_totalrecoverablefast195_100 = ", N_totalrecoverablefast195_100, "and in log = ", np.log10(N_totalrecoverablefast195_100))
print("N_totalfast195_1000 = ", N_totalfast195_1000, "and in log = ", np.log10(N_totalfast195_1000), "**** N_totalobservablefast195_1000 = ", N_totalobservablefast195_1000, "and in log = ", np.log10(N_totalobservablefast195_1000), "**** N_totalrecoverablefast195_1000 = ", N_totalrecoverablefast195_1000, "and in log = ", np.log10(N_totalrecoverablefast195_1000))
print("********************************")
print("wholerecoverypercent_fast195 = $", wholerecoverypercent_fast195, "/pm", sigmafast195, "$")
print("wholerecoverypercent_fast195_03 = $", wholerecoverypercent_fast195_03, "/pm", sigmafast195_03, "$")
print("wholerecoverypercent_fast195_1 = $", wholerecoverypercent_fast195_1, "/pm", sigmafast195_1, "$")
print("wholerecoverypercent_fast195_10 = $", wholerecoverypercent_fast195_10, "/pm", sigmafast195_10, "$")
print("wholerecoverypercent_fast195_30 = $", wholerecoverypercent_fast195_30, "/pm", sigmafast195_30, "$")
print("wholerecoverypercent_fast195_100 = $", wholerecoverypercent_fast195_100, "/pm", sigmafast195_100, "$")
print("wholerecoverypercent_fast195_1000 = $", wholerecoverypercent_fast195_1000, "/pm", sigmafast195_1000, "$")
print("********************************")
print("overallrecoverypercent_fast195 = $", overallrecoverypercent_fast195, "/pm", overallsigmafast195, "$")
print("overallrecoverypercent_fast195_03 = $", overallrecoverypercent_fast195_03, "/pm", overallsigmafast195_03, "$")
print("overallrecoverypercent_fast195_1 = $", overallrecoverypercent_fast195_1, "/pm", overallsigmafast195_1, "$")
print("overallrecoverypercent_fast195_10 = $", overallrecoverypercent_fast195_10, "/pm", overallsigmafast195_10, "$")
print("overallrecoverypercent_fast195_30 = $", overallrecoverypercent_fast195_30, "/pm", overallsigmafast195_30, "$")
print("overallrecoverypercent_fast195_100 = $", overallrecoverypercent_fast195_100, "/pm", overallsigmafast195_100, "$")
print("overallrecoverypercent_fast195_1000 = $", overallrecoverypercent_fast195_1000, "/pm", overallsigmafast195_1000, "$")
print("#############################")
print("binarypercent_22 = $", (N_totalfast22/N_totalfast)*100, "/pm", ((N_totalfast22**(1/2))/N_totalfast)*100, "$")
print("binarypercent_195 = $", (N_totalfast195/N_totalfast)*100, "/pm", ((N_totalfast195**(1/2))/N_totalfast)*100, "$")
print("binarypercent_03 = $", (N_totalfast_03/N_totalfast)*100, "/pm", ((N_totalfast_03**(1/2))/N_totalfast)*100, "$")
print("binarypercent_1 = $", (N_totalfast_1/N_totalfast)*100, "/pm", ((N_totalfast_1**(1/2))/N_totalfast)*100, "$")
print("binarypercent_10 = $", (N_totalfast_10/N_totalfast)*100, "/pm", ((N_totalfast_10**(1/2))/N_totalfast)*100, "$")
print("binarypercent_30 = $", (N_totalfast_30/N_totalfast)*100, "/pm", ((N_totalfast_30**(1/2))/N_totalfast)*100, "$")
print("binarypercent_100 = $", (N_totalfast_100/N_totalfast)*100, "/pm", ((N_totalfast_100**(1/2))/N_totalfast)*100, "$")
print("binarypercent_1000 = $", (N_totalfast_1000/N_totalfast)*100, "/pm", ((N_totalfast_1000**(1/2))/N_totalfast)*100, "$")
print("observablepercent_03 = $", (N_totalobservablefast_03/N_totalfast_03)*100, "/pm", ((N_totalobservablefast_03**(1/2))/N_totalfast_03)*100, "$")
print("observablepercent_1 = $", (N_totalobservablefast_1/N_totalfast_1)*100, "/pm", ((N_totalobservablefast_1**(1/2))/N_totalfast_1)*100, "$")
print("observablepercent_10 = $", (N_totalobservablefast_10/N_totalfast_10)*100, "/pm", ((N_totalobservablefast_10**(1/2))/N_totalfast_10)*100, "$")
print("observablepercent_30 = $", (N_totalobservablefast_30/N_totalfast_30)*100, "/pm", ((N_totalobservablefast_30**(1/2))/N_totalfast_30)*100, "$")
print("observablepercent_100 = $", (N_totalobservablefast_100/N_totalfast_100)*100, "/pm", ((N_totalobservablefast_100**(1/2))/N_totalfast_100)*100, "$")
print("observablepercent_1000 = $", (N_totalobservablefast_1000/N_totalfast_1000)*100, "/pm", ((N_totalobservablefast_1000**(1/2))/N_totalfast_1000)*100, "$")
print("observablepercent = $", (N_totalobservablefast/N_totalfast)*100, "/pm", ((N_totalobservablefast**(1/2))/N_totalfast)*100, "$")
print("observablepercent22 = $", (N_totalobservablefast22/N_totalfast22)*100, "/pm", ((N_totalobservablefast22**(1/2))/N_totalfast22)*100, "$")
print("observablepercent195 = $", (N_totalobservablefast195/N_totalfast195)*100, "/pm", ((N_totalobservablefast195**(1/2))/N_totalfast195)*100, "$")
for fileobsDist_ in sorted(allFiles_obsDist):
filename = fileobsDist_[77:] #when file path no longer has /old in it, will be fileobsDist_[73:]
fileid = filename.replace('output_file.csv', '') # remove the suffix (str.strip would remove characters, not the suffix)
print("I'm starting " + fileid)
datobsDist = pd.read_csv(fileobsDist_, sep = ',', header=2)
PeriodIn = datobsDist['p'] # input period -- 'p' in data file
##########################################################
datobsDist1 = pd.read_csv(fileobsDist_, sep = ',', header=0, nrows=1)
N_tri = datobsDist1["NstarsTRILEGAL"][0]
Nall = len(PeriodIn)
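# weight the binary-fraction fit (fbFit, defined earlier in this script) by the simulated
# primary-mass distribution (histogrammed in mbins) to get the effective binary fraction fb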
m1hAll0, m1b = np.histogram(datobsDist["m1"], bins=mbins)
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/Nall*fbFit(m1val))
N_mult = N_tri*fb
##########################################################
if len(PeriodIn) == 0:
continue
if N_tri == 0:
continue
else:
PeriodOut = datobsDist['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean = datobsDist['appMagMean'] #apparent magnitude; used to make cuts at 24 (default), 22, and either ~19 (roughly Kepler's range, brighter than LSST can manage) or 19.5 (SNR = 10)
observable = datobsDist.loc[PeriodOut != -999].index
observable_03 = datobsDist.loc[(PeriodIn <= 0.3) & (PeriodOut != -999)].index
observable_1 = datobsDist.loc[(PeriodIn <= 1) & (PeriodOut != -999)].index
observable_10 = datobsDist.loc[(PeriodIn <= 10) & (PeriodOut != -999)].index
observable_30 = datobsDist.loc[(PeriodIn <= 30) & (PeriodOut != -999)].index
observable_100 = datobsDist.loc[(PeriodIn <= 100) & (PeriodOut != -999)].index
observable_1000 = datobsDist.loc[(PeriodIn <= 1000) & (PeriodOut != -999)].index
observable_22 = datobsDist.loc[(PeriodOut != -999) & (appMagMean <= 22.)].index
observable_03_22 = datobsDist.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1_22 = datobsDist.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_10_22 = datobsDist.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_30_22 = datobsDist.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_100_22 = datobsDist.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1000_22 = datobsDist.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_195 = datobsDist.loc[(PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_03_195 = datobsDist.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1_195 = datobsDist.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_10_195 = datobsDist.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_30_195 = datobsDist.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_100_195 = datobsDist.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1000_195 = datobsDist.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
fullP = abs(PeriodOut - PeriodIn)/PeriodIn
halfP = abs(PeriodOut - 0.5*PeriodIn)/(0.5*PeriodIn)
twiceP = abs(PeriodOut - 2*PeriodIn)/(2*PeriodIn)
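# a system is "recoverable" if the recovered period matches the input period, half of it,
# or twice it to within the fractional tolerance cutP (defined earlier in this script)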
recoverable = datobsDist.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_03 = datobsDist.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1 = datobsDist.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_10 = datobsDist.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_30 = datobsDist.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_100 = datobsDist.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1000 = datobsDist.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_22 = datobsDist.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_03_22 = datobsDist.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1_22 = datobsDist.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_10_22 = datobsDist.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_30_22 = datobsDist.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_100_22 = datobsDist.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1000_22 = datobsDist.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_195 = datobsDist.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_03_195 = datobsDist.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1_195 = datobsDist.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_10_195 = datobsDist.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_30_195 = datobsDist.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_100_195 = datobsDist.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1000_195 = datobsDist.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
P03 = datobsDist.loc[PeriodIn <= 0.3].index
P1 = datobsDist.loc[PeriodIn <= 1].index
P10 = datobsDist.loc[PeriodIn <= 10].index
P30 = datobsDist.loc[PeriodIn <= 30].index
P100 = datobsDist.loc[PeriodIn <= 100].index
P1000 = datobsDist.loc[PeriodIn <= 1000].index
P_22 = datobsDist.loc[appMagMean <= 22.].index
P03_22 = datobsDist.loc[(PeriodIn <= 0.3) & (appMagMean <= 22.)].index
P1_22 = datobsDist.loc[(PeriodIn <= 1) & (appMagMean <= 22.)].index
P10_22 = datobsDist.loc[(PeriodIn <= 10) & (appMagMean <= 22.)].index
P30_22 = datobsDist.loc[(PeriodIn <= 30) & (appMagMean <= 22.)].index
P100_22 = datobsDist.loc[(PeriodIn <= 100) & (appMagMean <= 22.)].index
P1000_22 = datobsDist.loc[(PeriodIn <= 1000) & (appMagMean <= 22.)].index
P_195 = datobsDist.loc[appMagMean <= 19.5].index
P03_195 = datobsDist.loc[(PeriodIn <= 0.3) & (appMagMean <= 19.5)].index
P1_195 = datobsDist.loc[(PeriodIn <= 1) & (appMagMean <= 19.5)].index
P10_195 = datobsDist.loc[(PeriodIn <= 10) & (appMagMean <= 19.5)].index
P30_195 = datobsDist.loc[(PeriodIn <= 30) & (appMagMean <= 19.5)].index
P100_195 = datobsDist.loc[(PeriodIn <= 100) & (appMagMean <= 19.5)].index
P1000_195 = datobsDist.loc[(PeriodIn <= 1000) & (appMagMean <= 19.5)].index
N_all = (len(PeriodIn)/len(PeriodIn))*N_mult
N_all03 = (len(P03)/len(PeriodIn))*N_mult
N_all1 = (len(P1)/len(PeriodIn))*N_mult
N_all10 = (len(P10)/len(PeriodIn))*N_mult
N_all30 = (len(P30)/len(PeriodIn))*N_mult
N_all100 = (len(P100)/len(PeriodIn))*N_mult
N_all1000 = (len(P1000)/len(PeriodIn))*N_mult
N_all_22 = (len(P_22)/len(PeriodIn))*N_mult
N_all03_22 = (len(P03_22)/len(PeriodIn))*N_mult
N_all1_22 = (len(P1_22)/len(PeriodIn))*N_mult
N_all10_22 = (len(P10_22)/len(PeriodIn))*N_mult
N_all30_22 = (len(P30_22)/len(PeriodIn))*N_mult
N_all100_22 = (len(P100_22)/len(PeriodIn))*N_mult
N_all1000_22 = (len(P1000_22)/len(PeriodIn))*N_mult
N_all_195 = (len(P_195)/len(PeriodIn))*N_mult
N_all03_195 = (len(P03_195)/len(PeriodIn))*N_mult
N_all1_195 = (len(P1_195)/len(PeriodIn))*N_mult
N_all10_195 = (len(P10_195)/len(PeriodIn))*N_mult
N_all30_195 = (len(P30_195)/len(PeriodIn))*N_mult
N_all100_195 = (len(P100_195)/len(PeriodIn))*N_mult
N_all1000_195 = (len(P1000_195)/len(PeriodIn))*N_mult
N_obs = (len(observable)/len(PeriodIn))*N_mult
N_obs03 = (len(observable_03)/len(PeriodIn))*N_mult
N_obs1 = (len(observable_1)/len(PeriodIn))*N_mult
N_obs10 = (len(observable_10)/len(PeriodIn))*N_mult
N_obs30 = (len(observable_30)/len(PeriodIn))*N_mult
N_obs100 = (len(observable_100)/len(PeriodIn))*N_mult
N_obs1000 = (len(observable_1000)/len(PeriodIn))*N_mult
N_obs_22 = (len(observable_22)/len(PeriodIn))*N_mult
N_obs03_22 = (len(observable_03_22)/len(PeriodIn))*N_mult
N_obs1_22 = (len(observable_1_22)/len(PeriodIn))*N_mult
N_obs10_22 = (len(observable_10_22)/len(PeriodIn))*N_mult
N_obs30_22 = (len(observable_30_22)/len(PeriodIn))*N_mult
N_obs100_22 = (len(observable_100_22)/len(PeriodIn))*N_mult
N_obs1000_22 = (len(observable_1000_22)/len(PeriodIn))*N_mult
N_obs_195 = (len(observable_195)/len(PeriodIn))*N_mult
N_obs03_195 = (len(observable_03_195)/len(PeriodIn))*N_mult
N_obs1_195 = (len(observable_1_195)/len(PeriodIn))*N_mult
N_obs10_195 = (len(observable_10_195)/len(PeriodIn))*N_mult
N_obs30_195 = (len(observable_30_195)/len(PeriodIn))*N_mult
N_obs100_195 = (len(observable_100_195)/len(PeriodIn))*N_mult
N_obs1000_195 = (len(observable_1000_195)/len(PeriodIn))*N_mult
N_rec = (len(recoverable)/len(PeriodIn))*N_mult
N_rec03 = (len(recoverable_03)/len(PeriodIn))*N_mult
N_rec1 = (len(recoverable_1)/len(PeriodIn))*N_mult
N_rec10 = (len(recoverable_10)/len(PeriodIn))*N_mult
N_rec30 = (len(recoverable_30)/len(PeriodIn))*N_mult
N_rec100 = (len(recoverable_100)/len(PeriodIn))*N_mult
N_rec1000 = (len(recoverable_1000)/len(PeriodIn))*N_mult
N_rec_22 = (len(recoverable_22)/len(PeriodIn))*N_mult
N_rec03_22 = (len(recoverable_03_22)/len(PeriodIn))*N_mult
N_rec1_22 = (len(recoverable_1_22)/len(PeriodIn))*N_mult
N_rec10_22 = (len(recoverable_10_22)/len(PeriodIn))*N_mult
N_rec30_22 = (len(recoverable_30_22)/len(PeriodIn))*N_mult
N_rec100_22 = (len(recoverable_100_22)/len(PeriodIn))*N_mult
N_rec1000_22 = (len(recoverable_1000_22)/len(PeriodIn))*N_mult
N_rec_195 = (len(recoverable_195)/len(PeriodIn))*N_mult
N_rec03_195 = (len(recoverable_03_195)/len(PeriodIn))*N_mult
N_rec1_195 = (len(recoverable_1_195)/len(PeriodIn))*N_mult
N_rec10_195 = (len(recoverable_10_195)/len(PeriodIn))*N_mult
N_rec30_195 = (len(recoverable_30_195)/len(PeriodIn))*N_mult
N_rec100_195 = (len(recoverable_100_195)/len(PeriodIn))*N_mult
N_rec1000_195 = (len(recoverable_1000_195)/len(PeriodIn))*N_mult
N_totalobsDist_array.append(float(N_all))
N_totalobservableobsDist_array.append(float(N_obs))
N_totalrecoverableobsDist_array.append(float(N_rec))
N_totalobsDist_array_03.append(float(N_all03))
N_totalobservableobsDist_array_03.append(float(N_obs03))
N_totalrecoverableobsDist_array_03.append(float(N_rec03))
N_totalobsDist_array_1.append(float(N_all1))
N_totalobservableobsDist_array_1.append(float(N_obs1))
N_totalrecoverableobsDist_array_1.append(float(N_rec1))
N_totalobsDist_array_10.append(float(N_all10))
N_totalobservableobsDist_array_10.append(float(N_obs10))
N_totalrecoverableobsDist_array_10.append(float(N_rec10))
N_totalobsDist_array_30.append(float(N_all30))
N_totalobservableobsDist_array_30.append(float(N_obs30))
N_totalrecoverableobsDist_array_30.append(float(N_rec30))
N_totalobsDist_array_100.append(float(N_all100))
N_totalobservableobsDist_array_100.append(float(N_obs100))
N_totalrecoverableobsDist_array_100.append(float(N_rec100))
N_totalobsDist_array_1000.append(float(N_all1000))
N_totalobservableobsDist_array_1000.append(float(N_obs1000))
N_totalrecoverableobsDist_array_1000.append(float(N_rec1000))
N_totalobsDist22_array.append(float(N_all_22))
N_totalobservableobsDist22_array.append(float(N_obs_22))
N_totalrecoverableobsDist22_array.append(float(N_rec_22))
N_totalobsDist22_array_03.append(float(N_all03_22))
N_totalobservableobsDist22_array_03.append(float(N_obs03_22))
N_totalrecoverableobsDist22_array_03.append(float(N_rec03_22))
N_totalobsDist22_array_1.append(float(N_all1_22))
N_totalobservableobsDist22_array_1.append(float(N_obs1_22))
N_totalrecoverableobsDist22_array_1.append(float(N_rec1_22))
N_totalobsDist22_array_10.append(float(N_all10_22))
N_totalobservableobsDist22_array_10.append(float(N_obs10_22))
N_totalrecoverableobsDist22_array_10.append(float(N_rec10_22))
N_totalobsDist22_array_30.append(float(N_all30_22))
N_totalobservableobsDist22_array_30.append(float(N_obs30_22))
N_totalrecoverableobsDist22_array_30.append(float(N_rec30_22))
N_totalobsDist22_array_100.append(float(N_all100_22))
N_totalobservableobsDist22_array_100.append(float(N_obs100_22))
N_totalrecoverableobsDist22_array_100.append(float(N_rec100_22))
N_totalobsDist22_array_1000.append(float(N_all1000_22))
N_totalobservableobsDist22_array_1000.append(float(N_obs1000_22))
N_totalrecoverableobsDist22_array_1000.append(float(N_rec1000_22))
N_totalobsDist195_array.append(float(N_all_195))
N_totalobservableobsDist195_array.append(float(N_obs_195))
N_totalrecoverableobsDist195_array.append(float(N_rec_195))
N_totalobsDist195_array_03.append(float(N_all03_195))
N_totalobservableobsDist195_array_03.append(float(N_obs03_195))
N_totalrecoverableobsDist195_array_03.append(float(N_rec03_195))
N_totalobsDist195_array_1.append(float(N_all1_195))
N_totalobservableobsDist195_array_1.append(float(N_obs1_195))
N_totalrecoverableobsDist195_array_1.append(float(N_rec1_195))
N_totalobsDist195_array_10.append(float(N_all10_195))
N_totalobservableobsDist195_array_10.append(float(N_obs10_195))
N_totalrecoverableobsDist195_array_10.append(float(N_rec10_195))
N_totalobsDist195_array_30.append(float(N_all30_195))
N_totalobservableobsDist195_array_30.append(float(N_obs30_195))
N_totalrecoverableobsDist195_array_30.append(float(N_rec30_195))
N_totalobsDist195_array_100.append(float(N_all100_195))
N_totalobservableobsDist195_array_100.append(float(N_obs100_195))
N_totalrecoverableobsDist195_array_100.append(float(N_rec100_195))
N_totalobsDist195_array_1000.append(float(N_all1000_195))
N_totalobservableobsDist195_array_1000.append(float(N_obs1000_195))
N_totalrecoverableobsDist195_array_1000.append(float(N_rec1000_195))
N_totalobsDist = np.sum(N_totalobsDist_array)
N_totalobsDist_03 = np.sum(N_totalobsDist_array_03)
N_totalobsDist_1 = np.sum(N_totalobsDist_array_1)
"""a module that houses TOV solvers in the "standard" formulation
"""
__author__ = "<NAME> (<EMAIL>)"
#-------------------------------------------------
import numpy as np
from scipy.integrate import odeint
from scipy.special import hyp2f1
from universality.utils.units import (G, c2, Msun)
#-------------------------------------------------
#DEFAULT_MAX_DR = 1e5 ### maximum step size allowed within the integrator (in standard units, which should be in cm)
DEFAULT_MAX_DR = 1e6
DEFAULT_MIN_DR = 1.0 ### the smallest step size we allow (in standard units, which should be cm)
DEFAULT_GUESS_FRAC = 0.1 ### how much of the way to the vanishing pressure we guess via Newton's method
DEFAULT_INITIAL_FRAC = 1e-3 ### the initial change in pressure we allow when setting the intial conditions
DEFAULT_RTOL = 1e-4
DEFAULT_MXSTEP = 10000
#------------------------
TWOPI = 2*np.pi
FOURPI = 2*TWOPI
Gc2 = G/c2
#-------------------------------------------------
### Standard formulation of the TOV equations
#-------------------------------------------------
### basic evolutionary equations
def dmdr(r, epsc2):
return FOURPI * r**2 * epsc2
def dmbdr(r, rho, m):
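# the (1 - 2Gm/(r c^2))**(-1/2) factor converts coordinate volume to proper volume for the rest (baryon) mass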
return dmdr(r, rho) * (1 - 2*Gc2*m/r)**-0.5
def dpc2dr(r, pc2, m, epsc2):
return - Gc2 * (epsc2 + pc2)*(m + FOURPI * r**3 * pc2)/(r * (r - 2*Gc2*m))
def detadr(r, pc2, m, eta, epsc2, cs2c2):
invf = (1. - 2.*Gc2*m/r)**-1
A = 2. * invf * (1. - 3.*Gc2*m/r - TWOPI*Gc2*r**2 * (epsc2 + 3.*pc2))
B = invf * (6. - FOURPI*Gc2*r**2 * (epsc2 + pc2)*(3. + 1./cs2c2))
return -1.*(eta*(eta - 1.) + A*eta - B)/r
def domegadr(r, pc2, m, omega, epsc2):
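# slow-rotation frame-dragging equation for the dimensionless variable omega,
# which omega2i() below converts to the moment of inertia at the stellar surface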
P = FOURPI * Gc2 * r**3 * (epsc2 + pc2)/ (r - 2.*Gc2*m)
return (P*(omega + 4.) - omega*(omega + 3.))/r
#-------------------------------------------------
# functions for values at the stellar surface
#-------------------------------------------------
def eta2lambda(r, m, eta): ### dimensionless tidal deformability
C = Gc2*m/r # compactness
fR = 1.-2.*C
F = hyp2f1(3., 5., 6., 2.*C) # a hypergeometric function
z = 2.*C
dFdz = (5./(2.*z**6.)) * (z*(z*(z*(3.*z*(5. + z) - 110.) + 150.) - 60.) / (z - 1.)**3 + 60.*np.log(1. - z))
RdFdr = -2.*C*dFdz # log derivative of hypergeometric function
k2el = 0.5*(eta - 2. - 4.*C/fR) / (RdFdr -F*(eta + 3. - 4.*C/fR)) # gravitoelectric quadrupole Love number
return (2./3.)*(k2el/C**5)
def omega2i(r, omega): ### moment of inertia
return (omega/(3. + omega)) * r**3/(2.*Gc2)
#-------------------------------------------------
# initial conditions
#-------------------------------------------------
def initial_pc2(pc2i, frac):
return (1. - frac)*pc2i ### assume a constant slope over a small change in the pressure
def initial_r(pc2i, ec2i, frac):
return (frac*pc2i / ( G * (ec2i + pc2i) * (ec2i/3. + pc2i) * TWOPI ) )**0.5 ### solve for the radius that corresponds to that small change
def initial_m(r, ec2i):
return FOURPI * r**3 * ec2i / 3. # gravitational mass
def initial_mb(r, rhoi):
return FOURPI * r**3 * rhoi / 3. # baryonic (rest) mass
def initial_eta(r, pc2i, ec2i, cs2c2i):
return 2. + FOURPI * Gc2 * r**2 * (9.*pc2i + 13.*ec2i + 3.*(pc2i+ec2i)/cs2c2i)/21. # intial perturbation for dimensionless tidal deformability
def initial_omega(r, pc2i, ec2i):
return 16.*np.pi * Gc2 * r**2 * (pc2i + ec2i)/5. # initial frame-dragging function
#-------------------------------------------------
# central loop that solves the TOV equations given a set of coupled ODEs
#-------------------------------------------------
def engine(
r,
vec,
eos,
dvecdr_func,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
mxstep=DEFAULT_MXSTEP,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos to be a tuple of 1D arrays with pressure/c2 as the first column; the remaining columns depend on dvecdr_func
"""
vec = np.array(vec, dtype=float)
while vec[0] > 0: ### continue until pressure vanishes
vec0 = vec.copy() # store a true copy of the current location as the old location (vec is modified in place below)
r0 = r
### estimate the radius at which this p will vanish via Newton's method
r = r0 + max(min_dr, min(max_dr, guess_frac * abs(vec[0]/dvecdr_func(vec, r, eos)[0])))
### integrate out until we hit that estimate
vec[:] = odeint(dvecdr_func, vec0, (r0, r), args=(eos,), rtol=rtol, hmax=max_dr, mxstep=mxstep)[-1,:] ### retain only the last point
### return to client, who will then interpolate to find the surface
### interpolate to find stellar surface
p = [vec0[0], vec[0]]
# radius
r = np.interp(0, p, [r0, r])
# the rest of the macro properties
vals = [np.interp(0, p, [vec0[i], vec[i]]) for i in range(1, len(vec))]
return r, vals
#-------------------------------------------------
### the solver that yields all known macroscopic quantites
MACRO_COLS = ['M', 'R', 'Lambda', 'I', 'Mb'] ### the column names for what we compute
def dvecdr(vec, r, eos):
pc2, m, eta, omega, mb = vec
epsc2 = np.interp(pc2, eos[0], eos[1])
rho = np.interp(pc2, eos[0], eos[2])
cs2c2 = np.interp(pc2, eos[0], eos[3])
return \
dpc2dr(r, pc2, m, epsc2), \
dmdr(r, epsc2), \
detadr(r, pc2, m, eta, epsc2, cs2c2), \
domegadr(r, pc2, m, omega, epsc2), \
dmbdr(r, rho, m)
def initial_condition(pc2i, eos, frac=DEFAULT_INITIAL_FRAC):
"""determines the initial conditions for a stellar model with central pressure pc
this is done by analytically integrating the TOV equations over very small radii to avoid the divergence as r->0
"""
ec2i = np.interp(pc2i, eos[0], eos[1])
rhoi = np.interp(pc2i, eos[0], eos[2])
cs2c2i = np.interp(pc2i, eos[0], eos[3])
pc2 = initial_pc2(pc2i, frac)
r = initial_r(pc2i, ec2i, frac)
m = initial_m(r, ec2i)
mb = initial_mb(r, rhoi)
eta = initial_eta(r, pc2i, ec2i, cs2c2i)
omega = initial_omega(r, pc2i, ec2i)
return r, (pc2, m, eta, omega, mb)
def integrate(
pc2i,
eos,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos = (pressure, energy_density, baryon_density, cs2c2)
"""
r, vec = initial_condition(pc2i, eos, frac=initial_frac)
if vec[0] < 0: ### guarantee that we enter the loop
raise RuntimeError('bad initial condition!')
r, (m, eta, omega, mb) = engine(
r,
vec,
eos,
dvecdr,
min_dr=min_dr,
max_dr=max_dr,
guess_frac=guess_frac,
rtol=rtol,
)
# compute tidal deformability
l = eta2lambda(r, m, eta)
# compute moment of inertia
i = omega2i(r, omega)
# convert to "standard" units
m /= Msun ### reported in units of solar masses, not grams
mb /= Msun
r *= 1e-5 ### convert from cm to km
i /= 1e45 ### normalize this to a common value but still in CGS
return m, r, l, i, mb
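# Example usage (a sketch, not part of the module API): assuming `pc2`, `ec2`, `rho`, and
# `cs2c2` are 1D numpy arrays tabulating an EOS in cgs units, sorted by increasing pressure,
# a single stellar model could be built with something like
#
#     eos = (pc2, ec2, rho, cs2c2)
#     M, R, Lambda, I, Mb = integrate(pc2[-10], eos)
#
# where the central pressure is taken from near the top of the table.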
#-------------------------------------------------
### light-weight solver that only includes M and R
MACRO_COLS_MR = ['M', 'R']
def dvecdr_MR(vec, r, eos):
'''returns d(p, m)/dr for the mass-radius-only solver
expects eos = (pressurec2, energy_densityc2, baryon_density, cs2c2), as in integrate_MR
'''
pc2, m = vec
epsc2 = np.interp(pc2, eos[0], eos[1])
rho = np.interp(pc2, eos[0], eos[2])
return \
dpc2dr(r, pc2, m, epsc2), \
dmdr(r, epsc2)
def initial_condition_MR(pc2i, eos, frac=DEFAULT_INITIAL_FRAC):
"""determines the initial conditions for a stellar model with central pressure pc
this is done by analytically integrating the TOV equations over very small radii to avoid the divergence as r->0
"""
ec2i = np.interp(pc2i, eos[0], eos[1])
rhoi = np.interp(pc2i, eos[0], eos[2])
pc2 = initial_pc2(pc2i, frac)
r = initial_r(pc2i, ec2i, frac)
m = initial_m(r, ec2i)
return r, (pc2, m)
def integrate_MR(
pc2i,
eos,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos = (pressure, energy_density, baryon_density, cs2c2)
"""
r, vec = initial_condition_MR(pc2i, eos, frac=initial_frac)
if vec[0] < 0: ### guarantee that we enter the loop
raise RuntimeError('bad initial condition!')
r, (m,) = engine(
r,
vec,
eos,
dvecdr_MR,
min_dr=min_dr,
max_dr=max_dr,
guess_frac=guess_frac,
rtol=rtol,
)
# convert to "standard" units
m /= Msun ### reported in units of solar masses, not grams
r *= 1e-5 ### convert from cm to km
return m, r
#-------------------------------------------------
### light-weight solver that only includes M, R, and Lambda
MACRO_COLS_MRLambda = ['M', 'R', 'Lambda']
def dvecdr_MRLambda(vec, r, eos):
'''returns d(p, m, eta)/dr
expects: pressurec2, energy_densityc2, baryon_density, cs2c2 = eos
'''
pc2, m, eta = vec
epsc2 = np.interp(pc2, eos[0], eos[1])
rho = np.interp(pc2, eos[0], eos[2])
cs2c2 = np.interp(pc2, eos[0], eos[3])
return \
dpc2dr(r, pc2, m, epsc2), \
dmdr(r, epsc2), \
detadr(r, pc2, m, eta, epsc2, cs2c2)
"""
Tests functions in the spiketools module
"""
import numpy as np
import pyret.spiketools as spk
def test_binspikes():
# assert the proper indices are returned
spike_times = [1.0, 2.0, 2.5, 3.0]
dt = 0.01
bin_edges = np.arange(0, 3, dt)
bspk = spk.binspikes(spike_times, bin_edges)
assert np.allclose(np.where(bspk)[0], [100, 200, 250, 299])
# maximum absolute error is dt
binned_times = bin_edges[np.where(bspk)]
assert np.all(np.abs(binned_times - spike_times) <= dt)
# test for no spikes
assert np.allclose(spk.binspikes([], bin_edges), np.zeros_like(bin_edges))
def test_estfr():
T = 100
dt = 1e-2
# test an empty array
bspk = np.zeros(T,)
time = np.arange(0, 1, dt)
fr = spk.estfr(bspk, time, sigma=0.01)
assert np.allclose(fr, bspk)
# test a single spike
bspk[T // 2] = 1.
fr = spk.estfr(bspk, time, sigma=0.01)
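# the smoothed rate should integrate (sum * dt) back to the total number of spikes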
assert np.isclose((fr.sum() * dt), bspk.sum())
def test_spiking_events():
np.random.seed(1234)
# generate spike times
spiketimes = np.array([0.1, 0.25, 0.5, 0.75, 0.9])
N = len(spiketimes)
T = 50
jitter = 0.01
spikes = []
for trial_index in range(T):
s = spiketimes + jitter * np.random.randn(N,)
spikes.append(np.stack((s, trial_index * np.ones(N,))))
spikes = np.hstack(spikes).T
# detect events
t, psth, bspk, events = spk.detectevents(spikes)
# correct number of events
assert len(events) == N
# test SpikingEvent class
ev = events[0]
assert isinstance(ev, spk.SpikingEvent)
# mean jitter should be close to the selected amount of jitter
mean_jitter = np.mean([e.jitter() for e in events])
assert np.allclose(mean_jitter, jitter, atol=1e-3)
# time to first spike (TTFS) should match the only spike in each trial
assert np.allclose(ev.spikes[:, 0], ev.ttfs())
# one spike per trial
mu, sigma = ev.stats()
assert mu == 1
assert sigma == 0
# test sorting
sorted_spks = ev.sort()
sorted_spks = sorted_spks[np.argsort(sorted_spks[:, 1])]
####################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
import types
import random
#################################################################################################################################################
# Main stochastic model
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
p_extern Probability of spontaneous infection
p_periodic Prob of period test
period Period for periodic testing
batch True - periodic testing done in batches of p_periodic*numNodes new people
min_time : minimum time period to pass between testing same person twice
count_non_random : count tests apart from random routine tests
policy: policy function
"""
def policy(self):
pass
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False, p_extern=0, p_periodic=0, period=1, batch=True, min_time=1,
count_non_random=False, policy=None, test_recovered=False, initT=0):
self.has_policy = False
self.verbose = False
if policy:
self.has_policy = True
policy.__name__ = "policy"
self.policy = types.MethodType(policy, self)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if (Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = {'beta': beta, 'sigma': sigma, 'gamma': gamma, 'xi': xi, 'mu_I': mu_I, 'mu_0': mu_0, 'nu': nu,
'beta_D': beta_D, 'sigma_D': sigma_D, 'gamma_D': gamma_D, 'mu_D': mu_D,
'beta_local': beta_local, 'beta_D_local': beta_D_local, 'p': p, 'q': q,
'theta_E': theta_E, 'theta_I': theta_I, 'phi_E': phi_E, 'phi_I': phi_I, 'psi_E': psi_E,
'psi_I': psi_I,
'p_extern': p_extern, 'p_periodic': p_periodic, "period": period, "batch": batch,
"min_time": min_time,
"count_non_random": count_non_random, "test_recovered": test_recovered}
self.init_parameters = dict(self.parameters)
self.node_groups = node_groups
self.update_parameters()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5 * self.numNodes)
self.numE = numpy.zeros(5 * self.numNodes)
self.numI = numpy.zeros(5 * self.numNodes)
self.numD_E = numpy.zeros(5 * self.numNodes)
self.numD_I = numpy.zeros(5 * self.numNodes)
self.numR = numpy.zeros(5 * self.numNodes)
self.numF = numpy.zeros(5 * self.numNodes)
self.numS = numpy.zeros(5 * self.numNodes)
self.N = numpy.zeros(5 * self.numNodes)
self.numTested = numpy.zeros(5 * self.numNodes)
self.numPositive = numpy.zeros(5 * self.numNodes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if isinstance(initT, (int, float)):
self.t = initT
else:
self.t = random.uniform(initT[0], initT[1])
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = self.t
self.wait_until_t = 0
self.currentR = 0
if (node_groups):
self.nodeToTest = {groupName: 0 for groupName in node_groups}
else:
self.nodeToTest = 0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of inidividuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - \
self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array(
[self.S] * int(self.numS[0]) + [self.E] * int(self.numE[0]) + [self.I] * int(self.numI[0]) + [
self.D_E] * int(self.numD_E[0]) + [self.D_I] * int(self.numD_I[0]) + [self.R] * int(self.numR[0]) + [
self.F] * int(self.numF[0])).reshape((self.numNodes, 1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if (store_Xseries):
self.Xseries = numpy.zeros(shape=(5 * self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0, :] = self.X.T
self.transitions = {
'StoE': {'currentState': self.S, 'newState': self.E},
'EtoI': {'currentState': self.E, 'newState': self.I},
'ItoR': {'currentState': self.I, 'newState': self.R},
'ItoF': {'currentState': self.I, 'newState': self.F},
'RtoS': {'currentState': self.R, 'newState': self.S},
'EtoDE': {'currentState': self.E, 'newState': self.D_E},
'ItoDI': {'currentState': self.I, 'newState': self.D_I},
'DEtoDI': {'currentState': self.D_E, 'newState': self.D_I},
'DItoR': {'currentState': self.D_I, 'newState': self.R},
'DItoF': {'currentState': self.D_I, 'newState': self.F},
'_toS': {'currentState': True, 'newState': self.S},
'StoNS': {'currentState': self.S, 'newState': self.S}
}
self.last_tested = numpy.zeros((self.numNodes, 1)) - 100 # everybody has a fake last tested time of -100 days
self.time_detected = 0
self.small_step = False
self.count_non_random = count_non_random
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if (node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape(
(self.numNodes, 1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numI'] = numpy.zeros(5 * self.numNodes)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import libs.utils.gpy_finite_diff as gpy_finite_diff
import libs.utils.gpy_estimation_lib as gpy_estimation_lib
from GPy.util.linalg import pdinv, dpotrs
from GPy.util import diag
import sklearn.linear_model
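# The helper below evaluates the profiled negative log-likelihood along a straight segment
# in the transformed optimizer space, between a fixed nominal lengthscale pair and the
# per-dimension standard deviation of the inputs, re-optimizing mean and variance
# analytically at every grid point (useful for visualizing multi-modality).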
def plot_paramz_likelihood_path(model, label):
nominal = np.array([27.04301504, 83.37540132])
param_one = nominal.copy()
param_two = model.X.std(0)
scipy_param_one = model.kern.lengthscale.constraints.properties()[0].finv(param_one)
scipy_param_two = model.kern.lengthscale.constraints.properties()[0].finv(param_two)
grid_1d = np.linspace(-0.3, 1.5, 1000)
y_1d = []
for i in range(grid_1d.shape[0]):
x = grid_1d[i]
scipy_param = x * scipy_param_one + (1 - x) * scipy_param_two
model.kern.lengthscale.optimizer_array = scipy_param.copy()
model = gpy_estimation_lib.analytical_mean_and_variance_optimization(model)
y_1d.append(model.objective_function())
plt.plot(grid_1d, y_1d, label=label)
def plot_likelihood_path(model, estimate_mean=True, estimate_var=True):
param_one = np.array([27.04301504, 83.37540132])
param_two = np.array([8.76182561, 21.70946319])
mean_one = 1210.116506
variance_one = 2274398.204448
mean_two = 176.754115
variance_two = 18221.51397
grid_1d = np.linspace(0, 1, 1000)
y_1d = []
gradient_norm = []
for i in range(grid_1d.shape[0]):
x = grid_1d[i]
param = x * param_one + (1 - x) * param_two
if estimate_mean:
mean_value = None
else:
mean_value = np.array([[x * mean_one + (1 - x) * mean_two]])
if estimate_var:
variance_value = None
else:
variance_value = np.array([[x * variance_one + (1 - x) * variance_two]])
obj, grad, hessian, model = gpy_finite_diff.get_cost_and_grad_and_hessian(model, param, mean_value, variance_value)
if i == 0:
print("first mode, obj : {}, grad : {}, hessian : \n {}, \n spec hessian : {}".format(obj, grad,
hessian,
np.linalg.eig(
hessian)[0]))
elif i == grid_1d.shape[0] - 1:
print("second mode, obj : {}, grad : {}, hessian : \n {}, \n spec hessian : {}".format(obj, grad,
hessian,
np.linalg.eig(
hessian)[0]))
y_1d.append(obj)
gradient_norm.append((grad ** 2).sum())
plot_xaxis = "path : lengthscales"
if not estimate_mean:
plot_xaxis = plot_xaxis + ', mean'
if not estimate_var:
plot_xaxis = plot_xaxis + ', var'
plt.figure()
plt.plot(grid_1d, y_1d)
plt.title("NLL vs Path")
plt.xlabel(plot_xaxis)
plt.ylabel("Negative log likelihood")
plt.show()
plt.figure()
plt.semilogy()
plt.plot(grid_1d, gradient_norm)
plt.title("Log gradient norm vs Path")
plt.xlabel(plot_xaxis)
plt.ylabel("Log gradient norm of negative log likelihood")
plt.show()
def plot_neg_likelihood_var(model):
var_init = model.Mat52.variance.values[0]
cost_var_init = model._objective_grads(model.optimizer_array)[0]
grid_1d = np.linspace(-1, 1, 2001)
grid_1d = [var_init * math.exp(x * math.log(10)) for x in grid_1d]
y_1d = []
for x in grid_1d:
model.Mat52.variance = x
y_1d.append((model._objective_grads(model.optimizer_array)[0]))
plt.figure()
plt.semilogx()
plt.plot(grid_1d, y_1d)
plt.title("Negative log likelihood vs var : lengthscales : [{}, {}]".format(model.Mat52.lengthscale[0],
model.Mat52.lengthscale[1]))
plt.xlabel("var")
plt.ylabel("Negative log likelihood")
plt.vlines(var_init, ymin=min(y_1d), ymax=max(y_1d), label='estimated_var : {0:.3f}, nll : {1:.3f}'.format(var_init, cost_var_init))
plt.legend()
plt.show()
def plot_multistart_optimization(model, n, mean_value,
variance_value,
optimum,
init_type):
model.constmap.C = mean_value
model.Mat52.variance = variance_value
bounds = [-1, 1]
log_rho_data = np.random.random((n, 2)) * (bounds[1] - bounds[0]) + bounds[0] + np.log10(optimum)
rho_data = np.exp(log_rho_data * math.log(10))
data = pd.DataFrame({'rho1': [], 'rho2': [], 'sigma2': [], 'm': [], 'cost': [], 'status': []})
for rho in rho_data:
model.Mat52.lengthscale = rho
if init_type == 'profiled':
model = gpy_estimation_lib.analytical_mean_and_variance_optimization(model)
elif init_type == 'classic':
model.constmap.C = model.Y.mean()
model.kern.variance = model.Y.var()
else:
raise ValueError(init_type)
optim = model.optimize()
data = data.append(pd.DataFrame({'rho1': [model.Mat52.lengthscale[0]],
'rho2': [model.Mat52.lengthscale[1]],
'sigma2': model.Mat52.variance,
'm': [model.constmap.C],
'cost': [model._objective_grads(model.optimizer_array)[0]],
'status': optim.status}),
ignore_index=True)
colors = {"Errorb'ABNORMAL_TERMINATION_IN_LNSRCH'": 'red', 'Converged': 'blue', 'Maximum number of f evaluations reached': 'green'}
if not data['status'].apply(lambda x: x in colors.keys()).min():
raise ValueError('Unknown status : {}'.format(data['status'].unique()))
plt.figure()
plt.scatter(x=np.log10(data['rho1']), y=np.log10(data['rho2']),
c=data['status'].apply(lambda x: colors[x]))
plt.scatter(x=math.log10(optimum[0]), y=math.log10(optimum[1]), c='k')
plt.xlabel("ln(rho_1)")
plt.ylabel("ln(rho_2)")
plt.vlines(x=math.log(10) * bounds[0] + math.log10(optimum[0]),
ymin=math.log(10) * bounds[0] + math.log10(optimum[1]),
ymax=math.log(10) * bounds[1] + math.log10(optimum[1]),
linestyles="--", colors="g")
plt.vlines(x=math.log(10) * bounds[1] + math.log10(optimum[0]),
ymin=math.log(10) * bounds[0] + math.log10(optimum[1]),
ymax=math.log(10) * bounds[1] + math.log10(optimum[1]),
linestyles="--", colors="g")
plt.hlines(y=math.log(10) * bounds[0] + math.log10(optimum[1]),
xmin=math.log(10) * bounds[0] + math.log10(optimum[0]),
xmax=math.log(10) * bounds[1] + math.log10(optimum[0]),
linestyles="--", colors="g")
plt.hlines(y=math.log(10) * bounds[1] + math.log10(optimum[1]),
xmin=math.log(10) * bounds[0] + math.log10(optimum[0]),
xmax=math.log(10) * bounds[1] + math.log10(optimum[0]),
linestyles="--", colors="g")
plt.plot([math.log10(optimum[0]) - 2, math.log10(optimum[0]) + 2],
[math.log10(optimum[1]) - 2, math.log10(optimum[1]) + 2],
label='constant anisotropy')
plt.legend()
plt.title(init_type)
plt.show()
#############################################
plt.figure()
plt.scatter(x=np.log10(data['rho1']), y=np.log10(data['sigma2']),
c=data['status'].apply(lambda x: colors[x]))
plt.scatter(x=math.log10(optimum[0]), y=math.log10(variance_value), c='k')
plt.vlines(x=math.log(10) * bounds[0] + math.log10(optimum[0]), ymin=np.log10(data['sigma2']).min(), ymax=np.log10(data['sigma2']).max(),
linestyles="--", colors="g")
plt.vlines(x=math.log(10) * bounds[1] + math.log10(optimum[0]), ymin=np.log10(data['sigma2']).min(), ymax=np.log10(data['sigma2']).max(),
linestyles="--", colors="g")
plt.plot([np.log10(data['rho1']).min(), np.log10(data['rho1']).max()],
[math.log10(variance_value) - (math.log10(optimum[0]) - np.log10(data['rho1']).min())*5,
math.log10(variance_value) + (np.log10(data['rho1']).max() - math.log10(optimum[0]))*5], label='constant microergodicity')
plt.xlabel("ln(rho_1)")
plt.ylabel("ln(sigma2)")
plt.legend()
plt.title(init_type)
plt.show()
return data
def get_noise_level(x, y):
sk_model = sklearn.linear_model.LinearRegression(fit_intercept=True)
X_data = np.concatenate((np.array(x).reshape(-1, 1), (np.array(x)**2).reshape(-1, 1)), axis=1)
Y_data = np.array(y).reshape(-1, 1)
sk_model.fit(X_data, Y_data)
print("noise level (std) : {}".format((Y_data - sk_model.predict(X_data)).std(ddof=3)))
def plot_taylor(model, idx_param, diagonalize=False, width=1e-2, n=1000):
obj_value, grad = model._objective_grads(model.optimizer_array)
print("obj value : {}".format(obj_value))
hessian, model = gpy_finite_diff.get_hessian(model)
if diagonalize:
v, W = np.linalg.eig(hessian)
from __future__ import print_function
import unittest
import numpy as np
import sqaod as sq
import sqaod.common as common
from .example_problems import *
from math import log
from math import exp
class TestBipartiteGraphBFSearcherBase:
def __init__(self, anpkg, dtype) :
self.anpkg = anpkg
self.dtype = dtype
self.epu = 1.e-6 if dtype == np.float32 else 1.e-12
def new_searcher(self, N0, N1) :
searcher = self.anpkg.bipartite_graph_bf_searcher(dtype=self.dtype)
b0, b1, W = bipartite_graph_random(N0, N1, self.dtype)
searcher.set_qubo(b0, b1, W)
return searcher
def test_calling_sequence(self) :
N0, N1 = 8, 8
searcher = self.new_searcher(N0, N1)
searcher.prepare()
searcher.search_range()
searcher.make_solution()
searcher.search()
searcher.calculate_E()
searcher.get_E()
searcher.get_problem_size()
searcher.get_preferences()
x = searcher.get_x()
def test_problem_size(self) :
N0, N1 = 8, 8
searcher = self.new_searcher(N0, N1)
N0out, N1out = searcher.get_problem_size()
self.assertEqual(N0, N0out)
self.assertEqual(N1, N1out)
def _test_search(self, opt, b0, b1, W, Eexp, xexp):
N0, N1 = b0.shape[0], b1.shape[0]
searcher = self.new_searcher(N0, N1)
searcher.set_qubo(b0, b1, W, opt)
searcher.search()
searcher.calculate_E()
E = searcher.get_E()
res = np.allclose(E[0], Eexp, atol=self.epu)
#print(E[0], Eexp)
self.assertTrue(res)
searcher.make_solution()
xlist = searcher.get_x()
self.assertEqual(len(xlist), 1)
x0, x1 = xlist[0]
#print(x0, x1)
self.assertTrue(np.allclose(x0, xexp))
self.assertTrue(np.allclose(x1, xexp))
def test_min_energy_positive_W(self):
N0, N1 = 8, 8
b0 = np.ones((N0), np.int8)
b1 = np.ones((N1), np.int8)
W = np.ones((N1, N0), np.int8)
self._test_search(sq.minimize, b0, b1, W, 0, 0)
def test_min_energy_negative_W(self):
N0, N1 = 8, 8
b0 = - np.ones((N0), np.int8)
b1 = - np.ones((N1), np.int8)
W = - np.ones((N1, N0), np.int8)
#print(b0, b1, W)
self._test_search(sq.minimize, b0, b1, W, np.sum(b0) + np.sum(b1) + np.sum(W), 1)
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import numpy as np
from scipy.stats import gamma
from statsmodels.nonparametric import bandwidths
__all__ = ['get_kernel_width', 'get_gram_matrix', 'hsic_teststat', 'hsic_test_gamma']
def get_kernel_width(X):
"""Calculate the bandwidth to median distance between points.
Use at most 100 points (since median is only a heuristic,
and 100 points is sufficient for a robust estimate).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where ``n_samples`` is the number of samples
and ``n_features`` is the number of features.
Returns
-------
float
The bandwidth parameter.
"""
n_samples = X.shape[0]
if n_samples > 100:
X_med = X[:100, :]
n_samples = 100
else:
X_med = X
G = np.sum(X_med * X_med, 1).reshape(n_samples, 1)
Q = np.tile(G, (1, n_samples))
R = np.tile(G.T, (n_samples, 1))
dists = Q + R - 2 * np.dot(X_med, X_med.T)
dists = dists - np.tril(dists)
dists = dists.reshape(n_samples ** 2, 1)
return np.sqrt(0.5 * np.median(dists[dists > 0]))
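# Illustrative sanity check (hedged sketch, not from the original module):
#   rng = np.random.RandomState(0)
#   X = rng.randn(500, 3)
#   width = get_kernel_width(X)  # median-distance heuristic over (at most) 100 points
# The returned width is then used as the RBF bandwidth in _rbf_dot below.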
def _rbf_dot(X, Y, width):
"""Compute the inner product of radial basis functions."""
n_samples_X = X.shape[0]
n_samples_Y = Y.shape[0]
G = np.sum(X * X, 1).reshape(n_samples_X, 1)
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 5, 2019
# Author: <NAME>
#######################################################################
from __future__ import division, print_function, absolute_import
from queue import *
import os
import numpy as np
import os
os.environ['SDL_AUDIODRIVER'] = 'dsp'
import pygame
from urban_env.road.graphics import WorldSurface, RoadGraphics
from urban_env.vehicle.graphics import VehicleGraphics
from urban_env.envdict import ACTIONS_DICT
from urban_env.vehicle.dynamics import Obstacle
class EnvViewer(object):
"""
A viewer to render a urban driving environment.
"""
SCREEN_WIDTH = 1750
SCREEN_HEIGHT = 150
SAVE_IMAGES = False
def __init__(self, env):
self.env = env
pygame.init()
panel_size = (self.SCREEN_WIDTH, self.SCREEN_HEIGHT)
self.screen = pygame.display.set_mode([self.SCREEN_WIDTH, self.SCREEN_HEIGHT])
self.sim_surface = WorldSurface(panel_size, 0, pygame.Surface(panel_size))
self.sim_surface.centering_position = env.config.get("centering_position", self.sim_surface.INITIAL_CENTERING)
self.clock = pygame.time.Clock()
self.enabled = True
if "SDL_VIDEODRIVER" in os.environ and os.environ["SDL_VIDEODRIVER"] == "dummy":
self.enabled = False
self.agent_display = None
self.agent_surface = None
self.vehicle_trajectory = None
self.vehicle_trajectories = []
self.frame = 0
def set_agent_display(self, agent_display):
"""
Set a display callback provided by an agent, so that they can render their behaviour on a dedicated
agent surface, or even on the simulation surface.
:param agent_display: a callback provided by the agent to display on surfaces
"""
if self.agent_display is None:
if self.SCREEN_WIDTH > self.SCREEN_HEIGHT:
self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, 2 * self.SCREEN_HEIGHT))
else:
self.screen = pygame.display.set_mode((2 * self.SCREEN_WIDTH, self.SCREEN_HEIGHT))
self.agent_surface = pygame.Surface((self.SCREEN_WIDTH, self.SCREEN_HEIGHT))
self.agent_display = agent_display
def handle_events(self):
"""
Handle pygame events by forwarding them to the display and environment vehicle.
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.env.close()
self.sim_surface.handle_event(event)
if self.env.vehicle:
VehicleGraphics.handle_event(self.env.vehicle, event)
def display(self):
"""
Display the road and vehicles on a pygame window.
"""
if not self.enabled:
return
'''if self.env.actions is not None:
if self.env.actions:
self.set_agent_action_sequence(self.env.actions)
el'''
del self.vehicle_trajectories[:]
self.vehicle_trajectories.append(self.env.vehicle.projection)
self.sim_surface.move_display_window_to(self.window_position())
RoadGraphics.display(self.env.road, self.sim_surface)
if self.vehicle_trajectories:
for vehicle_trajectory in self.vehicle_trajectories:
if vehicle_trajectory is not None:
VehicleGraphics.display_trajectory(
vehicle_trajectory,
self.sim_surface)
RoadGraphics.display_traffic(self.env.road, self.sim_surface)
if self.agent_display:
self.agent_display(self.agent_surface, self.sim_surface)
if self.SCREEN_WIDTH > self.SCREEN_HEIGHT:
self.screen.blit(self.agent_surface, (0, self.SCREEN_HEIGHT))
else:
self.screen.blit(self.agent_surface, (self.SCREEN_WIDTH, 0))
self.screen.blit(self.sim_surface, (0, 0))
self.clock.tick(self.env.config["SIMULATION_FREQUENCY"]+1)
pygame.display.flip()
if self.SAVE_IMAGES:
pygame.image.save(self.screen, "urban-env_{}.png".format(self.frame))
self.frame += 1
caption = "Urban-AD ( "
#caption += "action = " + str(ACTIONS_DICT[self.env.previous_action])
caption += " v = {:.2f}".format(self.env.vehicle.velocity)
if self.env.vehicle.PRNDL:
caption += " PRNDL = "+self.env.vehicle.PRNDL
if self.env.vehicle.braking is not None:
caption += " brake = {:.2f}".format(self.env.vehicle.braking)
if self.env.vehicle.throttle is not None:
caption += " throttle = {:.2f}".format(self.env.vehicle.throttle)
if self.env.vehicle.control_action:
caption += " accel = {:.2f}".format(self.env.vehicle.control_action['acceleration'])
caption += " steering = {:.2f}".format(self.env.vehicle.control_action['steering'])
caption += " steps = " + str(self.env.steps)
if hasattr( self.env, 'episode_travel'):
caption += ', ep travel = {:.2f}'.format(self.env.episode_travel)
#caption += ', reward = {:.2f}'.format(self.env.reward)
caption += ', ep reward = {:.2f}'.format(self.env.episode_reward)
caption += " )"
pygame.display.set_caption(caption)
def get_image(self):
"""
:return: the rendered image as a rbg array
"""
data = pygame.surfarray.array3d(self.screen)
return np.moveaxis(data, 0, 1)
def window_position(self):
"""
:return: the world position of the center of the displayed window.
"""
if self.env.vehicle:
return self.env.vehicle.position
else:
return np.array([0, 0])
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Common functions that are utilized by neural network models
@author: dbasaran
"""
import numpy as np
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau,Callback
import keras as K
import logging
import h5py
import json
import os
import csv
import sys
import pandas as pd
import mir_eval
from sklearn.preprocessing import LabelBinarizer,normalize
import argparse
def parse_input(input_args):
'''
Parsing the input arguments
:param input_args: Input arguments from the console
:return: args: List of parsed arguments
'''
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose",
action="store_true",
help="Print the procedure")
parser.add_argument("--gpu-segment",
action="store",
dest="gpu", type=int,
help="CUDA_VISIBLE_DEVICES setting",
default=0)
parser.add_argument("--patch-size",
action="store",
dest="patch_size", type=int,
help="The width (length) of a CNN patch/image",
default=25)
parser.add_argument("--number-of-patches",
action="store",
dest="number_of_patches", type=int,
help="The number of consecutive CNN patches to be fed into RNN layer",
default=20)
parser.add_argument("--batch-size",
action="store",
dest="batch_size", type=int,
help="The number of samples in the training phase",
default=16)
parser.add_argument("--epochs",
action="store",
dest="epochs", type=int,
help="The number epochs in the training phase",
default=100)
parser.add_argument("--drop-out",
action="store",
dest="drop_out", type=float,
help="The dropout ratio in the network",
default=0.3)
parser.add_argument("--number-of-classes",
action="store",
dest="number_of_classes", type=int,
help="The number of target note classes (Including the non-melody class)",
default=62)
parser.add_argument("--step-notes",
action="store",
dest="step_notes", type=int,
help="The number of F0's between each semitone",
default=5)
parser.add_argument("--sampling-rate",
action="store",
dest="SR", type=int,
help="Sampling rate for the signals",
default=22050)
parser.add_argument("--hop-size",
action="store",
dest="hop_size", type=int,
help="Hopsize for the signals",
default=256)
parser.add_argument("--dataset-number",
action="store",
dest="dataset_number", type=int,
help="The number of the dataset i.e., dataset 1, dataset 2 etc.",
default=1)
parser.add_argument("--RNN-type",
action="store",
dest="RNN", type=str,
help="Type of the RNN LSTM/GRU",
default='GRU')
parser.add_argument("--model-name",
action="store",
dest="model_name", type=str,
help="The name of the model",
default=None)
parser.add_argument("--early-stopping-patience",
action="store",
dest="early_stopping_patience", type=int,
help="The patience value for the EarlyStopping callback",
default=20)
parser.add_argument("--reduce-LR-patience",
action="store",
dest="reduce_LR_patience", type=int,
help="The patience value for the ReduceLROnPlateau callback",
default=10)
parser.add_argument("--feature-size",
action="store",
dest="feature_size", type=int,
help="The feature size of the input (Default for step_size=5, minFreq=55, maxFreq=1760)",
default=301)
parser.add_argument("--augment-data",
action="store_true",
default=False,
help="Use augmentation if this option is assigned to True")
parser.add_argument("--dataset-name",
action="store",
dest="dataset_name", type=str,
help="The name of dataset medleydb/jazzomat",
default='medleydb')
parser.add_argument("--use-part-of-training-set",
action="store_true",
default=False,
help="Use augmentation if this option is assigned to True")
parser.add_argument("--training-amount-percentage",
action="store",
dest="training_amount_percentage", type=float,
help="The amount of the training data that is going to be used for training. Effective only if "
"--use-part-of-training indicator is True.",
default=80.)
parser.add_argument("--use-part-of-training-set-per-epoch",
action="store_true",
default=False,
help="Use augmentation if this option is assigned to True")
parser.add_argument("--training-amount-number-of-samples",
action="store",
dest="training_amount_number_of_samples", type=float,
help="The number of batches in the training data that is going to be used for training in one "
"epoch. Effective only if --use-part-of-training-set-per-epoch indicator is True.",
default=120.)
args = parser.parse_args(input_args)
return args
class Logger(object):
def __init__(self, args):
self.terminal = sys.stdout
self.log = open('{0}/CRNN-model_{1}-dataset-{2}.log'.format(get_path(),
args.model_name, args.dataset_number), "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
def read_arguments(input_args):
'''
Reads the arguments from the user and makes the number_of_classes, step_notes and dataset_name parameters compatible
with the chosen dataset.
:param input_args: Input arguments
:return: args: List of parsed arguments
'''
args = parse_input(input_args[1:])
args.model_name = input_args[0].split('_')[-1].split('.')[0].split('del')[-1]
with open('{0}/dataset-{1}_parameters.json'.format(get_dataset_load_path(), args.dataset_number), 'r') as f:
parameters = json.load(f)
args.number_of_classes = parameters['number_of_classes']
args.step_notes = parameters['step_notes']
args.dataset_name = parameters['dataset_name']
return args
def print_arguments(args):
print('\n\n******* Experiment *******')
print('Model {0}: '.format(args.model_name))
print('Parameters:')
print(' number_of_classes: {0}'.format(args.number_of_classes))
print(' step_notes: {0}'.format(args.step_notes))
print(' patch_size: {0}'.format(args.patch_size))
print(' number_of_patches: {0}'.format(args.number_of_patches))
print(' feature_size: {0}'.format(args.feature_size))
print(' segment length: {0}'.format(np.int(args.patch_size*args.number_of_patches)))
print(' augment: {0}'.format(args.augment_data))
print(' batch_size:{0}'.format(args.batch_size))
print(' number of epochs: {0}'.format(args.epochs))
print(' dropout value: {0}'.format(args.drop_out))
print(' dataset_number: {0}'.format(args.dataset_number))
print(' dataset_name: {0}'.format(args.dataset_name))
print(' use_part_of_training_set: {0}'.format(args.use_part_of_training_set))
if args.use_part_of_training_set:
print(' training_amount_percentage: {0:.1f}'.format(args.training_amount_percentage))
#########################################################
## GET PATH FUNCTIONS: Functions to return paths
def get_path():
'''
Gets the path of the main folder
:return: path (string)
'''
path = os.getcwd()
path = path[:path.rfind('/')]
return path
def get_path_to_quantized_annotations():
quantized_annotations_path = '{0}/quantized_annotations'.format(get_path())
return quantized_annotations_path
def get_path_to_pitch_estimations():
# Wrapper function
results_path = get_model_output_save_path()
return results_path
def get_model_output_save_path():
model_output_save_path = '{0}/medleydb_melody_results/C-RNN_results'.format(get_path())
if not os.path.exists(model_output_save_path):
os.makedirs(model_output_save_path)
return model_output_save_path
def get_dataset_splits_save_path():
dataset_splits_save_path = '{0}/medleydb_dataset_splits'.format(get_path())
if not os.path.exists(dataset_splits_save_path):
os.makedirs(dataset_splits_save_path)
return dataset_splits_save_path
def get_hf0_path():
path = '{0}/medleydb_features/HF0s_STFT'.format(get_path())
return path
def get_dataset_test_load_path():
dataset_test_load_path = get_hf0_path()
return dataset_test_load_path
def get_dataset_load_path():
dataset_load_path = get_dataset_splits_save_path()
return dataset_load_path
def get_trained_model_save_path(dataset_name):
trained_model_save_path = '{0}/trained_models'.format(get_path(dataset_name=dataset_name))
if not os.path.exists(trained_model_save_path):
os.makedirs(trained_model_save_path)
return trained_model_save_path
def get_model_output_save_path(dataset_name, args):
model_output_save_path = '{0}/{1}_melody_results/C-RNN_results/model-{2}_datasetNumber-{3}_batchSize-{4}_patchSize-{5}_numberOfPatches-{6}'.format(get_path(dataset_name=dataset_name),
dataset_name,
args.model_name,
args.dataset_number,
args.batch_size,
args.patch_size,
args.number_of_patches)
if not os.path.exists(model_output_save_path):
os.makedirs(model_output_save_path)
return model_output_save_path
#######################################################
def get_labels(track_name):
'''
Get labels for the track
:param track_name: String - Name of the track in the MedleyDB dataset
:return: labels: Numpy array - quantized labels of the track with -1 for non-melody and all other target classes starting from 0
'''
quantized_annotation_path = get_path_to_quantized_annotations() \
+ '/{0}_quantized_labels_Fs-22050_hop-256.h5'.format(track_name)
labels_file = h5py.File(quantized_annotation_path , 'r')
labels = np.array(labels_file['labels'])
return labels
def get_pitch_estimation_from_csv(track_name):
'''
Gets the pitch estimation of a track from the csv file
:param track_name: String - Name of the track in the MedleyDB dataset
:return: pitch_estimation: Numpy array - Estimations for each frame
'''
estimation_path = get_path_to_pitch_estimations() + '/{0}.csv'.format(track_name)
data = pd.read_csv(estimation_path, delimiter=',', header=None)
pitch_estimation = np.array(data)[:, 1]
return pitch_estimation
def train_model(model, args):
'''
The function that trains a certain neural network model with the given arguments.
:param model: Keras.Model - Constructed model
:param args: List - Input arguments
:return:
'''
x_train, y_train, x_validation, y_validation = load_dataset_TD(dataset_number=args.dataset_number, args=args)
dataset_train_size = x_train.shape[0] # First dimension gives the number of samples
dataset_validation_size = x_validation.shape[0]
# Set the optimizers
opt_ADAM = Adam(clipnorm=1., clipvalue=0.5)
opt_SGD = SGD(lr=0.0005, decay=1e-4, momentum=0.9, nesterov=True)
# Compile the model
model.compile(loss='categorical_crossentropy', optimizer=opt_ADAM, metrics=['accuracy'])
# Use either a part of training set per epoch or all the set per epoch
if args.use_part_of_training_set_per_epoch:
number_of_batches_train = np.int(np.floor(args.training_amount_number_of_samples/args.batch_size))
else:
number_of_batches_train = np.max((np.floor((dataset_train_size) / args.batch_size), 1))
number_of_batches_validation = np.max((np.floor(dataset_validation_size / args.batch_size), 1))
"""Differential privacy computing of count, sum, mean, variance."""
import numpy as np
import pipeline_dp
# TODO: import only modules https://google.github.io/styleguide/pyguide.html#22-imports
from pipeline_dp.aggregate_params import NoiseKind
from dataclasses import dataclass
@dataclass
class MeanVarParams:
"""The parameters used for computing the dp sum, count, mean, variance."""
eps: float
delta: float
low: float
high: float
max_partitions_contributed: int
max_contributions_per_partition: int
noise_kind: NoiseKind # Laplace or Gaussian
def l0_sensitivity(self):
""""Returns the L0 sensitivity of the parameters."""
return self.max_partitions_contributed
def squares_interval(self):
"""Returns the bounds of the interval [low^2, high^2]."""
if self.low < 0 and self.high > 0:
return 0, max(self.low**2, self.high**2)
return self.low**2, self.high**2
def compute_middle(low: float, high: float):
""""Returns the middle point of the interval [low, high]."""
return low + (high - low) / 2
def compute_l1_sensitivity(l0_sensitivity: float, linf_sensitivity: float):
"""Calculates the L1 sensitivity based on the L0 and Linf sensitivities.
Args:
l0_sensitivity: The L0 sensitivity.
linf_sensitivity: The Linf sensitivity.
Returns:
The L1 sensitivity.
"""
return l0_sensitivity * linf_sensitivity
def compute_l2_sensitivity(l0_sensitivity: float, linf_sensitivity: float):
"""Calculates the L2 sensitivity based on the L0 and Linf sensitivities.
Args:
l0_sensitivity: The L0 sensitivity.
linf_sensitivity: The Linf sensitivity.
Returns:
The L2 sensitivity.
"""
return np.sqrt(l0_sensitivity) * linf_sensitivity
def compute_sigma(eps: float, delta: float, l2_sensitivity: float):
"""Returns the optimal value of sigma for the Gaussian mechanism.
Args:
eps: The epsilon value.
delta: The delta value.
l2_sensitivity: The L2 sensitivity.
"""
# TODO: use the optimal sigma.
# Theorem 3.22: https://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf
return np.sqrt(2 * np.log(1.25 / delta)) * l2_sensitivity / eps
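# Worked example (for orientation only): with eps=1.0, delta=1e-5 and l2_sensitivity=1.0,
# sigma = sqrt(2 * ln(1.25e5)) * 1.0 / 1.0 ~= 4.84.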
def apply_laplace_mechanism(value: float, eps: float, l1_sensitivity: float):
"""Applies the Laplace mechanism to the value.
Args:
value: The initial value.
eps: The epsilon value.
l1_sensitivity: The L1 sensitivity.
Returns:
The value resulted after adding the noise.
"""
# TODO: use the secure noise instead of np.random
return value + np.random.laplace(0, l1_sensitivity / eps)
def apply_gaussian_mechanism(value: float, eps: float, delta: float,
l2_sensitivity: float):
"""Applies the Gaussian mechanism to the value.
Args:
value: The initial value.
eps: The epsilon value.
delta: The delta value.
l2_sensitivity: The L2 sensitivity.
Returns:
The value resulted after adding the noise.
"""
sigma = compute_sigma(eps, delta, l2_sensitivity)
# TODO: use the secure noise instead of np.random
return value + np.random.normal(0, sigma)
def _add_random_noise(
value: float,
eps: float,
delta: float,
l0_sensitivity: float,
linf_sensitivity: float,
noise_kind: NoiseKind,
):
"""Adds random noise according to the parameters.
Args:
value: The initial value.
eps: The epsilon value.
delta: The delta value.
l0_sensitivity: The L0 sensitivity.
linf_sensitivity: The Linf sensitivity.
noise_kind: The kind of noise used.
Returns:
The value resulted after adding the random noise.
"""
if noise_kind == NoiseKind.LAPLACE:
l1_sensitivity = compute_l1_sensitivity(l0_sensitivity,
linf_sensitivity)
return apply_laplace_mechanism(value, eps, l1_sensitivity)
if noise_kind == NoiseKind.GAUSSIAN:
l2_sensitivity = compute_l2_sensitivity(l0_sensitivity,
linf_sensitivity)
return apply_gaussian_mechanism(value, eps, delta, l2_sensitivity)
raise ValueError("Noise kind must be either Laplace or Gaussian.")
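# In short: the Laplace mechanism is calibrated with the L1 sensitivity
# (l0_sensitivity * linf_sensitivity), while the Gaussian mechanism is calibrated with
# the L2 sensitivity (sqrt(l0_sensitivity) * linf_sensitivity).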
@dataclass
class AdditiveVectorNoiseParams:
eps_per_coordinate: float
delta_per_coordinate: float
max_norm: float
l0_sensitivity: float
linf_sensitivity: float
norm_kind: pipeline_dp.NormKind
noise_kind: NoiseKind
def _clip_vector(vec: np.ndarray, max_norm: float,
norm_kind: pipeline_dp.NormKind):
norm_kind = norm_kind.value # type: str
if norm_kind == "linf":
return np.clip(vec, -max_norm, max_norm)
if norm_kind in {"l1", "l2"}:
norm_kind = int(norm_kind[-1])
vec_norm = np.linalg.norm(vec, ord=norm_kind)
import sys
import gym
import numpy as np
from gym import spaces
from .edit_photo import PhotoEditor, edit_demo
import cv2
import random
import logging
import os
DATASET_DIR = "./fivek_dataset/"
TARGET_DIR = "expertC/"
ORIGINAL_DIR = "original/"
class PhotoEnhancementEnv(gym.Env):
action_space = None
observation_space = None
reward_range = None
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array'],
}
def __init__(self,
batch_size,
logger=None,
imsize=512,
max_episode_steps=1):
super().__init__()
self.tags = {'max_episode_steps': max_episode_steps}
self.logger = logger or logging.getLogger(__name__)
self.imsize = imsize
self.batch_size = batch_size
try:
self.file_names
except:
self.file_names = []
with open(os.path.join(DATASET_DIR, "trainSource.txt")) as f:
s = f.read()
self.file_names.extend(s.split("\n")[:-1])
self.file_names = \
list(map(lambda x: os.path.join(DATASET_DIR, ORIGINAL_DIR, x), self.file_names))
self.photo_editor = PhotoEditor()
self.num_parameters = self.photo_editor.num_parameters
# action space
self.action_space = spaces.Dict({
'parameters':
spaces.Box(low=-1.0, high=1.0,
shape=(self.batch_size, self.num_parameters), dtype=np.float32),
})
# observation space
self.observation_space = spaces.Dict({
'image':
spaces.Box(low=0,
high=255,
shape=(self.batch_size, self.imsize, self.imsize, 3),
dtype=np.uint8)
})
# reset canvas and set current position of the pen
self.reset()
def reset(self):
self.logger.debug('reset the drawn picture')
self.original_images = []
self.editted_images = []
for i in range(self.batch_size):
original_image = cv2.imread(random.choice(self.file_names))
original_image = cv2.resize(original_image, (64, 64)) / 255.0
if random.randint(0, 1) == 0:
original_image = original_image[:, ::-1, :]
editted_image = original_image.copy()
self.original_images.append(original_image)
self.editted_images.append(editted_image)
ob = {
'images': self._get_rgb_array()
}
return ob
def step(self, action):
parameters_space = self.action_space.spaces['parameters']
clipped_action = np.clip(action['parameters'] / 1.0, parameters_space.low, parameters_space.high)
for i in range(self.batch_size):
self.editted_images[i] = self.photo_editor(self.original_images[i].copy(), clipped_action[i])
reward = 0.0
done = False
ob = {
'images': self._get_rgb_array()
}
return ob, reward, done, {}
def render(self, mode='human'):
""" render the current drawn picture image for human """
if mode == 'human':
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(self._get_rgb_array())
elif mode == 'rgb_array':
return self._get_rgb_array()
else:
raise NotImplementedError
def _get_rgb_array(self, cut=True):
""" render the current canvas as a rgb array
"""
rgb_array = np.zeros((self.batch_size, self.imsize, self.imsize, 3), dtype=np.uint8)
for i in range(self.batch_size):
shape = self.original_images[i].shape
rgb_array[i, :shape[0], :shape[1], :] = \
(self.editted_images[i][:, :, ::-1] * 255).astype(np.uint8)
return rgb_array
def calc_mse(self):
return ((np.array(self.original_images) - np.array(self.editted_images)) ** 2).mean()
import numpy as np
import HyperUtils as hu
check_eps = np.array([0.3,0.05])
check_sig = np.array([8.0,0.5])
check_alp = np.array([[0.2,0.1], [0.18,0.1], [0.16,0.1], [0.14,0.1], [0.14,0.1]])
check_chi = np.array([[0.9,0.1], [1.0,0.1], [1.1,0.1], [1.2,0.1], [1.2,0.1]])
file = "hnepmk_ser_cbh"
name = "nD Linear Elastic - Plastic with Multisurface Kinematic Hardening - Series Bounding HARM"
mode = 1
const = [2, 100.0, 4, 0.154936, 100.0, 0.436529, 33.33333, 1.653964, 20.0, 1.169595, 10.0, 0.1]
mu = 0.1
def deriv():
global E, k, recip_k, H, R, name_const, n_int, n_inp, n_y, n_const, ndim
ndim = int(const[0])
E = float(const[1])
n_int = int(const[2]) + 1
n_inp = int(const[2])
n_y = 1
n_const = 3 + 2*n_int + 1
k = np.array(const[3:3 + 2*n_inp:2])
H = np.array(const[4:4 + 2*n_inp:2])
recip_k = 1.0 / k
R = float(const[3 + 2*n_inp])
name_const = ["ndim", "E", "N"]
for i in range(n_inp):
name_const.append("k"+str(i+1))
name_const.append("H"+str(i+1))
name_const.append("R")
deriv()
def ep(alp): return np.einsum("ni->i",alp)
def f(eps,alp): return (E*sum((eps-ep(alp))**2)/2.0 +
np.einsum("n,ni,ni->",H,alp[:n_inp],alp[:n_inp])/2.0)
def dfde(eps,alp): return E*(eps-ep(alp))
def dfda(eps,alp):
temp = np.zeros([n_int, ndim])
import numpy as np
import warnings
import os
import pickle as pkl
from sklearn.model_selection import train_test_split
from sklearn.metrics.scorer import _BaseScorer
from solnml.components.ensemble.base_ensemble import BaseEnsembleModel
from solnml.components.utils.constants import CLS_TASKS
from solnml.components.evaluators.base_evaluator import fetch_predict_estimator
class Blending(BaseEnsembleModel):
def __init__(self, stats,
ensemble_size: int,
task_type: int,
metric: _BaseScorer,
output_dir=None,
meta_learner='lightgbm'):
super().__init__(stats=stats,
ensemble_method='blending',
ensemble_size=ensemble_size,
task_type=task_type,
metric=metric,
output_dir=output_dir)
try:
from lightgbm import LGBMClassifier
except:
warnings.warn("Lightgbm is not imported! Blending will use linear model instead!")
meta_learner = 'linear'
self.meta_method = meta_learner
# We use Xgboost as default meta-learner
if self.task_type in CLS_TASKS:
if meta_learner == 'linear':
from sklearn.linear_model.logistic import LogisticRegression
self.meta_learner = LogisticRegression(max_iter=1000)
elif meta_learner == 'gb':
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
self.meta_learner = GradientBoostingClassifier(learning_rate=0.05, subsample=0.7, max_depth=4,
n_estimators=250)
elif meta_learner == 'lightgbm':
from lightgbm import LGBMClassifier
self.meta_learner = LGBMClassifier(max_depth=4, learning_rate=0.05, n_estimators=150)
else:
if meta_learner == 'linear':
from sklearn.linear_model import LinearRegression
self.meta_learner = LinearRegression()
elif meta_learner == 'lightgbm':
from lightgbm import LGBMRegressor
self.meta_learner = LGBMRegressor(max_depth=4, learning_rate=0.05, n_estimators=70)
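# fit() below follows the usual blending recipe: each selected base model is trained on
# 80% of its node's data, its predictions on the held-out 20% are stacked into phase-2
# meta-features, and the meta-learner chosen above is then fit on those meta-features.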
def fit(self, data):
# Split training data for phase 1 and phase 2
test_size = 0.2
# Train basic models using a part of training data
model_cnt = 0
suc_cnt = 0
feature_p2 = None
for algo_id in self.stats["include_algorithms"]:
model_to_eval = self.stats[algo_id]['model_to_eval']
for idx, (node, config) in enumerate(model_to_eval):
X, y = node.data
if self.task_type in CLS_TASKS:
x_p1, x_p2, y_p1, y_p2 = train_test_split(X, y, test_size=test_size,
stratify=data.data[1], random_state=self.seed)
else:
x_p1, x_p2, y_p1, y_p2 = train_test_split(X, y, test_size=test_size,
random_state=self.seed)
if self.base_model_mask[model_cnt] == 1:
estimator = fetch_predict_estimator(self.task_type, config, x_p1, y_p1,
weight_balance=node.enable_balance,
data_balance=node.data_balance
)
with open(os.path.join(self.output_dir, '%s-blending-model%d' % (self.timestamp, model_cnt)),
'wb') as f:
pkl.dump(estimator, f)
if self.task_type in CLS_TASKS:
pred = estimator.predict_proba(x_p2)
n_dim = np.array(pred).shape[1]
if n_dim == 2:
# Binary classificaion
n_dim = 1
# Initialize training matrix for phase 2
if feature_p2 is None:
num_samples = len(x_p2)
feature_p2 = np.zeros((num_samples, self.ensemble_size * n_dim))
# ------------------------------------------------------------------ #
# ╦═╗╔═╗╔╦╗╔═╗╔═╗╔═╗
# ╠╦╝║ ║║║║╚═╗║ ║║
# ╩╚═╚═╝╩ ╩╚═╝╚═╝╚═╝
# Reduced Order Modelling, Simulation, Optimization of Coupled Systems
# 2017-2021
#
# Authors :
# <NAME>, <NAME>, <NAME>
#
# Disclaimer :
# In downloading this SOFTWARE you are deemed to have read and agreed
# to the following terms: This SOFTWARE has been designed with an
# exclusive focus on civil applications. It is not to be used for any
# illegal, deceptive, misleading or unethical purpose or in any
# military applications. This includes ANY APPLICATION WHERE THE USE
# OF THE SOFTWARE MAY RESULT IN DEATH, PERSONAL INJURY OR SEVERE
# PHYSICAL OR ENVIRONMENTAL DAMAGE. Any redistribution of the software
# must retain this disclaimer. BY INSTALLING, COPYING, OR OTHERWISE
# USING THE SOFTWARE, YOU AGREE TO THE TERMS ABOVE. IF YOU DO NOT
# AGREE TO THESE TERMS, DO NOT INSTALL OR USE THE SOFTWARE.
#
# Acknowledgements:
# The ROMSOC project has received funding from the European Union’s
# Horizon 2020 research and innovation programme under the Marie
# Skłodowska-Curie Grant Agreement No. 765374.
# ------------------------------------------------------------------- #
# ------------------------------------------------------------------- #
# Planewave Scattering by a rigid Sphere
# using the classical quadratic PML absorption function
#
# NOTE:
# It can also used for approximating the strength of (frequency-specific)
# PML absorption coefficients, since exact solution is known and errors
# can be computed. Although, the monopole test case is suggested for it.
# ------------------------------------------------------------------- #
import dolfin as d
from complex import *
import os.path as path
import numpy as np
#d.set_log_level(d.LogLevel.DEBUG)
# -------------------------------------------------------------------------------------------------
# PARAMETERS
print('[*] Reading Parameters')
# PHYSICAL PARAMS
# Acoustic Fluid Properties
c = 340. # sound speed
rho = 1.2
# MESH INPUT
meshfile = '../../00_meshes/sphere.xml.gz'
# MODEL PARAMS
# # Incident plane wave params
inc_mag = 1. # Incident plane wave amplitude
inc_dir = np.array([1., 0., 0.]) # Incident plane wave direction
# # Geometry parameters
radius = 0.05
Lx = Ly = Lz = Lpml = 0.2
# PROBLEM PARAMETERS
freqs = np.array([800, 1200]) # np.arange(50, 401, 50)
nfreqs = freqs.size
omegas = 2 * np.pi * freqs
# PML calibration params
sigma0s = np.array([1.32e4, 1.05e5])
# IO PARAMS
# Directory where results are written. End with '/'
results_path = path.abspath('results/')
# -------------------------------------------------------------------------------------------------
# PROBLEM SETUP
print('[*] Setting up objects acc to Parameters')
# MESH
mesh = d.Mesh(meshfile)
# SUB-DOMAINS
# # Extract Sub-domain markers
subdomains = d.MeshFunction("size_t", mesh, mesh.topology().dim())
# # Sub-domain specifications
tol = 1e-10
pml_x = d.CompiledSubDomain("fabs(x[0])>Lx-tol && fabs(x[1])<Ly+tol && fabs(x[2])<Lz+tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
pml_y = d.CompiledSubDomain("fabs(x[0])<Lx+tol && fabs(x[1])>Ly-tol && fabs(x[2])<Lz+tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
pml_z = d.CompiledSubDomain("fabs(x[0])<Lx+tol && fabs(x[1])<Ly+tol && fabs(x[2])>Lz-tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
pml_xy = d.CompiledSubDomain("fabs(x[0])>Lx-tol && fabs(x[1])>Ly-tol && fabs(x[2])<Lz+tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
pml_yz = d.CompiledSubDomain("fabs(x[0])<Lx+tol && fabs(x[1])>Ly-tol && fabs(x[2])>Lz-tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
pml_zx = d.CompiledSubDomain("fabs(x[0])>Lx-tol && fabs(x[1])<Ly+tol && fabs(x[2])>Lz-tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
pml_xyz = d.CompiledSubDomain("fabs(x[0])>Lx-tol && fabs(x[1])>Ly-tol && fabs(x[2])>Lz-tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
# # Mark Sub-domains
subdomains.set_all(0)
pml_x.mark(subdomains, 1)
pml_y.mark(subdomains, 2)
pml_z.mark(subdomains, 3)
pml_xy.mark(subdomains, 4)
pml_yz.mark(subdomains, 5)
pml_zx.mark(subdomains, 6)
pml_xyz.mark(subdomains, 7)
# # Obtain volume integral measures for each sub-domain for volume integrals
dx = d.Measure('dx', domain=mesh, subdomain_data=subdomains)
# BOUNDARIES
# # Get all face markers
boundary_markers = d.MeshFunction("size_t", mesh, mesh.topology().dim()-1)
# # Specify expressions for boundaries
pml_bnd = d.CompiledSubDomain("on_boundary && (fabs(x[0])>Lx+Lpml-tol || fabs(x[1])>Ly+Lpml-tol || fabs(x[2])>Lz+Lpml-tol)",
Lx=Lx, Ly=Ly, Lz=Lz, Lpml=Lpml, tol=tol)
object_bnd = d.CompiledSubDomain("on_boundary && fabs(x[0])<Lx+tol && fabs(x[1])<Ly+tol && fabs(x[2])<Lz+tol",
Lx=Lx, Ly=Ly, Lz=Lz, tol=tol)
# # Enumerate boundaries
boundary_markers.set_all(0)
object_bnd.mark(boundary_markers, 2)
pml_bnd.mark(boundary_markers, 1)
# # Obtain measures for boundary for boundary integrals
ds = d.Measure('ds', domain=mesh, subdomain_data=boundary_markers)
# NORMALS
# Compute normal vectors
n_vec = d.FacetNormal(mesh)
# FUNCTION SPACE SPECIFICATIONS
# # Define function space (Lagrange 1st polynomials)
P1 = d.FiniteElement("Lagrange", mesh.ufl_cell(), 1)
Q = d.FunctionSpace(mesh, P1)
# # Define 2D-Real function space to substitute for Complex space
V = d.FunctionSpace(mesh, P1 * P1) # NOTE: V.sub(0) and V.sub(1) are the subspaces
# INIT OUTPUT FILES
xdmf_file = d.XDMFFile(path.join(results_path, 'results.xdmf'))
xdmf_file.parameters['rewrite_function_mesh'] = False
xdmf_file.parameters['functions_share_mesh'] = True
# -------------------------------------------------------------------------------------------------
# DIAGNOSTIC OUTPUTS
# Write Problem Configuration log
finfo = open(path.join(results_path, 'INFO.log'), 'w')
finfo.write('MESH INFO:\n')
finfo.write('Mesh Name = '+meshfile)
finfo.write('\nNumber of vertices in mesh = ' + str(mesh.coordinates().shape[0])+'\n')
finfo.write('h_min = ' + str(mesh.hmin()) + '\n')
finfo.write('h_max = ' + str(mesh.hmax()) + '\n')
finfo.write('\nPHYSICAL PARAMETERS : \n')
finfo.write('Lx = '+str(Lx)+'\nLy = '+str(Ly)+'\nLz = '+str(Lz)+'\nLpml = '+str(Lpml)+'\n')
finfo.write('\nFLUID PARAMETERS:\n')
finfo.write('c = '+str(c)+'\nrho = '+str(rho)+'\n')
finfo.write('\nPROBLEM CONFIG:\n')
finfo.write('Freqs = '+str(freqs)+'\nSigma0 = '+str(sigma0s)+'\n')
finfo.close()
# Write to file boundary and domain markers
fboundaries = d.File(path.join(results_path, 'BoundaryMarkers.pvd'))
fboundaries << boundary_markers
fdomains = d.File(path.join(results_path, 'DomainMarkers.pvd'))
fdomains << subdomains
# -------------------------------------------------------------------------------------------------
# SOLVER
print('[*] Setting up solver objects and expressions')
# Zero Expression (needed?)
zero = d.Constant("0.")
# Declare Variational form expressions
# # PML Absorption function
sx = d.Expression('fabs(x[0]) > Lx ? s0*pow(fabs(x[0])-Lx,2)/(Lpml*Lpml*w) : 0.',
Lx=Lx,
Lpml=Lpml,
w=0., # Will be overwritten later
s0=0., # Will be overwritten later
degree=2)
sy = d.Expression('fabs(x[1]) > Ly ? s0*pow(fabs(x[1])-Ly,2)/(Lpml*Lpml*w) : 0.',
Ly=Ly,
Lpml=Lpml,
w=0., # Will be overwritten later
s0=0., # Will be overwritten later
degree=2)
sz = d.Expression('fabs(x[2]) > Lz ? s0*pow(fabs(x[2])-Lz,2)/(Lpml*Lpml*w) : 0.',
Lz=Lz,
Lpml=Lpml,
w=0., # Will be overwritten later
s0=0., # Will be overwritten later
degree=2)
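# The three expressions above implement the classical quadratic PML absorption profile
# sigma_d(x_d) = sigma0 * (|x_d| - L_d)^2 / (Lpml^2 * omega) inside the absorbing layer
# (|x_d| > L_d) and 0 in the physical domain; w and s0 are placeholders overwritten later
# for each frequency / sigma0 pair.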
# Neumann boundary conditions
# # At Object Boundary
cpp_g_re = ("A*k*k0_*sin(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))",
"A*k*k1_*sin(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))",
"A*k*k2_*sin(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))")
g_re = d.Expression(cpp_code=cpp_g_re,
A=inc_mag,
k=0., # Will be overwritten later
k0_=inc_dir[0],
k1_=inc_dir[1],
k2_=inc_dir[2],
degree=4)
cpp_g_im = ("-A*k*k0_*cos(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))",
"-A*k*k1_*cos(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))",
"-A*k*k2_*cos(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))")
g_im = d.Expression(cpp_code=cpp_g_im,
A=inc_mag,
k=0., # Will be overwritten later
k0_=inc_dir[0],
k1_=inc_dir[1],
k2_=inc_dir[2],
degree=4)
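# g_re/g_im together encode g = -grad(p_inc) for the incident plane wave
# p_inc = A*exp(i*k*khat.r): for a rigid (sound-hard) scatterer the Neumann data of the
# scattered field is dp_s/dn = -dp_inc/dn, which is presumably how these expressions enter
# the variational form on boundary marker 2 (the object surface).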
# Field due to Plane Wave : $ p = A * exp( |k| \hat{k} \cdot \vec{r}) $
u_pw_re = d.Expression("A*cos(k*( k0_*x[0] + k1_*x[1] + k2_*x[2]))",
A=inc_mag,
k=0., # Will be overwritten later
k0_=inc_dir[0],
k1_=inc_dir[1],
k2_=inc_dir[2],
degree=4)
u_pw_im = d.Expression("A*sin(k*(k0_*x[0] + k1_*x[1] + k2_*x[2]))",
A=inc_mag,
k=0., # Will be overwritten later
k0_=inc_dir[0],
k1_=inc_dir[1],
k2_=inc_dir[2],
degree=4)
# Exact Solution expressions
cpp_uex_re = open("exact/uex_re.cpp").read()
cpp_uex_im = open("exact/uex_im.cpp").read()
# # Real part
uex_re_exp = d.CompiledExpression(d.compile_cpp_code(cpp_uex_re).ScatteringExact_Re(), degree=4)
uex_re_exp.p0 = inc_mag
uex_re_exp.a = radius
# # Imaginary Part
uex_im_exp = d.CompiledExpression(d.compile_cpp_code(cpp_uex_im).ScatteringExact_Im(), degree=4)
uex_im_exp.p0 = inc_mag
uex_im_exp.a = radius
# Total Field expressions
u_tot_exp = d.Expression('f1+f2',
f1=d.Function(Q), # Will be overwritten later
f2=d.Function(Q), # Will be overwritten later
degree=4)
# Error expressions
error_exp = d.Expression('f1-f2',
f1=d.Function(Q), # Will be overwritten later
f2=d.Function(Q), # Will be overwritten later
degree=4)
# Store L2 Errors
errors = np.zeros(nfreqs)
import time
import numpy as np
import pandas as pd
import streamlit as st
from keras.layers import Bidirectional
from keras.layers import ConvLSTM2D
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.models import Sequential
from numpy import array
from tensorflow.keras.layers import Flatten
def main():
"""Sentiment Analysis Emoji App """
st.title("Jamaica Stock Exchange LSTM prediction")
st.subheader('About LSTM:')
st.write('Long Short-Term Memory networks, or LSTMs for short, can be applied to time series forecasting. '
'There are many types of LSTM models that can be used for each specific type of time series '
'forecasting problem. In this scenario we are using Univariate LSTM Models based on linear data '
'Though this method is not ideal for investment purposes it is interesting to see it put into practice')
st.markdown(
"**Acknowledgments:** I'd like to thank <NAME> for his contribution of algorithms and explanation towards this "
"project. His work relating to LSTM sequence predictions can be found at: "
"https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/ ")
LSTM_T = ["Vanilla LSTM", "Bidirectional LSTM", "CNN LSTM", "Conv LSTM", "Multilayer Perceptron Regression"]
choice = st.sidebar.selectbox("vanilla LSTM", LSTM_T)
# if choice == 'Multilayer Perceptron Regression':
if choice == 'Vanilla LSTM':
url = 'https://www.jamstockex.com/market-data/download-data/index-history/main-market/JSE-Index/2010-08-08/2020-08-10'
df = pd.read_html(url)
jse = df[0]
# JSE PLot
# plt.figure(figsize=(18, 18))
# plt.plot(jse['Value'])
# New DF
jse_value = jse['Value']
raw_seq = jse_value
st.subheader("Jamaica stock Exchange graph:")
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence) - 1:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
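# Worked example of the windowing above (values chosen for illustration only):
# split_sequence([10, 20, 30, 40, 50], n_steps=3) returns
#   X = [[10, 20, 30], [20, 30, 40]] and y = [40, 50],
# i.e. each sample is 3 consecutive observations and the target is the next one.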
# choose a number of time steps
n_steps = 3
# split into samples
X, y = split_sequence(raw_seq, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
# define model
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=200, verbose=0)
# demonstrate prediction
x_input = array(jse_value.tail(3).tolist())
x_input = x_input.reshape((1, n_steps, n_features))
yhat = model.predict(x_input, verbose=0)
# Charts
progress_bar = st.sidebar.progress(0)
status_text = st.sidebar.empty()
last_rows = np.random.randn(1, 1)
chart = st.line_chart(jse_value, use_container_width=True)
for i in range(1, 101):
new_rows = last_rows[-1, :] + np.random.randn(5, 1).cumsum(axis=0)
status_text.text("%i%% Complete" % i)
# chart.add_rows(new_rows)
progress_bar.progress(i)
# last_rows = new_rows
time.sleep(0.05)
progress_bar.empty()
# rerun.
st.button("Re-run")
st.subheader('About Vanilla LSTM:')
st.write(' Vanilla LSTM is an LSTM model that has a single hidden layer of LSTM units, and an output layer '
'used to make a prediction. ')
st.write('The LSTM model has predicted that the next value in the sequence will be ' + str(yhat[0][0]))
last_price = jse_value.iloc[-1]
st.write('The current Jamaica Stock Exchange index value is ' + str(last_price))
difference = yhat[0][0] - jse_value.iloc[-1]
st.write('This reflects a difference of ' + str(round(difference, 2)))
if choice == 'Bidirectional LSTM':
st.subheader("Bidirectional LSTM")
url = 'https://www.jamstockex.com/market-data/download-data/index-history/main-market/JSE-Index/2010-08-08/2021-01-01'
df = pd.read_html(url)
jse = df[0]
# New DF
jse_value = jse['Value']
# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence) - 1:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
raw_seq = jse_value
# choose a number of time steps
n_steps = 3
# split into samples
X, y = split_sequence(raw_seq, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
# define model
model = Sequential()
model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=200, verbose=0)
# demonstrate prediction
x_input = array(jse_value.tail(3).tolist())
x_input = x_input.reshape((1, n_steps, n_features))
yhat = model.predict(x_input, verbose=0)
progress_bar = st.sidebar.progress(0)
status_text = st.sidebar.empty()
last_rows = np.random.randn(1, 1)
chart = st.line_chart(jse_value, use_container_width=True)
for i in range(1, 101):
new_rows = last_rows[-1, :] + np.random.randn(5, 1).cumsum(axis=0)
"""Tests for Argo checks."""
import numpy as np
from numpy import ma
import pytest
import argortqcpy.profile
from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck
def test_check_is_required(fake_check):
"""Check that the base check is required."""
assert fake_check.is_required()
def test_output_ensure_output_for_property(profile_from_dataset):
"""Test ensuring a property is given an output array."""
output = CheckOutput(profile=profile_from_dataset)
output.ensure_output_for_property("PRES")
flags = output.get_output_flags_for_property("PRES")
assert flags is not None
assert isinstance(flags, ma.MaskedArray)
assert np.all(flags == ArgoQcFlag.GOOD.value)
def test_output_set_output_flag_for_property(profile_from_dataset):
"""Test setting a flag for a given property."""
output = CheckOutput(profile=profile_from_dataset)
output.ensure_output_for_property("PRES")
output.set_output_flag_for_property("PRES", ArgoQcFlag.GOOD)
flags = output.get_output_flags_for_property("PRES")
assert flags is not None
assert isinstance(flags, ma.MaskedArray)
assert np.all(flags == ArgoQcFlag.GOOD.value)
def test_output_set_output_flag_for_property_where(profile_from_dataset):
"""Test setting a flag for a given property for a limited set of indices."""
output = CheckOutput(profile=profile_from_dataset)
output.ensure_output_for_property("PRES")
output.set_output_flag_for_property("PRES", ArgoQcFlag.PROBABLY_GOOD, where=slice(None, 2))
flags = output.get_output_flags_for_property("PRES")
assert flags is not None
assert isinstance(flags, ma.MaskedArray)
assert np.all(flags[:2] == ArgoQcFlag.PROBABLY_GOOD.value)
assert np.all(flags[2:] == ArgoQcFlag.GOOD.value)
def test_output_set_output_flag_for_property_where_array(profile_from_dataset):
"""Test setting a flag for a given property for indices limited by array."""
output = CheckOutput(profile=profile_from_dataset)
where = np.full_like(profile_from_dataset.get_property_data("PRES"), False, dtype=bool)
where[0] = True
where[-1] = True
output.ensure_output_for_property("PRES")
output.set_output_flag_for_property("PRES", ArgoQcFlag.PROBABLY_GOOD, where=where)
flags = output.get_output_flags_for_property("PRES")
assert flags is not None
assert isinstance(flags, ma.MaskedArray)
assert np.all(flags[0] == ArgoQcFlag.PROBABLY_GOOD.value)
assert np.all(flags[1:-1] == ArgoQcFlag.GOOD.value)
assert np.all(flags[-1] == ArgoQcFlag.PROBABLY_GOOD.value)
@pytest.mark.parametrize(
"lower,higher",
(
(ArgoQcFlag.PROBABLY_GOOD, ArgoQcFlag.BAD),
(ArgoQcFlag.PROBABLY_GOOD, ArgoQcFlag.PROBABLY_BAD),
(ArgoQcFlag.PROBABLY_BAD, ArgoQcFlag.BAD),
),
)
def test_output_set_output_flag_for_property_with_precedence(profile_from_dataset, lower, higher):
"""Test that higher-precedence flags are not overwritten by lower-precedence ones."""
output = CheckOutput(profile=profile_from_dataset)
output.ensure_output_for_property("PRES")
output.set_output_flag_for_property("PRES", lower, where=slice(None, 2))
output.set_output_flag_for_property("PRES", higher, where=slice(None, 1))
output.set_output_flag_for_property("PRES", lower, where=slice(None, 2))
flags = output.get_output_flags_for_property("PRES")
assert flags is not None
assert isinstance(flags, ma.MaskedArray)
assert np.all(flags[:1] == higher.value)
assert np.all(flags[1:2] == lower.value)
assert np.all(flags[2:] == ArgoQcFlag.GOOD.value)
@pytest.mark.parametrize(
"pressure_values",
(
range(10),
[1, 3, 5, 10, 100],
[0, 2, 2.5, 6.85],
),
)
def test_pressure_increasing_check_all_pass(mocker, pressure_values):
"""Test that the pressure increasing test succeeds."""
profile = mocker.patch.object(argortqcpy.profile, "Profile")
profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))
pic = PressureIncreasingCheck(profile, None)
output = pic.run()
assert np.all(output.get_output_flags_for_property("PRES").data == ArgoQcFlag.GOOD.value)
@pytest.mark.parametrize(
"pressure_values,expected",
(
(
[0, 2, 1, 5],
[ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.GOOD.value],
),
),
)
def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):
"""Test that the pressure increasing works when some values are bad."""
profile = mocker.patch.object(argortqcpy.profile, "Profile")
profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))
#!/usr/bin/env python3
"""utils tests"""
import numpy as np
import tensorflow as tf
from lm_human_preferences.utils import core as utils
def test_exact_div():
assert utils.exact_div(12, 4) == 3
assert utils.exact_div(12, 3) == 4
try:
utils.exact_div(7, 3)
assert False
except ValueError:
pass
def test_ceil_div():
for b in range(1, 10 + 1):
for a in range(-10, 10 + 1):
assert utils.ceil_div(a, b) == int(np.ceil(a / b))
def test_expand_tile():
np.random.seed(7)
size = 11
with tf.Session():
for shape in (), (7,), (3, 5):
data = np.asarray(np.random.randn(*shape), dtype=np.float32)
x = tf.constant(data)
for axis in range(-len(shape) - 1, len(shape) + 1):
y = utils.expand_tile(x, size, axis=axis).eval()
assert np.all(np.expand_dims(data, axis=axis) == y)
def test_sample_buffer():
capacity = 100
batch = 17
lots = 100
with tf.Graph().as_default(), tf.Session() as sess:
buffer = utils.SampleBuffer(capacity=capacity, schemas=dict(x=utils.Schema(tf.int32, ())))
tf.variables_initializer(tf.global_variables() + tf.local_variables()).run()
i_p = tf.placeholder(dtype=tf.int32, shape=())
add = buffer.add(x=batch * i_p + tf.range(batch))
sample = buffer.sample(lots, seed=7)['x']
all_data_1 = buffer.data()
all_data_2 = buffer.read(tf.range(buffer.size()))
for i in range(20):
add.run(feed_dict={i_p: i})
samples = sample.eval()
hi = batch * (i + 1)
lo = max(0, hi - capacity)
assert lo <= samples.min() <= lo + 3
assert hi - 5 <= samples.max() < hi
np.testing.assert_equal(sess.run(all_data_1), sess.run(all_data_2))
def test_where():
with tf.Session():
assert np.all(utils.where([False, True], 7, 8).eval() == [8, 7])
assert np.all(utils.where([False, True, True], [1, 2, 3], 8).eval() == [8, 2, 3])
assert np.all(utils.where([False, False, True], 8, [1, 2, 3]).eval() == [1, 2, 8])
assert np.all(utils.where([False, True], [[1, 2], [3, 4]], -1).eval() == [[-1, -1], [3, 4]])
assert np.all(utils.where([False, True], -1, [[1, 2], [3, 4]]).eval() == [[1, 2], [-1, -1]])
def test_map_flat():
with tf.Session() as sess:
inputs = [2], [3, 5], [[7, 11], [13, 17]]
inputs = [np.asarray(i) for i in inputs]  # materialize so the sequence can be iterated again below
outputs = sess.run(utils.map_flat(tf.square, inputs))
for i, o in zip(inputs, outputs):
assert np.all(i * i == o)
def test_map_flat_bits():
with tf.Session() as sess:
inputs = [2], [3, 5], [[7, 11], [13, 17]], [True, False, True]
dtypes = np.uint8, np.uint16, np.int32, np.int64, np.bool
inputs = [np.asarray(i, dtype=d) for i, d in zip(inputs, dtypes)]
outputs = sess.run(utils.map_flat_bits(lambda x: x + 1, inputs))
def tweak(n):
return n + sum(2 ** (8 * i) for i in range(n.dtype.itemsize))
for i, o in zip(inputs, outputs):
assert np.all(tweak(i) == o)
def test_cumulative_max():
np.random.seed(7)
with tf.Session().as_default():
for x in [
np.random.randn(10),
np.random.randn(11, 7),
np.random.randint(-10, 10, size=10),
np.random.randint(-10, 10, size=(12, 8)),
np.random.randint(-10, 10, size=(3, 3, 4)),
]:
assert np.all(utils.cumulative_max(x).eval() == np.maximum.accumulate(x, axis=-1))
def test_index_each():
np.random.seed(7)
x = np.random.randn(7, 11)
"""Utility functions for operating on geometry. See the :class:`Geometry3D`
documentation for the core geometry class.
.. versionadded:: 0.8.6
[functions moved here from :mod:`klampt.model.sensing`]
Working with geometric primitives
=================================
:func:`box` and :func:`sphere` are aliases for the functions in
:mod:`klampt.model.create.primitives`.
Working with point clouds
=========================
:func:`point_cloud_normals` estimates normals from a normal-free
:class:`PointCloud`.
The :func:`fit_plane`, :func:`fit_plane3`, and :class:`PlaneFitter` class help
with plane estimation.
:func:`point_cloud_simplify` simplifies a PointCloud.
:func:`point_cloud_colors` and :func:`point_cloud_set_colors` sets / gets
colors from a PointCloud.
"""
from ..robotsim import Geometry3D,PointCloud
import math
from .create import primitives
from ..math import vectorops,so3,se3
_has_numpy = False
_tried_numpy_import = False
np = None
_has_scipy = False
_tried_scipy_import = False
sp = None
box = primitives.box
"""Alias for :func:`klampt.model.create.primitives.box`"""
sphere = primitives.sphere
"""Alias for :func:`klampt.model.create.primitives.sphere`"""
def _try_numpy_import():
global _has_numpy,_tried_numpy_import
global np
if _tried_numpy_import:
return _has_numpy
_tried_numpy_import = True
try:
import numpy as np
_has_numpy = True
#sys.modules['numpy'] = numpy
except ImportError:
import warnings
warnings.warn("klampt.model.geometry.py: numpy not available.",ImportWarning)
_has_numpy = False
return _has_numpy
def _try_scipy_import():
global _has_scipy,_tried_scipy_import
global sp
if _tried_scipy_import:
return _has_scipy
_tried_scipy_import = True
try:
import scipy as sp
_has_scipy = True
#sys.modules['scipy'] = scipy
except ImportError:
import warnings
warnings.warn("klampt.model.geometry.py: scipy not available.",ImportWarning)
_has_scipy = False
return _has_scipy
class PlaneFitter:
"""
Online fitting of planes through 3D point clouds
Attributes:
normal (3-vector): best-fit normal
centroid (3-vector): centroid of points
count (int): # of points
sse (float): fitting sum of squared errors
cov (3x3 array): covariance of points
"""
def __init__(self,points=None):
_try_numpy_import()
if points is None:
self.count = 0
self.centroid = np.zeros(3)
self.cov = np.zeros((3,3))
self.normal = np.array([0,0,1])
self.sse = 0
else:
self.count = len(points)
self.centroid = np.average(points,axis=0)
pprime = points - [self.centroid]*len(points)
self.cov = np.dot(pprime.T,pprime)/self.count
self._update_plane()
def plane_equation(self):
"""Returns (a,b,c,d) with ax+by+cz+d=0 the plane equation"""
offset = np.dot(self.centroid,self.normal)
return (self.normal[0],self.normal[1],self.normal[2],-offset)
def goodness_of_fit(self):
"""Returns corrected RMSE"""
if self.count <= 3:
return float('inf')
return math.sqrt(self.sse*self.count / (self.count-3))
def add_point(self,pt):
"""Online estimation of best fit plane"""
new_count = self.count + 1
new_centroid = self.centroid + (pt-self.centroid)/new_count
old_sse = (self.cov + np.outer(self.centroid,self.centroid))*self.count
new_sse = old_sse + np.outer(pt,pt)
new_cov = new_sse/new_count - np.outer(new_centroid,new_centroid)
self.count = new_count
self.centroid = new_centroid
self.cov = new_cov
self._update_plane()
def merge(self,fitter,inplace = False):
"""Online merging of two plane fitters.
If inplace = False, returns a new PlaneFitter.
If inplace = True, self is updated with the result.
"""
if not inplace:
res = PlaneFitter()
else:
res = self
new_count = self.count + fitter.count
old_sum = self.centroid*self.count
new_sum = old_sum + fitter.centroid*fitter.count
new_centroid = new_sum/new_count
old_sse = (self.cov + np.outer(self.centroid,self.centroid))*self.count
fitter_sse = (fitter.cov + np.outer(fitter.centroid,fitter.centroid))*fitter.count
new_sse = old_sse + fitter_sse
new_cov = new_sse/new_count - np.outer(new_centroid,new_centroid)
res.count = new_count
res.centroid = new_centroid
res.cov = new_cov
res._update_plane()
return res
def distance(self,pt):
"""Returns the signed distance to this plane"""
return np.dot(self.normal,pt)-np.dot(self.normal,self.centroid)
def _update_plane(self):
w,v = np.linalg.eig(self.cov)
index = np.argmin(w)
self.normal = v[:,index]
self.sse = self.count * np.dot(self.normal,np.dot(self.cov,self.normal))
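# Usage sketch for PlaneFitter (points are made up for illustration):
# fitter = PlaneFitter([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0.01]])
# a, b, c, d = fitter.plane_equation()   # roughly the z = 0 plane
# fitter.add_point([0.5, 0.5, 0.02])     # online update with one more point
# rmse = fitter.goodness_of_fit()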
def point_cloud_simplify(pc,radius):
"""Simplifies a point cloud by averaging points within neighborhoods. Uses
a fast hash grid data structure.
Args:
pc (Geometry3D or PointCloud): the point cloud
radius (float): the neighborhood radius.
"""
if radius <= 0:
raise ValueError("radius must be > 0")
if isinstance(pc,Geometry3D):
assert pc.type() == 'PointCloud',"Must provide a point cloud to point_cloud_simplify"
return pc.convert('PointCloud',radius)
else:
return Geometry3D(pc).convert('PointCloud',radius).getPointCloud()
def point_cloud_normals(pc,estimation_radius=None,estimation_knn=None,estimation_viewpoint=None,add=True):
"""Returns the normals of the point cloud. If pc has the standard
``normal_x, normal_y, normal_z`` properties, these will be returned.
Otherwise, they will be estimated using plane fitting.
The plane fitting method uses scipy nearest neighbor detection if
scipy is available. Otherwise it uses a spatial grid. The process is as
follows:
- If ``estimation_radius`` is provided, then it will use neighbors within
this range. For a spatial grid, this is the grid size.
- If ``estimation_knn`` is provided, then planes will be fit to these
number of neighbors.
- If neither is provided, then estimation_radius is set to 3 * max
dimension of the point cloud / sqrt(N).
- If not enough points are within a neighborhood (either 4 or
``estimation_knn``, whichever is larger), then the normal is set to 0.
- If ``estimation_viewpoint`` is provided, this must be a 3-list. The
normals are oriented such that they point toward the viewpoint.
Returns:
A list of N 3-lists, or an N x 3 numpy array if numpy is available.
If ``add=True``, estimated normals will be added to the point cloud
under the ``normal_x, normal_y, normal_z`` properties.
"""
geom = None
if isinstance(pc,Geometry3D):
assert pc.type() == 'PointCloud',"Must provide a point cloud to point_cloud_normals"
geom = pc
pc = pc.getPointCloud()
assert isinstance(pc,PointCloud)
inds = [-1,-1,-1]
props = ['normal_x','normal_y','normal_z']
for i in range(pc.numProperties()):
try:
ind = props.index(pc.propertyNames[i])
inds[ind] = i
except ValueError:
pass
if all(i>=0 for i in inds):
#has the properties!
normal_x = pc.getProperties(inds[0])
normal_y = pc.getProperties(inds[1])
normal_z = pc.getProperties(inds[2])
if _has_numpy:
return np.array([normal_x,normal_y,normal_z]).T
else:
return list(zip(normal_x,normal_y,normal_z))
if not all(i < 0 for i in inds):
raise ValueError("Point cloud has some normal components but not all of them?")
#need to estimate normals
_try_numpy_import()
_try_scipy_import()
N = len(pc.vertices)//3
if not _has_numpy:
raise RuntimeError("Need numpy to perform plane fitting")
positions = np.array(pc.vertices)
positions = positions.reshape((N,3))
if estimation_radius is None and estimation_knn is None:
R = max(positions.max(axis=0)-positions.min(axis=0))
estimation_radius = 3*R/math.sqrt(N)
if estimation_knn is None or estimation_knn < 4:
estimation_knn = 4
normals = []
if _has_scipy:
import scipy.spatial
tree = scipy.spatial.cKDTree(positions)
if estimation_radius is not None:
neighbors = tree.query_ball_point(positions,estimation_radius)
for n in neighbors:
if len(n) < estimation_knn:
normals.append([0,0,0])
else:
#fit a plane to neighbors
normals.append(fit_plane([positions[i] for i in n])[:3])
else:
d,neighbors = tree.query(positions,estimation_knn)
for n in neighbors:
normals.append(fit_plane([positions[i] for i in n])[:3])
else:
if estimation_radius is None:
raise ValueError("Without scipy, can't do a k-NN plane estimation")
#do a spatial hash
normals = np.zeros((N,3))
indices = (positions * (1.0/estimation_radius)).astype(int)
from collections import defaultdict
pt_hash = defaultdict(list)
for i,(ind,p) in enumerate(zip(indices,positions)):
pt_hash[ind].append((i,p))
successful = 0
for (ind,iplist) in pt_hash.items():
if len(iplist) < estimation_knn:
pass
else:
pindices = [ip[0] for ip in iplist]
pts = [ip[1] for ip in iplist]
n = fit_plane(pts)[:3]
normals[pindices,:] = n
successful += len(pindices)
normals = np.asarray(normals)
if estimation_viewpoint is not None:
#flip back-facing normals
disp = positions - estimation_viewpoint
for i,(n,d) in enumerate(zip(normals,disp)):
if np.dot(n,d) < 0:
normals[i,:] = -n
else:
#flip back-facing normals assuming centroid is interior
centroid = np.average(positions,axis=0)
for i,(n,p) in enumerate(zip(normals,positions)):
if np.dot(n,p-centroid) < 0:
normals[i,:] = -n
if add:
normal_x = normals[:,0].tolist()
normal_y = normals[:,1].tolist()
normal_z = normals[:,2].tolist()
pc.addProperty('normal_x',normal_x)
pc.addProperty('normal_y',normal_y)
pc.addProperty('normal_z',normal_z)
if geom is not None:
geom.setPointCloud(pc)
return normals
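# Usage sketch (assumes `pc` is an existing PointCloud or Geometry3D point cloud;
# the radius and viewpoint values below are illustrative, not prescribed):
# normals = point_cloud_normals(pc, estimation_radius=0.02,
#                               estimation_viewpoint=[0, 0, 0], add=True)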
def fit_plane3(point1,point2,point3):
"""Returns a 3D plane equation fitting the 3 points.
The result is (a,b,c,d) with the plane equation ax+by+cz+d=0
"""
_try_numpy_import()
normal = np.cross(point2-point1,point3-point1)
nlen = np.linalg.norm(normal)
if nlen < 1e-4:
#degenerate
raise ValueError("Points are degenerate")
normal = normal / nlen
offset = -np.dot(normal,point1)
return (normal[0],normal[1],normal[2],offset)
def fit_plane(points):
"""Returns a 3D plane equation that is a least squares fit
through the points (len(points) >= 3)."""
centroid,normal = fit_plane_centroid(points)
return normal[0],normal[1],normal[2],-vectorops.dot(centroid,normal)
def fit_plane_centroid(points):
"""Similar to :func:`fit_plane`, but returns a (centroid,normal) pair."""
if len(points)<3:
raise ValueError("Need to have at least 3 points to fit a plane")
#if len(points)==3:
# return fit_plane3(points[0],points[1],points[2])
_try_numpy_import()
points = np.asarray(points)
centroid = np.average(points,axis=0)
U,W,Vt = np.linalg.svd(points-[centroid]*len(points),full_matrices=False)
if np.sum(W<1e-6) > 1:
raise ValueError("Point set is degenerate")
normal = Vt[2,:]
return centroid.tolist(),normal.tolist()
def _color_format_from_uint8_channels(format,r,g,b,a=None):
import numpy as np
if a is None:
a = 0xff
if format == 'rgb':
return np.bitwise_or.reduce((np.left_shift(r,16),np.left_shift(g,8),b)).tolist()
elif format == 'bgr':
return np.bitwise_or.reduce((np.left_shift(b,16),np.left_shift(g,8),r)).tolist()
elif format=='rgba':
return np.bitwise_or.reduce((np.left_shift(r,24),np.left_shift(g,16),np.left_shift(b,8),a)).tolist()
elif format=='bgra':
return np.bitwise_or.reduce((np.left_shift(b,24),np.left_shift(g,16),np.left_shift(r,8),a)).tolist()
elif format=='argb':
return np.bitwise_or.reduce((np.left_shift(a,24),np.left_shift(r,16),np.left_shift(g,8),b)).tolist()
elif format=='abgr':
return np.bitwise_or.reduce((np.left_shift(a,24),np.left_shift(b,16),np.left_shift(g,8),r)).tolist()
elif format=='channels':
one_255 = 1.0/255.0
if not hasattr(a,'__iter__'):
return (r*one_255).tolist(),(g*one_255).tolist(),(b*one_255).tolist()
else:
return (r*one_255).tolist(),(g*one_255).tolist(),(b*one_255).tolist(),(a*one_255).tolist()
elif format=='opacity':
one_255 = 1.0/255.0
if not hasattr(a,'__iter__'):
return np.ones(len(r))
return (a*one_255).tolist()
elif tuple(format)==('r','g','b'):
one_255 = 1.0/255.0
return np.column_stack((r*one_255,g*one_255,b*one_255)).tolist()
elif tuple(format)==('r','g','b','a'):
one_255 = 1.0/255.0
if not hasattr(a,'__iter__'):
a = np.full(len(r),a)
return np.column_stack((r*one_255,g*one_255,b*one_255,a*one_255)).tolist()
else:
raise ValueError("Invalid format specifier "+str(format))
def _color_format_to_uint8_channels(format,colors):
import numpy as np
if format=='channels':
return tuple((np.asarray(c)*255).astype(np.uint8).tolist() for c in colors)
colors = np.asarray(colors)
if format == 'rgb':
r,g,b = np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist()
elif format == 'bgr':
b,g,r = np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist()
elif format=='rgba':
r,g,b,a = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
elif format=='bgra':
b,g,r,a = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
elif format=='argb':
a,r,g,b = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
"""
Read the FISS fts file and its header.
"""
from __future__ import absolute_import, division
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from astropy.io import fits
from scipy.signal import savgol_filter
from scipy.signal import fftconvolve as conv
import numpy as np
import os
__all__ = ['frame', 'pca_read', 'raster', 'getheader', 'frame2raster',
'sp_av', 'sp_med', 'wavecalib', 'simple_wvcalib']
def frame(file, x1=0, x2=False, pca=True, ncoeff=False, xmax=False,
smooth=False, **kwargs):
"""Read the FISS fts file.
Parameters
----------
file : str
A string of file name to be read.
x1 : int
A starting index of the frame along the scanning direction.
x2 : (optional) int
An ending index of the frame along the scanning direction.
If not, then the only x1 frame is read.
pca : (optional) bool
If True, the frame is read from the PCA file.
Default is True, but the function automatically checks
the existence of the pca file.
ncoeff : (optional) int
The number of coefficients to be used for
the construction of frame in a pca file.
xmax : (optional) bool
If True, the x2 value is set as the maximum end point of the frame.
* Default is False.
smooth : (optional) bool
If True, apply the Savitzky-Golay filter to increase the signal to
noise without greatly distorting the signal of the given fts file.
* Default is False.
nsmooth : (optional) int
The number of smoothing passes.
Default is 1 for the case of the compressed file,
and is 2 for the case of the uncompressed file.
kwargs
The parameters for smooth (savitzky-golay filter), \n
See the docstring of the `scipy.signal.savgol_filter`.
Returns
-------
frame : ~numpy.ndarray
FISS data frame with the information of (wavelength, y, x).
References
----------
`Savitzky-Golay filter <https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter>`_.\n
`scipy.signal.savgol_filter <https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.savgol_filter.html#scipy.signal.savgol_filter>`_
Notes
-----
This function is based on the IDL code FISS_READ_FRAME.PRO
written by <NAME>, 2013.
This function automatically checks the existence of the pca
file by reading the fts header.
Example
-------
.. plot::
:include-source:
import matplotlib.pyplot as plt
from fisspy.io import read
import fisspy
import fisspy.data.sample
data=read.frame(fisspy.data.sample.FISS_IMAGE,xmax=True)
plt.imshow(data[:,75],cmap=fisspy.cm.ca,origin='lower',interpolation='bilinear')
plt.title(r"GST/FISS 8542 $\AA$ Spectrogram")
plt.show()
"""
if not file:
raise ValueError('Empty filename')
if x2 and x2 <= x1:
raise ValueError('x2 must be larger than x1')
header=fits.getheader(file)
try:
header['pfile']
except:
pca=False
if xmax and not x2:
x2=header['naxis3']
elif not x2:
x2=x1+1
if pca:
spec=pca_read(file,header,x1,x2,ncoeff=ncoeff)
else:
spec=fits.getdata(file)[x1:x2]
if x1+1 == x2:
spec=spec[0]
return spec
spec=spec.transpose((1,0,2)).astype(float)
if smooth:
winl=kwargs.pop('window_length',7)
pord=kwargs.pop('polyorder',3)
deriv=kwargs.pop('deriv',0)
delta=kwargs.pop('delta',1.0)
mode=kwargs.pop('mode','interp')
cval=kwargs.pop('cval',0.0)
nsmooth=kwargs.pop('nsmooth',int(not pca)+1)
for i in range(nsmooth):
spec=savgol_filter(spec,winl,pord,deriv=deriv,
delta=delta,mode=mode,cval=cval)
return spec
def pca_read(file,header,x1,x2=False,ncoeff=False):
"""
Read the pca compressed FISS fts file.
Parameters
----------
file : str
A string of file name to be read.
header : astropy.io.fits.header.Header
The fts file header.
x1 : int
A starting index of the frame along the scanning direction.
x2 : (optional) int
An ending index of the frame along the scanning direction.
If not, then the only x1 frame is read.
ncoeff : (optional) int
The number of coefficients to be used for
the construction of frame in a pca file.
Returns
-------
frame : ~numpy.ndarray
FISS data frame with the information of (wavelength, y, x).
Notes
-----
This function is based on the IDL code FISS_PCA_READ.PRO
written by <NAME>, 2013.
The required fts data are two. One is the "_c.fts",
and the other is "_p.fts"
"""
if not file:
raise ValueError('Empty filename')
if not x2:
x2 = x1+1
dir = os.path.dirname(file)
pfile = header['pfile']
if dir:
pfile = os.path.join(dir, pfile)
pdata = fits.getdata(pfile)
data = fits.getdata(file)[x1:x2]
ncoeff1 = data.shape[2]-1
if not ncoeff:
ncoeff = ncoeff1
elif ncoeff > ncoeff1:
ncoeff = ncoeff1
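# Reconstruct the spectra from the principal components: each frame stores the
# leading PCA coefficients plus (in its last column) a log10 scale factor, so the
# spectrum is (coefficients . principal components) * 10**(scale).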
spec = np.dot(data[:,:,0:ncoeff],pdata[0:ncoeff,:])
spec *= 10.**data[:,:,ncoeff][:,:,None]
return spec
def raster(file, wv, hw=0.05, x1=0, x2=False, y1=0, y2=False,
pca=True, smooth=False, absScale = False, **kwargs):
"""
Make raster images for a given file at wv of wavelength within width hw
Parameters
----------
file : str
A string of file name to be read.
wv : float or 1d ndarray
Referenced wavelengths.
hw : float
A half-width of wavelength integration in unit of Angstrom.
Default is 0.05
x1 : (optional) int
A starting index of the frame along the scanning direction.
x2 : (optional) int
An ending index of the frame along the scanning direction.
If not, x2 is set to the maximum end point of the frame.
y1 : (optional) int
A starting index of the frame along the slit position.
y2 : (optional) int
An ending index of the frame along the slit position.
pca : (optional) bool
If True, the frame is read from the PCA file.
Default is True, but the function automatically checks
the existence of the pca file.
absScale : (optional) bool
If True, the wavelength should be given in absolute scale.
If False, the wavelength should be given in relative scale.
smooth : (optional) bool
If True, apply the Savitzky-Golay filter to increase the signal to
noise without greatly distorting the signal of the given fts file.
* Default is False.
kwargs
Any additional keyword arguments to read frame.
See the docstring of `fisspy.io.read.frame`
Returns
-------
Raster : ~numpy.ndarray
Raster image at given wavelengths.
Notes
-----
This function is based on the IDL code FISS_RASTER.PRO
written by <NAME>, 2013.
This function automatically checks the existence of the pca file by
reading the fts header.
Example
-------
.. plot::
:include-source:
import matplotlib.pyplot as plt
from fisspy.io import read
from fisspy import cm
import fisspy.data.sample
raster=read.raster(fisspy.data.sample.FISS_IMAGE,0.3)
plt.imshow(raster, cmap=cm.ca, origin='lower', interpolation='bilinear')
plt.title(r"GST/FISS 8542+0.3 $\AA$ Spectrogram")
plt.show()
"""
header = getheader(file,pca)
ny = header['NAXIS2']
nx = header['NAXIS3']
dldw = header['CDELT1']
if not file:
raise ValueError('Empty filename')
if x2 and x2 <= x1+1:
raise ValueError('x2 must be larger than x1+1')
try:
num = wv.shape[0]
except:
num = 1
wv = np.array([wv])
if not x2:
x2 = int(nx)
if not y2:
y2 = int(ny)
wl = simple_wvcalib(header, absScale= absScale)
if hw < abs(dldw)/2.:
hw = abs(dldw)/2.
s = np.abs(wl-wv[:,None])<=hw
sp = frame(file,x1,x2,pca=pca,smooth=smooth,**kwargs)
leng = s.sum(1)
if num == 1:
img = sp[y1:y2,:,s[0,:]].sum(2)/leng[0]
return img.reshape((y2-y1,x2-x1))
else:
img=np.array([sp[y1:y2,:,s[i,:]].sum(2)/leng[i] for i in range(num)])
return img.reshape((num,y2-y1,x2-x1))
def getheader(file,pca=True):
"""
Get the FISS fts file header.
Parameters
----------
file : str
A string of file name to be read.
pca : (optional) bool
If True, the frame is read from the PCA file.
Default is True, but the function automatically checks
the existence of the pca file.
Returns
-------
header : astropy.io.fits.Header
The fts file header.
Notes
-----
This function automatically checks the existence of the pca file by
reading the fts header.
Example
-------
>>> from fisspy.io import read
>>> h=read.getheader(file)
>>> h['date']
'2014-06-03T16:49:42'
"""
header0 = fits.getheader(file)
pfile = header0.pop('pfile',False)
if not pfile:
return header0
header = fits.Header()
if pca:
header['pfile']=pfile
for i in header0['comment']:
sori = i.split('=')
if len(sori) == 1:
skv = sori[0].split(None,1)
if len(skv) == 1:
pass
else:
header[skv[0]] = skv[1]
else:
key = sori[0]
svc = sori[1].split('/')
try:
item = float(svc[0])
except:
item = svc[0].split("'")
if len(item) != 1:
item = item[1].split(None,0)[0]
else:
item = item[0].split(None,0)[0]
try:
if item-int(svc[0]) == 0:
item = int(item)
except:
pass
if len(svc) == 1:
header[key] = item
else:
header[key] = (item,svc[1])
header['simple'] = True
alignl=header0.pop('alignl',-1)
if alignl == 0:
keys=['reflect','reffr','reffi','cdelt2','cdelt3','crota2',
'crpix3','shift3','crpix2','shift2','margin2','margin3']
header['alignl'] = (alignl,'Alignment level')
for i in keys:
header[i] = (header0[i],header0.comments[i])
header['history'] = str(header0['history'])
if alignl == 1:
keys=['reflect','reffr','reffi','cdelt2','cdelt3','crota1',
'crota2','crpix3','crval3','shift3','crpix2','crval2',
'shift2','margin2','margin3']
header['alignl'] = (alignl,'Alignment level')
for i in keys:
header[i] = (header0[i],header0.comments[i])
header['history'] = str(header0['history'])
return header
def frame2raster(frame, header, wv, absScale = False):
"""
Make a raster image by using the frame data.
Parameters
----------
frame : ~numpy.ndarray
Data which is read from the fisspy.io.read.frame
header : astropy.io.fits.Header
FISS data header
wv : float or ~numpy.ndarray
Referenced wavelengths to draw the raster image. It must be a single float
or a 1D array.
absScale : (optional) bool
If True, the wavelength should be given in absolute scale.
If False, the wavelength should be given in relative scale.
Returns
-------
Raster : ~numpy.ndarray
Raster image at the given wavelength.
"""
hw = 0.05
wl = simple_wvcalib(header, absScale= absScale)
s = np.abs(wl - wv) <= hw
img = frame[:, :, s].sum(2) / s.sum()
return img
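# Usage sketch (file name and wavelength offset are illustrative only):
# hdr = getheader('FISS_A1_c.fts')
# data = frame('FISS_A1_c.fts', xmax=True)
# img = frame2raster(data, hdr, wv=0.3, absScale=False)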
def sp_av(file) :
a = frame(file, xmax = True)
return a.mean(axis = 1)
def sp_med(file) :
a = frame(file, xmax = True)
return np.median(a, axis = 1)
def wavecalib(band,profile,method=True):
"""
Calibrate the wavelength for FISS spectrum profile.
Parameters
----------
band : str
A string to identify the wavelength.
Allowable wavelength bands are '6562','8542','5890','5434'
profile : ~numpy.ndarray
A 1 dimensional numpy array of spectral profile.
method : (optional) bool
* Default is True.
If true, the reference lines for calibration are the telluric lines.
Else if False, the reference lines are the solar absorption lines.
Returns
-------
wavelength : ~numpy.ndarray
Calibrated wavelength.
Notes
-----
This function is based on the FISS IDL code FISS_WV_CALIB.PRO
written by <NAME>, 2013.
Example
-------
>>> from fisspy.analysis import doppler
>>> wv=doppler.wavecalib('6562',profile)
"""
band=band[0:4]
nw=profile.shape[0]
if method:
if band == '6562':
line=np.array([6561.097,6564.206])
lamb0=6562.817
dldw=0.019182
elif band == '8542':
line=np.array([8540.817,8546.222])
lamb0=8542.090
dldw=-0.026252
elif band == '5890':
line=np.array([5889.951,5892.898])
lamb0=5889.9509
dldw=0.016847
elif band == '5434':
line=np.array([5434.524,5436.596])
lamb0=5434.5235
dldw=-0.016847
else:
raise ValueError("The wavelength band value is not allowable.\n"+
"Please select the wavelenth "+
"among '6562','8542','5890','5434'")
else:
if band == '6562':
line=np.array([6562.817,6559.580])
lamb0=6562.817
dldw=0.019182
elif band == '8542':
line=np.array([8542.089,8537.930])
lamb0=8542.090
dldw=-0.026252
else:
raise ValueError("The wavelength band value is not allowable.\n"
"Please select the wavelenth "
"among '6562','8542','5890','5434'")
w=np.arange(nw)
wl=np.zeros(2)
from __future__ import print_function
# modified based on the PythTB python tight binding module.
import numpy as np
from builtins import range, zip
from pyglib.iface.wannierio import get_wannier_data
def tb_wigner_seitz(ngrid,lat):
deg_ws = []
rvec_ws = []
ndiff = np.zeros(3)
for n0 in range(-ngrid[0], ngrid[0]+1):
for n1 in range(-ngrid[1], ngrid[1]+1):
for n2 in range(-ngrid[2], ngrid[2]+1):
dist_list = []
for i0 in [0,1,2,-1,-2]:
ndiff[0] = n0 - i0*ngrid[0]
for i1 in [0,1,2,-1,-2]:
ndiff[1] = n1 - i1*ngrid[1]
for i2 in [0,1,2,-1,-2]:
ndiff[2] = n2 - i2*ngrid[2]
dist_list.append(np.linalg.norm(ndiff.dot(lat)))
dist_list = np.asarray(dist_list)
dist_min = np.min(dist_list)
if np.abs(dist_list[0]-dist_min) < 1.e-7:
deg_ws.append(np.count_nonzero(\
np.abs(dist_list-dist_min) < 1.e-7))
rvec_ws.append(np.array([n0,n1,n2]))
# sum-rule check
deg_ws = np.array(deg_ws)
rvec_ws = np.asarray(rvec_ws)
tot1 = np.sum(1./deg_ws)
tot2 = np.prod(ngrid)
if np.abs(tot1 - tot2) > 1.e-7:
raise ValueError("error in finding wigner-seitz points {} vs {}".\
format(tot1, tot2))
return deg_ws, rvec_ws
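# Usage sketch (hypothetical 4x4x4 k-grid on a simple cubic lattice):
# lat = np.eye(3)
# deg_ws, rvec_ws = tb_wigner_seitz([4, 4, 4], lat)
# # sum(1/deg_ws) equals the number of k-points (4*4*4); this sum rule is checked internally above.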
def get_tb_hr(kpoints,rpoints,wfwans,evals):
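# Builds the real-space Hamiltonian by an inverse discrete Fourier transform,
#   H(R) = (1/N_k) * sum_k exp(-2*pi*i k.R) U(k)^dagger diag(E(k)) U(k),
# where U(k) are the Wannier-gauge eigenvector matrices and E(k) the band energies.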
phase_mat = np.exp(-2.j*np.pi*np.asarray(kpoints).dot(rpoints.T)) \
/len(kpoints)
hk_list = [[wfwansk1.T.conj().dot(np.diag(evalsk1)).dot(wfwansk1) \
for wfwansk1, evalsk1 in zip(wfwans1,evals1)]\
for wfwans1, evals1 in zip(wfwans,evals)]
hk_list = np.array(hk_list).swapaxes(1,2).swapaxes(2,3)
hr_list = np.tensordot(hk_list, phase_mat, axes=(3,0))
return hr_list
class tb_model(object):
r"""
This is the main class of the PythTB package which contains all
information for the tight-binding model.
:param lat: Array containing lattice vectors in Cartesian
coordinates (in arbitrary units). In example the below, the first
lattice vector has coordinates [1.0,0.5] while the second
one has coordinates [0.0,2.0]. By default, lattice vectors
are an identity matrix.
"""
def __init__(self,lat,deg_ws,rpoints,hr_list):
self._dim_k = 3
self._dim_r = 3
self._lat=np.array(lat,dtype=float)
if self._lat.shape != (self._dim_r,self._dim_r):
raise Exception("\nWrong lat array dimensions")
# check that volume is not zero and that have right handed system
if np.abs(np.linalg.det(self._lat))<1.0E-6:
raise Exception(\
"\nLattice vectors length/area/volume too"+\
" close to zero, or zero.")
if np.linalg.det(self._lat)<0.0:
raise Exception(\
"\n\nLattice vectors need to form right handed system.")
self.deg_ws = np.asarray(deg_ws)
self.rpoints = np.asarray(rpoints)
self.hr_list = np.asarray(hr_list)
self._norb = self.hr_list.shape[2]
def _gen_ham(self,kpt,isp):
"""Generate Hamiltonian for a certain k-point,
which is given in reduced coordinates!"""
phase_mat = np.exp(-2.j*np.pi*self.rpoints.dot(kpt))/self.deg_ws
ham = np.tensordot(self.hr_list[isp],phase_mat,axes=(2,0))
return ham
def _sol_ham(self,ham,eig_vectors=False):
"""Solves Hamiltonian and returns eigenvectors, eigenvalues"""
# check that matrix is hermitian
if np.max(ham-ham.T.conj())>1.0E-9:
raise Exception("\n\nHamiltonian matrix is not hermitian?!")
#solve matrix
if eig_vectors==False: # only find eigenvalues
eval = np.linalg.eigvalsh(ham)
# sort eigenvalues and convert to real numbers
eval = _nicefy_eig(eval)
return np.array(eval,dtype=float)
else: # find eigenvalues and eigenvectors
eval,eig = np.linalg.eigh(ham)
# sort evectors, eigenvalues and convert to real numbers
eval,eig = _nicefy_eig(eval,eig)
# reshape eigenvectors if doing a spinfull calculation
return eval, eig
def k_uniform_mesh(self,mesh_size):
r"""
Returns a uniform grid of k-points that can be passed to
function :func:`pythtb.tb_model.solve_all`. This
function is useful for plotting density of states histogram
and similar.
Returned uniform grid of k-points always contains the origin.
:param mesh_size: Number of k-points in the mesh in each
periodic direction of the model.
:returns:
* **k_vec** -- Array of k-vectors on the mesh that can be
directly passed to function :func:`pythtb.tb_model.solve_all`.
Example usage::
# returns a 10x20x30 mesh of a tight binding model
# with three periodic directions
k_vec = my_model.k_uniform_mesh([10,20,30])
# solve model on the uniform mesh
my_model.solve_all(k_vec)
"""
# get the mesh size and checks for consistency
use_mesh=np.array(list(map(round,mesh_size)),dtype=int)
if use_mesh.shape!=(self._dim_k,):
print(use_mesh.shape)
raise Exception("\n\nIncorrect size of the specified k-mesh!")
if np.min(use_mesh)<=0:
raise Exception("\n\nMesh must have positive non-zero number of elements.")
# construct the mesh
if self._dim_k==1:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[1])
norm=norm.transpose([1,0])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,0]).reshape([use_mesh[0],1])
elif self._dim_k==2:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[2])
norm=norm.transpose([2,0,1])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,0]).reshape([use_mesh[0]*use_mesh[1],2])
elif self._dim_k==3:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1],0:use_mesh[2]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[3])
norm=norm.transpose([3,0,1,2])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,3,0]).reshape([use_mesh[0]*use_mesh[1]*use_mesh[2],3])
else:
raise Exception("\n\nUnsupported dim_k!")
return k_vec
def k_path(self,kpts,nk,report=True):
r"""
Interpolates a path in reciprocal space between specified
k-points. In 2D or 3D the k-path can consist of several
straight segments connecting high-symmetry points ("nodes"),
and the results can be used to plot the bands along this path.
The interpolated path that is returned contains as
equidistant k-points as possible.
:param kpts: Array of k-vectors in reciprocal space between
which interpolated path should be constructed. These
k-vectors must be given in reduced coordinates. As a
special case, in 1D k-space kpts may be a string:
* *"full"* -- Implies *[ 0.0, 0.5, 1.0]* (full BZ)
* *"fullc"* -- Implies *[-0.5, 0.0, 0.5]* (full BZ, centered)
* *"half"* -- Implies *[ 0.0, 0.5]* (half BZ)
:param nk: Total number of k-points to be used in making the plot.
:param report: Optional parameter specifying whether printout
is desired (default is True).
:returns:
* **k_vec** -- Array of (nearly) equidistant interpolated
k-points. The distance between the points is calculated in
the Cartesian frame, however coordinates themselves are
given in dimensionless reduced coordinates! This is done
so that this array can be directly passed to function
:func:`pythtb.tb_model.solve_all`.
* **k_dist** -- Array giving accumulated k-distance to each
k-point in the path. Unlike array *k_vec* this one has
dimensions! (Units are defined here so that for an
one-dimensional crystal with lattice constant equal to for
example *10* the length of the Brillouin zone would equal
*1/10=0.1*. In other words factors of :math:`2\pi` are
absorbed into *k*.) This array can be used to plot path in
the k-space so that the distances between the k-points in
the plot are exact.
* **k_node** -- Array giving accumulated k-distance to each
node on the path in Cartesian coordinates. This array is
typically used to plot nodes (typically special points) on
the path in k-space.
Example usage::
# Construct a path connecting four nodal points in k-space
# Path will contain 401 k-points, roughly equally spaced
path = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.5], [0.0, 0.0]]
(k_vec,k_dist,k_node) = my_model.k_path(path,401)
# solve for eigenvalues on that path
evals = tb.solve_all(k_vec)
# then use evals, k_dist, and k_node to plot bandstructure
# (see examples)
"""
# processing of special cases for kpts
if kpts=='full':
# full Brillouin zone for 1D case
k_list=np.array([[0.],[0.5],[1.]])
elif kpts=='fullc':
# centered full Brillouin zone for 1D case
k_list=np.array([[-0.5],[0.],[0.5]])
elif kpts=='half':
# half Brillouin zone for 1D case
k_list=np.array([[0.],[0.5]])
else:
k_list=np.array(kpts)
# in 1D case if path is specified as a vector, convert it to an (n,1) array
if len(k_list.shape)==1 and self._dim_k==1:
k_list=np.array([k_list]).T
# make sure that k-points in the path have correct dimension
if k_list.shape[1]!=self._dim_k:
print('input k-space dimension is',k_list.shape[1])
print('k-space dimension taken from model is',self._dim_k)
raise Exception("\n\nk-space dimensions do not match")
# must have more k-points in the path than number of nodes
if nk<k_list.shape[0]:
raise Exception("\n\nMust have more points in the path than number of nodes.")
# number of nodes
n_nodes=k_list.shape[0]
# extract the lattice vectors from the TB model
lat_per=np.copy(self._lat)
# compute k_space metric tensor
k_metric = np.linalg.inv(np.dot(lat_per,lat_per.T))
# Find distances between nodes and set k_node, which is
# accumulated distance since the start of the path
# initialize array k_node
k_node=np.zeros(n_nodes,dtype=float)
for n in range(1,n_nodes):
dk = k_list[n]-k_list[n-1]
dklen = np.sqrt(np.dot(dk,np.dot(k_metric,dk)))
k_node[n]=k_node[n-1]+dklen
# Find indices of nodes in interpolated list
node_index=[0]
for n in range(1,n_nodes-1):
frac=k_node[n]/k_node[-1]
node_index.append(int(round(frac*(nk-1))))
node_index.append(nk-1)
# initialize two arrays temporarily with zeros
# array giving accumulated k-distance to each k-point
k_dist=np.zeros(nk,dtype=float)
# array listing the interpolated k-points
k_vec=np.zeros((nk,self._dim_k),dtype=float)
# go over all kpoints
k_vec[0]=k_list[0]
for n in range(1,n_nodes):
n_i=node_index[n-1]
n_f=node_index[n]
kd_i=k_node[n-1]
kd_f=k_node[n]
k_i=k_list[n-1]
k_f=k_list[n]
for j in range(n_i,n_f+1):
frac=float(j-n_i)/float(n_f-n_i)
k_dist[j]=kd_i+frac*(kd_f-kd_i)
k_vec[j]=k_i+frac*(k_f-k_i)
if report==True:
if self._dim_k==1:
print(' Path in 1D BZ defined by nodes at '+str(k_list.flatten()))
else:
print('----- k_path report begin ----------')
original=np.get_printoptions()
np.set_printoptions(precision=5)
#
# Copyright (C) 2019 <NAME>
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import os
import numpy
import logging
import matplotlib.pyplot as plot
import matplotlib.ticker as mticker
import enum
import time
# Import required src
from usienarl import Environment, Volley, Agent, Interface
class EpisodeVolleyType(enum.Enum):
"""
Enum type of the episode volley: training, validation or test.
"""
training = 0
validation = 1
test = 2
class EpisodeVolley(Volley):
"""
Episode-based volley. Used for training, validation and test volleys. It is executed for a certain number of
episodes. When run in training and validation modes, plots of each episode (averaged to always be 100 episodes)
are saved.
"""
def __init__(self,
environment: Environment,
agent: Agent,
interface: Interface,
parallel: int,
episode_volley_type: EpisodeVolleyType,
plots_path: str or None, plots_dpi: int or None,
episodes_required: int, episode_length: int):
# Generate base volley
super(EpisodeVolley, self).__init__(environment, agent, interface, parallel)
# Make sure additional parameters are correct
assert(episode_volley_type is not None)
# Note: plots path and DPI are required only if the volley is not a test one
if episode_volley_type != EpisodeVolleyType.test:
assert(plots_path is not None and plots_path)
assert(plots_dpi is not None and plots_dpi > 0)
assert(episodes_required > 0 and episode_length > 0)
assert(parallel > 0)
assert(episodes_required % parallel == 0)
# Define internal attributes
self._episode_volley_type: EpisodeVolleyType = episode_volley_type
self._plots_path: str or None = plots_path
self._plots_dpi: int = plots_dpi
self._episodes_required: int = episodes_required
self._episode_length: int = episode_length
# Define empty attributes
self._last_episode_done: numpy.ndarray or None = None
self._last_reward: numpy.ndarray or None = None
self._avg_total_reward: float or None = None
self._avg_scaled_reward: float or None = None
self._std_total_reward: float or None = None
self._std_scaled_reward: float or None = None
self._avg_episode_length: int or None = None
self._avg_action_duration: float or None = None
self._rewards: [] = []
self._total_rewards: [] = []
self._scaled_rewards: [] = []
self._episode_lengths: [] = []
self._actions_durations: [] = []
def _initialize(self) -> bool:
# Reset empty attributes
self._last_episode_done = None
self._last_reward = None
self._avg_total_reward = None
self._avg_scaled_reward = None
self._std_total_reward = None
self._std_scaled_reward = None
self._avg_episode_length = None
self._avg_action_duration = None
self._rewards = []
self._total_rewards = []
self._scaled_rewards = []
self._episode_lengths = []
self._actions_durations = []
# This initialization always succeed
return True
def run(self,
logger: logging.Logger,
session,
render: bool = False):
# Print info
if self._episode_volley_type == EpisodeVolleyType.training:
logger.info("Training for " + str(self._episodes_required) + " episodes...")
elif self._episode_volley_type == EpisodeVolleyType.validation:
logger.info("Validating for " + str(self._episodes_required) + " episodes...")
else:
logger.info("Testing for " + str(self._episodes_required) + " episodes...")
# Get the amount of parallel batches required
parallel_episode_batches: int = self._episodes_required // self._parallel
# Execute the parallel episode batches
for parallel_episode_batch in range(parallel_episode_batches):
# Print current progress every once in a while (if length is not too short)
if parallel_episode_batches >= 100 and (parallel_episode_batch + 1) % (parallel_episode_batches // 10) == 0 and parallel_episode_batch > 0:
if self._episode_volley_type == EpisodeVolleyType.training:
logger.info("Trained for " + str((parallel_episode_batch + 1) * self._parallel) + "/" + str(self._episodes_required) + " episodes...")
elif self._episode_volley_type == EpisodeVolleyType.validation:
logger.info("Validated for " + str((parallel_episode_batch + 1) * self._parallel) + "/" + str(self._episodes_required) + " episodes...")
else:
logger.info("Tested for " + str((parallel_episode_batch + 1) * self._parallel) + "/" + str(self._episodes_required) + " episodes...")
# Initialize last reward and last episode done flags
self._last_reward = numpy.nan * numpy.ones(self._environment.parallel, dtype=float)
self._last_episode_done = numpy.zeros(self._environment.parallel, dtype=bool)
# Execute actions until the all parallel step batches are completed or the maximum episode length is exceeded
episode_rewards: [] = []
episode_actions_durations: [] = []
state_current: numpy.ndarray = self._environment.reset(logger, session)
for parallel_step_batch in range(self._episode_length):
# Get the action decided by the agent
observation_current: numpy.ndarray = self._interface.environment_state_to_observation(logger, session, state_current)
time_before_action = time.clock()
if self._episode_volley_type == EpisodeVolleyType.training:
agent_action: numpy.ndarray = self._agent.act_train(logger, session, self._interface, observation_current,
self._start_steps + self._steps, self._start_episodes + self._episodes)
else:
agent_action: numpy.ndarray = self._agent.act_inference(logger, session, self._interface, observation_current,
self._start_steps + self._steps, self._start_episodes + self._episodes)
time_after_action = time.clock()
# Save the time, converted to milliseconds
episode_actions_durations.append((time_after_action - time_before_action) * 1000)
# Get the next state with relative reward and episode done flag
environment_action: numpy.ndarray = self._interface.agent_action_to_environment_action(logger, session, agent_action)
state_next, reward, episode_done = self._environment.step(logger, session, environment_action)
# Send back information to the agent
observation_next: numpy.ndarray = self._interface.environment_state_to_observation(logger, session, state_next)
# Complete the step
if self._episode_volley_type == EpisodeVolleyType.training:
self._agent.complete_step_train(logger, session, self._interface,
observation_current, agent_action, reward, episode_done, observation_next,
self._start_steps + self._steps, self._start_episodes + self._episodes)
else:
self._agent.complete_step_inference(logger, session, self._interface,
observation_current, agent_action, reward, episode_done, observation_next,
self._start_steps + self._steps, self._start_episodes + self._episodes)
# Render if required
if render:
self._environment.render(logger, session)
# Save the reward at the last step
self._last_reward = numpy.where(episode_done * (1 - self._last_episode_done), reward, self._last_reward)
if parallel_step_batch + 1 == self._episode_length:
self._last_reward = numpy.where(numpy.isnan(self._last_reward), reward, self._last_reward)
# Add the reward to the list of rewards for this episode
# Note: make sure the reward saved is NaN for all already completed episodes in the parallel batch
episode_rewards.append(numpy.where(self._last_episode_done, numpy.nan, reward))
# Update the current state with the previously next state
state_current = state_next.copy()
# Increase the number of trained steps
# Note: the counter should be increased according to the completed episodes of the current parallel batch
self._steps += numpy.count_nonzero(episode_done == 0)
# Save volley steps at termination time
# Note: saving the step of each final step of each volley is required to compute the average episode length
step_array: numpy.ndarray = numpy.ones(self._environment.parallel, dtype=int) * (parallel_step_batch + 1)
self._episode_lengths += step_array[episode_done * numpy.logical_not(self._last_episode_done)].tolist()
if parallel_step_batch + 1 == self._episode_length:
self._episode_lengths += step_array[numpy.logical_not(episode_done)].tolist()
'''
'''
import os
import pickle
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from itertools import chain, combinations_with_replacement
# -- astropy --
import astropy.units as u
from astropy.time import Time
# -- specsim --
import specsim
from specsim.atmosphere import Moon
# -- feasibgs --
from . import util as UT
def Isky_regression(airmass, moonill, moonalt, moonsep, sunalt, sunsep):
''' Sky surface brightness as a function of airmass, moon parameters, and
sun parameters. The sky surface brightness uses a regression model fit
using BOSS and DESI CMX sky fibers to predict V-band moonlight surface
brightness. This V-band magnitude is then used to scale up the dark time
sky.
:param airmass:
airmass
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:return specsim_wave, Isky:
returns wavelength [Angstrom], sky surface brightness [$10^{-17} erg/cm^{2}/s/\AA/arcsec^2$]
'''
# initialize atmosphere model using hacked version of specsim.atmosphere.initialize
specsim_sky = _specsim_initialize('desi', model='regression')
specsim_wave = specsim_sky._wavelength # Ang
specsim_sky.airmass = airmass
specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi
specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg
specsim_sky.moon.separation_angle = moonsep * u.deg
Isky = specsim_sky.surface_brightness.value
# twilight contribution
if sunalt > -20.:
w_twi, I_twi = _cI_twi(sunalt, sunsep, airmass)
I_twi /= np.pi
I_twi_interp = interp1d(10. * w_twi, I_twi, fill_value='extrapolate')
Isky += np.clip(I_twi_interp(specsim_wave), 0, None)
return specsim_wave, Isky
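# Usage sketch (observing conditions are illustrative only):
# wave, Isky = Isky_regression(airmass=1.2, moonill=0.7, moonalt=40.,
#                              moonsep=60., sunalt=-30., sunsep=90.)
# # wave is in Angstrom; Isky is in 1e-17 erg/cm^2/s/Ang/arcsec^2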
def Isky_newKS_twi(airmass, moonill, moonalt, moonsep, sunalt, sunsep):
''' Sky surface brightness as a function of airmass, moon parameters, and sun parameters.
The sky surface brightness uses the KS model scaling with coefficients re-fit to match
BOSS sky data and includes a twilight contribution from Parker's thesis.
:param airmass:
airmass
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:return specsim_wave, Isky:
returns wavelength [Angstrom] and sky surface brightness [$10^{-17} erg/cm^{2}/s/\AA/arcsec^2$]
'''
# initialize atmosphere model using hacked version of specsim.atmosphere.initialize
specsim_sky = _specsim_initialize('desi', model='refit_ks')
specsim_wave = specsim_sky._wavelength # Ang
specsim_sky.airmass = airmass
specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi
specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg
specsim_sky.moon.separation_angle = moonsep * u.deg
# updated KS coefficients
specsim_sky.moon.KS_CR = 458173.535128
specsim_sky.moon.KS_CM0 = 5.540103
specsim_sky.moon.KS_CM1 = 178.141045
_sky = specsim_sky._surface_brightness_dict['dark'].copy()
_sky *= specsim_sky.extinction
I_ks_rescale = specsim_sky.surface_brightness
Isky = I_ks_rescale.value
# twilight contribution
if sunalt > -20.:
w_twi, I_twi = _cI_twi(sunalt, sunsep, airmass)
I_twi /= np.pi
I_twi_interp = interp1d(10. * w_twi, I_twi, fill_value='extrapolate')
Isky += np.clip(I_twi_interp(specsim_wave), 0, None)
return specsim_wave, Isky
def Isky_parker(airmass, ecl_lat, gal_lat, gal_lon, tai, sun_alt, sun_sep, moon_phase, moon_ill, moon_alt, moon_sep):
''' Parker's sky model, which is a function of:
:param airmass:
airmass
:param ecl_lat:
ecliptic latitude (used for zodiacal light contribution)
:param gal_lat:
galactic latitude (used for ISL contribution)
:param gal_lon:
galactic longitude (used for ISL contribution)
:param tai:
time in seconds
:param sun_alt:
sun altitude: 0 - 90 deg
:param sun_sep:
sun separation: 0 - 90 deg
:param moon_phase:
moon phase angle g (0 for full moon, 180 for new moon)
:param moon_ill:
moon illumination fraction: 0 - 1
:param moon_alt:
moon altitude: 0 - 90 deg
:param moon_sep:
moon separation angle: 0 - 180 deg
'''
from astroplan import Observer
from astropy.coordinates import EarthLocation
X = airmass # air mass
beta = ecl_lat # ecliptic latitude ( used for zodiacal light contribution )
l = gal_lat # galactic latitude ( used for ISL contribution )
b = gal_lon # galactic longitude ( used for ISL contribution )
_kpno = EarthLocation.of_site('kitt peak')
obs_time = Time(tai/86400., scale='tai', format='mjd', location=_kpno)
mjd = obs_time.mjd
# fractional months ( used for seasonal contribution)
month_frac = obs_time.datetime.month + obs_time.datetime.day/30.
# fractional hour ( used for hourly contribution)
kpno = Observer(_kpno)
sun_rise = kpno.sun_rise_time(obs_time, which='next')
sun_set = kpno.sun_set_time(obs_time, which='previous')
hour = ((obs_time - sun_set).sec)/3600.
hour_frac = hour/((Time(sun_rise, format='mjd') - Time(sun_set,format = 'mjd')).sec/3600.)
alpha = sun_alt # sun altitude
delta = sun_sep # sun separation (separation between the target and the sun's location)
# used for scattered moonlight
g = moon_phase # moon phase
altm = moon_alt
illm = moon_ill
delm = moon_sep
# get coefficients
coeffs = _read_parkerCoeffs()
# sky continuum
_w, _Icont = _parker_Icontinuum(coeffs, X, beta, l, b, mjd, month_frac, hour_frac, alpha, delta, altm, illm, delm, g)
S_continuum = _Icont / np.pi # BOSS has 2 arcsec diameter
# sky emission from the UVES continuum subtraction
w_uves, S_uves = np.loadtxt(''.join([UT.code_dir(), 'dat/sky/UVES_sky_emission.dat']),
unpack=True, usecols=[0,1])
f_uves = interp1d(w_uves, S_uves, bounds_error=False, fill_value='extrapolate')
S_emission = f_uves(_w)
return _w, S_continuum + S_emission
def Isky_parker_radecobs(ra, dec, obs_time):
''' wrapper for Isky_parker, where the input parameters are calculated based
on RA, Dec, and obs_time
'''
from astroplan import download_IERS_A
from astropy.coordinates import EarthLocation, SkyCoord, AltAz, get_sun, get_moon
download_IERS_A()
# target coordinates
coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
# observed time (UTC)
utc_time = Time(obs_time)
kpno = EarthLocation.of_site('kitt peak')
kpno_altaz = AltAz(obstime=utc_time, location=kpno)
coord_altaz = coord.transform_to(kpno_altaz)
airmass = coord_altaz.secz
ecl_lat = coord.barycentrictrueecliptic.lat.deg
gal_lat = coord.galactic.l.deg # galactic latitude ( used for ISL contribution )
gal_lon = coord.galactic.b.deg # galactic longitude ( used for ISL contribution )
tai = utc_time.tai.mjd * 86400. # TAI in seconds (MJD * 86400), as expected by Isky_parker
# sun altitude (degrees)
sun = get_sun(utc_time)
sun_altaz = sun.transform_to(kpno_altaz)
sun_alt = sun_altaz.alt.deg
# sun separation
sun_sep = sun.separation(coord).deg
# used for scattered moonlight
moon = get_moon(utc_time)
moon_altaz = moon.transform_to(kpno_altaz)
moon_alt = moon_altaz.alt.deg
moon_sep = moon.separation(coord).deg #coord.separation(self.moon).deg
elongation = sun.separation(moon)
phase = np.arctan2(sun.distance * np.sin(elongation), moon.distance - sun.distance*np.cos(elongation))
moon_phase = phase.value
moon_ill = (1. + np.cos(phase))/2.
return Isky_parker(airmass, ecl_lat, gal_lat, gal_lon, tai, sun_alt, sun_sep, moon_phase, moon_ill, moon_alt, moon_sep)
def _specsim_initialize(config, model='regression'):
''' hacked version of specsim.atmosphere.initialize, which initializes the
atmosphere model from configuration parameters.
'''
if specsim.config.is_string(config):
config = specsim.config.load_config(config)
atm_config = config.atmosphere
# Load tabulated data.
surface_brightness_dict = config.load_table(
atm_config.sky, 'surface_brightness', as_dict=True)
extinction_coefficient = config.load_table(
atm_config.extinction, 'extinction_coefficient')
# Initialize an optional atmospheric seeing PSF.
psf_config = getattr(atm_config, 'seeing', None)
if psf_config:
seeing = dict(
fwhm_ref=specsim.config.parse_quantity(psf_config.fwhm_ref),
wlen_ref=specsim.config.parse_quantity(psf_config.wlen_ref),
moffat_beta=float(psf_config.moffat_beta))
else:
seeing = None
# Initialize an optional lunar scattering model.
moon_config = getattr(atm_config, 'moon', None)
if moon_config:
moon_spectrum = config.load_table(moon_config, 'flux')
c = config.get_constants(moon_config,
['moon_zenith', 'separation_angle', 'moon_phase'])
moon = _Moon(
config.wavelength, moon_spectrum, extinction_coefficient,
atm_config.airmass, c['moon_zenith'], c['separation_angle'],
c['moon_phase'], model=model)
else:
moon = None
atmosphere = specsim.atmosphere.Atmosphere(
config.wavelength, surface_brightness_dict, extinction_coefficient,
atm_config.extinct_emission, atm_config.sky.condition,
atm_config.airmass, seeing, moon)
if config.verbose:
print(
"Atmosphere initialized with condition '{0}' from {1}."
.format(atmosphere.condition, atmosphere.condition_names))
if seeing:
print('Seeing is {0} at {1} with Moffat beta {2}.'
.format(seeing['fwhm_ref'], seeing['wlen_ref'],
seeing['moffat_beta']))
if moon:
print(
'Lunar V-band extinction coefficient is {0:.5f}.'
.format(moon.vband_extinction))
return atmosphere
class _Moon(Moon):
''' specimsim.atmosphere.Moon object hacked to work with a Krisciunas & Schaefer (1991)
model with extra free parameters
'''
def __init__(self, wavelength, moon_spectrum, extinction_coefficient,
airmass, moon_zenith, separation_angle, moon_phase,
model='regression'):
# initialize via super function
super().__init__(wavelength, moon_spectrum, extinction_coefficient,
airmass, moon_zenith, separation_angle, moon_phase)
self.model = model
# default KS coefficients
self.KS_CR = 10**5.36 # proportionality constant in the Rayleigh scattering function
# constants for the Mie scattering function term
self.KS_CM0 = 6.15
self.KS_CM1 = 40.
self.KS_M0 = -12.73
self.KS_M1 = 0.026
self.KS_M2 = 4.
def _update(self):
"""Update the model based on the current parameter values.
"""
self._update_required = False
# Calculate the V-band surface brightness of scattered moonlight.
if self.model == 'refit_ks':
self._scattered_V = krisciunas_schaefer_free(
self.obs_zenith, self.moon_zenith, self.separation_angle,
self.moon_phase, self.vband_extinction, self.KS_CR, self.KS_CM0,
self.KS_CM1, self.KS_M0, self.KS_M1, self.KS_M2)
elif self.model == 'regression':
self._scattered_V = _scattered_V_regression(
self.airmass,
0.5 * (np.cos(np.pi * self.moon_phase) + 1.),
90 - self.moon_zenith.value,
self.separation_angle.value) * u.mag / u.arcsec**2
else:
raise NotImplementedError
# Calculate the wavelength-dependent extinction of moonlight
# scattered once into the observed field of view.
scattering_airmass = (
1 - 0.96 * np.sin(self.moon_zenith) ** 2) ** (-0.5)
extinction = (
10 ** (-self._extinction_coefficient * scattering_airmass / 2.5) *
(1 - 10 ** (-self._extinction_coefficient * self.airmass / 2.5)))
self._surface_brightness = self._moon_spectrum * extinction
# Renormalized the extincted spectrum to the correct V-band magnitude.
raw_V = self._vband.get_ab_magnitude(
self._surface_brightness, self._wavelength) * u.mag
area = 1 * u.arcsec ** 2
self._surface_brightness *= 10 ** (
-(self._scattered_V * area - raw_V) / (2.5 * u.mag)) / area
@property
def KS_CR(self):
return self._KS_CR
@KS_CR.setter
def KS_CR(self, ks_cr):
self._KS_CR = ks_cr
self._update_required = True
@property
def KS_CM0(self):
return self._KS_CM0
@KS_CM0.setter
def KS_CM0(self, ks_cm0):
self._KS_CM0 = ks_cm0
self._update_required = True
@property
def KS_CM1(self):
return self._KS_CM1
@KS_CM1.setter
def KS_CM1(self, ks_cm1):
self._KS_CM1 = ks_cm1
self._update_required = True
@property
def KS_M0(self):
return self._KS_M0
@KS_M0.setter
def KS_M0(self, ks_m0):
self._KS_M0 = ks_m0
self._update_required = True
@property
def KS_M1(self):
return self._KS_M1
@KS_M1.setter
def KS_M1(self, ks_m1):
self._KS_M1 = ks_m1
self._update_required = True
@property
def KS_M2(self):
return self._KS_M2
@KS_M2.setter
def KS_M2(self, ks_m2):
self._KS_M2 = ks_m2
self._update_required = True
reg_model_coeffs = np.array([
0.00000000e+00, -1.24246947e-01, -2.19592318e-01, -1.27371956e-02,
4.16108739e-02, -8.96992463e-02, -6.74266151e-01, 2.67170371e-02,
-1.54258481e-02, -3.52318515e-01, -4.12007754e-03, 6.44355466e-02,
2.70616098e-04, -2.52914043e-04, -6.59789181e-04, -1.00704130e-01,
-1.17732794e+00, 1.00074153e-02, 2.02381309e-02, -1.03468867e+00,
7.06332796e-02, 1.80523919e-02, -8.04924203e-04, -8.78033445e-04,
-1.93926394e-04, -6.88153692e-01, -1.34713209e-01, 1.85076523e-03,
5.65520710e-05, -1.30331216e-05, -4.89722809e-04, 2.99858228e-06,
8.39852557e-06, 8.86494950e-06, 4.35592782e-06])
reg_model_intercept = 20.507688847655775
def _scattered_V_regression(airmass, moon_frac, moon_alt, moon_sep):
''' Polynomial regression fit (all monomials of total degree <= 3 in airmass,
moon illumination fraction, moon altitude and moon separation; 35 terms) to the
V-band scattered moonlight from BOSS and DESI CMX data.
'''
theta = np.atleast_2d(np.array([airmass, moon_frac, moon_alt, moon_sep]).T)
combs = chain.from_iterable(combinations_with_replacement(range(4), i)
for i in range(0, 4))
theta_transform = np.empty((theta.shape[0], len(reg_model_coeffs)))
for i, comb in enumerate(combs):
theta_transform[:, i] = theta[:, comb].prod(1)
return np.dot(theta_transform, reg_model_coeffs.T) + reg_model_intercept
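# Illustration of the feature expansion above (a sketch for reference only):
# chain(combinations_with_replacement(range(4), i) for i in range(0, 4)) yields the
# index tuples (), (0,), (1,), ..., (3, 3, 3), i.e. every monomial of
# (airmass, moon_frac, moon_alt, moon_sep) up to total degree 3 -- 35 terms,
# matching len(reg_model_coeffs). The prediction for one observation is
#
#   V = reg_model_intercept + sum_k reg_model_coeffs[k] * prod(theta[comb_k])
#
# e.g. _scattered_V_regression(1.2, 0.7, 45., 60.) returns the predicted V-band
# surface brightness (mag/arcsec^2) of scattered moonlight for those (made-up) conditions.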
def krisciunas_schaefer_free(obs_zenith, moon_zenith, separation_angle, moon_phase,
vband_extinction, C_R, C_M0, C_M1, M0, M1, M2):
"""Calculate the scattered moonlight surface brightness in V band.
Based on Krisciunas and Schaefer, "A model of the brightness of moonlight",
PASP, vol. 103, Sept. 1991, p. 1033-1039 (http://dx.doi.org/10.1086/132921).
Equation numbers in the code comments refer to this paper.
The function :func:`plot_lunar_brightness` provides a convenient way to
plot this model's predictions as a function of observation pointing.
Units are required for the angular inputs and the result has units of
surface brightness. With the default coefficients set in :class:`_Moon` this
reproduces the original specsim ``krisciunas_schaefer`` model, for example:
>>> sb = krisciunas_schaefer(20*u.deg, 70*u.deg, 50*u.deg, 0.25, 0.15)
>>> print(np.round(sb, 3))
19.855 mag / arcsec2
The output is automatically broadcast over input arrays following the usual
numpy rules.
This method has several caveats but the authors find agreement with data at
the 8% - 23% level. See the paper for details.
Parameters
----------
obs_zenith : astropy.units.Quantity
Zenith angle of the observation in angular units.
moon_zenith : astropy.units.Quantity
Zenith angle of the moon in angular units.
separation_angle : astropy.units.Quantity
Opening angle between the observation and moon in angular units.
moon_phase : float
Phase of the moon from 0.0 (full) to 1.0 (new), which can be calculated
as abs((d / D) - 1) where d is the time since the last new moon
and D = 29.5 days is the period between new moons. The corresponding
illumination fraction is ``0.5*(1 + cos(pi * moon_phase))``.
vband_extinction : float
V-band extinction coefficient to use.
C_R, C_M0, C_M1 : float
Free coefficients of the Rayleigh and Mie scattering terms (eqn. 21).
M0, M1, M2 : float
Free coefficients of the lunar V-band magnitude polynomial (eqn. 9).
Returns
-------
astropy.units.Quantity
Observed V-band surface brightness of scattered moonlight.
"""
moon_phase = np.asarray(moon_phase)
if np.any((moon_phase < 0) | (moon_phase > 1)):
raise ValueError(
'Invalid moon phase {0}. Expected 0-1.'.format(moon_phase))
# Calculate the V-band magnitude of the moon (eqn. 9).
abs_alpha = 180. * moon_phase
#m = -12.73 + 0.026 * abs_alpha + 4e-9 * abs_alpha ** 4 (default value)
m = M0 + M1 * abs_alpha + M2 * 1e-9 * abs_alpha ** 4
# Calculate the illuminance of the moon outside the atmosphere in
# foot-candles (eqn. 8).
Istar = 10 ** (-0.4 * (m + 16.57))
# Calculate the scattering function (eqn.21).
rho = separation_angle.to(u.deg).value
f_scatter = (C_R * (1.06 + np.cos(separation_angle) ** 2) +
10 ** (C_M0 - rho / C_M1))
# Calculate the scattering airmass along the lines of sight to the
# observation and moon (eqn. 3).
X_obs = (1 - 0.96 * np.sin(obs_zenith) ** 2) ** (-0.5)
X_moon = (1 - 0.96 * np.sin(moon_zenith) ** 2) ** (-0.5)
# Calculate the V-band moon surface brightness in nanoLamberts.
B_moon = (f_scatter * Istar *
10 ** (-0.4 * vband_extinction * X_moon) *
(1 - 10 ** (-0.4 * (vband_extinction * X_obs))))
# Convert from nanoLamberts to mag / arcsec**2 using eqn.19 of
# Garstang, "Model for Artificial Night-Sky Illumination",
# PASP, vol. 98, Mar. 1986, p. 364 (http://dx.doi.org/10.1086/131768)
return ((20.7233 - np.log(B_moon / 34.08)) / 0.92104 *
u.mag / (u.arcsec ** 2))
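# Sanity-check sketch (kept as a comment; inputs are taken from the docstring above
# and the coefficients are the defaults set in _Moon, which correspond to the
# original KS91 model):
#
#   sb = krisciunas_schaefer_free(20*u.deg, 70*u.deg, 50*u.deg, 0.25, 0.15,
#                                 C_R=10**5.36, C_M0=6.15, C_M1=40.,
#                                 M0=-12.73, M1=0.026, M2=4.)
#   # should reproduce the ~19.855 mag / arcsec2 quoted in the docstring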
def _cI_twi(alpha, delta, airmass):
''' twilight contribution to the sky surface brightness
:param alpha:
sun altitude in degrees
:param delta:
sun separation angle in degrees
:param airmass:
airmass of the observation
:return wave:
wavelength grid of the twilight coefficient model
:return twi:
twilight contribution evaluated on that wavelength grid
'''
ftwi = os.path.join(UT.dat_dir(), 'sky', 'twilight_coeffs.p')
twi_coeffs = pickle.load(open(ftwi, 'rb'))
twi = (
twi_coeffs['t0'] * np.abs(alpha) + # CT2
twi_coeffs['t1'] * np.abs(alpha)**2 + # CT1
twi_coeffs['t2'] * np.abs(delta)**2 + # CT3
twi_coeffs['t3'] * np.abs(delta) # CT4
) * np.exp(-twi_coeffs['t4'] * airmass) + twi_coeffs['c0']
return twi_coeffs['wave'], np.array(twi)
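# Example usage (a sketch; assumes the pickled twilight coefficients exist under
# UT.dat_dir()/sky/ and uses made-up observing conditions):
#
#   w_twi, I_twi = _cI_twi(alpha=-15., delta=90., airmass=1.2)
#   # the callers above divide I_twi by pi and interpolate it onto the specsim
#   # wavelength grid (note the 10.*w_twi unit conversion) before adding it to Isky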
def _twilight_coeffs():
''' save twilight coefficients from Parker
'''
f = os.path.join(UT.code_dir(), 'dat', 'sky', 'MoonResults.csv')
coeffs = pd.read_csv(f, index_col=0) # pd.DataFrame.from_csv was removed from pandas
coeffs.columns = [
'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2',
'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I',
't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
'c2', 'c3', 'c4', 'c5', 'c6']
# keep moon models
twi_coeffs = coeffs[coeffs['model'] == 'twilight']
coeffs = coeffs[coeffs['model'] == 'moon']
# order based on wavelengths for convenience
wave_sort = np.argsort(np.array(coeffs['wl']))
twi = {}
twi['wave'] = np.array(coeffs['wl'])[wave_sort]
for k in ['t0', 't1', 't2', 't3', 't4', 'c0']:
twi[k] = np.array(twi_coeffs[k])[wave_sort]
# save to file
ftwi = os.path.join(UT.dat_dir(), 'sky', 'twilight_coeffs.p')
pickle.dump(twi, open(ftwi, 'wb'))
return None
##########################################################################
# contributions to parker's sky surface brightness model
##########################################################################
def _read_parkerCoeffs():
''' read the coefficients of parker's model
'''
f = ''.join([UT.code_dir(), 'dat/sky/MoonResults.csv'])
_coeffs = pd.read_csv(f, index_col=0) # pd.DataFrame.from_csv was removed from pandas
_coeffs.columns = [
'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2',
'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I',
't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
'c2', 'c3', 'c4', 'c5', 'c6'
]
# keep moon models
coeffs = _coeffs[_coeffs['model'] == 'moon']
# order based on wavelengths for convenience
wave_sort = np.argsort(np.array(coeffs['wl']))
for k in coeffs.keys():
coeffs[k] = np.array(coeffs[k])[wave_sort]
return coeffs
def _parker_Icontinuum(coeffs, X, beta, l, b, mjd, month_frac, hour_frac, alpha, delta, altm, illm, delm, g):
''' sky continuum (Fragelius thesis Eq. 4.23)
'''
# airmass contrib.
_Iairmass = coeffs['c_am'] * X
# zodiacal contrib. (func. of ecliptic latitude)
_Izodiacal = coeffs['c_zodi'] * _parker_Izodi(beta)
_Iisl = coeffs['c_isl'] * _parker_Iisl(l, b)
_Isolar_flux = coeffs['sol'] * _parker_Isf(mjd - coeffs['I'])
_Iseasonal = _parker_cI_seas(month_frac, coeffs)
_Ihourly = _parker_cI_hour(hour_frac, coeffs)
_dT = _parker_deltaT(X, coeffs)
# When the sun is above -20 altitude, some of its light will back-scatter
# off the atmosphere into the field of view. (Fragelius thesis Eq. 4.27)
_Itwilight = _parker_cI_twi_exp(alpha, delta, X, coeffs)
# light from the moon that is scattered into our field of view (Fragelius thesis Eq. 4.28, 4.29)
_Imoon = _parker_cI_moon_exp(altm, illm, delm, g, X, coeffs)
_Iadd_continuum = coeffs['c0']
# I_continuum(lambda)
Icont = (_Iairmass + _Izodiacal + _Iisl + _Isolar_flux + _Iseasonal + _Ihourly + _Iadd_continuum) * _dT + _Itwilight + _Imoon
return 10*coeffs['wl'], np.array(Icont)
def _parker_cI_moon_exp(altm, illm, deltam, g, airmass, coeffs):
''' light from the moon that is scattered into our field of view (Fragelius thesis Eq. 4.28, 4.29)
'''
Alambda = _parker_albedo(g, coeffs) # albedo factor
moon = (coeffs['m0'] * altm**2 +
coeffs['m1'] * altm +
coeffs['m2'] * illm**2 +
coeffs['m3'] * illm +
coeffs['m4'] * deltam**2 +
coeffs['m5'] * deltam
) * Alambda * np.exp(-coeffs['m6'] * airmass)
return moon
def _parker_albedo(g, coeffs):
''' albedo, i.e. reflectivity of the moon (Fragelius thesis Eq. 4.28)
g is the lunar phase (g = 0 for full moon and 180 for new moon)
'''
albedo_table = pd.read_csv(''.join([UT.code_dir(), 'dat/sky/albedo_constants.csv']),
delim_whitespace=True)
albedo_constants = {}
for col in list(albedo_table):
line = interp1d(albedo_table['WAVELENGTH'], albedo_table[col],
bounds_error=False, fill_value=0)
albedo_constants[col] = line
p1 = 4.06054
p2 = 12.8802
p3 = -30.5858
p4 = 16.7498
A = []
for i in range(4):
A.append(albedo_constants['a%d'%i](coeffs['wl'])*(g**i))
A.append(albedo_constants['d1'](coeffs['wl']) * np.exp(-g/p1))
A.append(albedo_constants['d2'](coeffs['wl']) * np.exp(-g/p2))
A.append(albedo_constants['d3'](coeffs['wl']) * np.cos((g - p3)/p4))
lnA = np.sum(A, axis=0)
Al = np.exp(lnA)
return Al
def _parker_cI_twi_exp(alpha, delta, airmass, coeffs):
''' When the sun is above -20 altitude, some of its light will back-scatter
off the atmosphere into the field of view. (Fragelius thesis Eq. 4.27)
no observations are made when sun is above -14 altitude.
'''
if alpha > -20.:
twi = (
coeffs['t0'] * np.abs(alpha) + # CT2
coeffs['t1'] * alpha**2 + # CT1
coeffs['t2'] * delta**2 + # CT3
coeffs['t3'] * delta # CT4
) * np.exp(-coeffs['t4'] * airmass)
else:
twi = np.zeros(len(coeffs['t0']))
return twi
def _parker_deltaT(airmass, coeffs):
'''effective transmission curve that accounts for the additional extinction
for observing at higher airmass (Fragelius thesis Eq. 4.24)
'''
zen_ext = np.loadtxt(''.join([UT.code_dir(), 'dat/sky/ZenithExtinction-KPNO.dat']))
zen_wave = zen_ext[:,0]/10.
ext = zen_ext[:,1]
zext = interp1d(zen_wave, ext, bounds_error=False, fill_value='extrapolate')
k = zext(coeffs['wl'])
return 1 - (10**(-0.4*k) - 10**(-0.4*k*airmass))
def _parker_cI_hour(hour_frac, coeffs):
''' Fragelius thesis Eq. 4.26
'''
levels = np.linspace(0,1,7)
idx = np.argmin(np.abs(levels - hour_frac))
_hours = np.zeros(6)
_hours[idx] = 1
for i in range(1,6):
if i == 1:
hours = coeffs['c'+str(i+1)] * _hours[i]
else:
hours += coeffs['c'+str(i+1)] * _hours[i]
return hours
def _parker_cI_seas(month_frac, coeffs):
# Fragelius thesis Eq. 4.25
mm = np.rint(month_frac)
if mm == 13: mm = 1
_months = | np.zeros(12) | numpy.zeros |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import collections
import numpy as np
from copy import copy
from transformations import quaternion_from_matrix, quaternion_matrix, quaternion_multiply, quaternion_slerp
from .analytical_inverse_kinematics import AnalyticalLimbIK
from .numerical_ik_exp import NumericalInverseKinematicsExp
from .utils import normalize, project_on_intersection_circle, smooth_root_positions
from ..animation_data.skeleton_node import SkeletonEndSiteNode
from ..animation_data.utils import quaternion_from_vector_to_vector
from ..animation_data.motion_blending import create_transition_for_joints_using_slerp, BLEND_DIRECTION_FORWARD, BLEND_DIRECTION_BACKWARD, smooth_translation_in_quat_frames
from ..animation_data.skeleton_models import IK_CHAINS_DEFAULT_SKELETON
FOOT_STATE_GROUNDED = 0
FOOT_STATE_SWINGING = 1
def add_heels_to_skeleton(skeleton, left_foot, right_foot, left_heel, right_heel, offset=[0, -5, 0]):
left_heel_node = SkeletonEndSiteNode(left_heel, [], skeleton.nodes[left_foot])
left_heel_node.offset = np.array(offset)
skeleton.nodes[left_heel] = left_heel_node
skeleton.nodes[left_foot].children.append(left_heel_node)
right_heel_node = SkeletonEndSiteNode(right_heel, [], skeleton.nodes[right_foot])
right_heel_node.offset = np.array(offset)
skeleton.nodes[right_heel] = right_heel_node
skeleton.nodes[right_foot].children.append(right_heel_node)
return skeleton
def get_heel_offset(skeleton, foot_name, toe_name, frame):
""" calculate heel offset from foot assuming the "y" axis of the foot coordinate system is aligned to the ground
"""
m = skeleton.nodes[foot_name].get_global_matrix(frame)
foot_position = m[:3,3]
print("foot_position", foot_position)
toe_offset = skeleton.nodes[toe_name].offset
#if len(skeleton.nodes[toe_name].children) > 0:
# toe_offset += skeleton.nodes[toe_name].children[0].offset
up_vector = np.array(skeleton.skeleton_model["cos_map"][foot_name]["y"])
up_vector /= np.linalg.norm(up_vector)
#project toe offset on up vector that should be aligned with the ground
scale = np.dot(up_vector, toe_offset)
# get global position of toe aligned to ground
local_offset = scale*up_vector
local_offset = [local_offset[0],local_offset[1],local_offset[2], 1]
projected_toe_pos = np.dot(m, local_offset)[:3]
print("projected_toe_pos", projected_toe_pos)
# use offset from projected toe position to position to get the global heel position
toe_pos = skeleton.nodes[toe_name].get_global_position(frame)
heel_position = foot_position + (toe_pos - projected_toe_pos)
# bring into local coordinate system
heel_position = [heel_position[0],heel_position[1],heel_position[2], 1]
heel_offset = np.dot(np.linalg.inv(m), heel_position)[:3]
print("heel_offset", heel_offset)
print("toe_offset", toe_offset)
return heel_offset
def get_heel_offset2(skeleton, foot_name, toe_name, frame):
""" calculate heel offset from foot assuming the "y" axis of the foot coordinate system is aligned to the ground
"""
m = skeleton.nodes[foot_name].get_global_matrix(frame)
foot_position = m[:3,3]
print("foot_position", foot_position)
toe_offset = skeleton.nodes[toe_name].offset
#if len(skeleton.nodes[toe_name].children) > 0:
# toe_offset += skeleton.nodes[toe_name].children[0].offset
up_vector = np.array(skeleton.skeleton_model["cos_map"][foot_name]["y"])
up_vector /= np.linalg.norm(up_vector)
x_vector = np.array(skeleton.skeleton_model["cos_map"][foot_name]["x"])
x_vector /= np.linalg.norm(x_vector)
z_vector = np.cross(up_vector, x_vector)
z_vector /= np.linalg.norm(z_vector)
#project toe offset on up vector that should be aligned with the ground
scale = np.dot(z_vector, toe_offset)
heel_offset = scale*z_vector
# bring into local coordinate system
print("heel_offse2", heel_offset)
print("toe_offset", toe_offset)
return heel_offset
def add_temporary_heels_to_skeleton(skeleton, left_foot, right_foot, left_toe, right_toe, left_heel, right_heel):
left_heel_node = SkeletonEndSiteNode(left_heel, [], skeleton.nodes[left_foot])
left_heel_node.offset = get_heel_offset2(skeleton, left_foot, left_toe, skeleton.reference_frame)
skeleton.nodes[left_heel] = left_heel_node
skeleton.nodes[left_foot].children.append(left_heel_node)
skeleton.skeleton_model["joints"]["left_heel"] = left_heel
right_heel_node = SkeletonEndSiteNode(right_heel, [], skeleton.nodes[right_foot])
right_heel_node.offset = get_heel_offset2(skeleton, right_foot, right_toe, skeleton.reference_frame)
skeleton.nodes[right_heel] = right_heel_node
skeleton.nodes[right_foot].children.append(right_heel_node)
skeleton.skeleton_model["joints"]["right_heel"] = right_heel
return skeleton
def create_grounding_constraint_from_frame(skeleton, frames, frame_idx, joint_name):
position = skeleton.nodes[joint_name].get_global_position(frames[frame_idx])
m = skeleton.nodes[joint_name].get_global_matrix(frames[frame_idx])
m[:3, 3] = [0, 0, 0]
orientation = normalize(quaternion_from_matrix(m))
return MotionGroundingConstraint(frame_idx, joint_name, position, None, orientation)
def generate_ankle_constraint_from_toe(skeleton, frames, frame_idx, ankle_joint_name, heel_joint, toe_joint_name, target_ground_height, toe_pos=None):
""" create a constraint on the ankle position based on the toe constraint position"""
#print "add toe constraint"
if toe_pos is None:
ct = skeleton.nodes[toe_joint_name].get_global_position(frames[frame_idx])
ct[1] = target_ground_height # set toe constraint on the ground
else:
ct = toe_pos
a = skeleton.nodes[ankle_joint_name].get_global_position(frames[frame_idx])
t = skeleton.nodes[toe_joint_name].get_global_position(frames[frame_idx])
target_toe_offset = a - t # difference between unmodified toe and ankle at the frame
ca = ct + target_toe_offset # move ankle so toe is on the ground
m = skeleton.nodes[heel_joint].get_global_matrix(frames[frame_idx])
m[:3, 3] = [0, 0, 0]
oq = quaternion_from_matrix(m)
oq = normalize(oq)
return MotionGroundingConstraint(frame_idx, ankle_joint_name, ca, None, oq)
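# Example usage (a sketch with hypothetical joint names; skeleton, frames and the
# joint map come from the calling motion-grounding code):
#
#   c = generate_ankle_constraint_from_toe(skeleton, frames, frame_idx=10,
#                                          ankle_joint_name='LeftFoot',
#                                          heel_joint='left_heel',
#                                          toe_joint_name='LeftToeBase',
#                                          target_ground_height=0.0)
#   # c.position is the ankle target that places the toe on the ground at height 0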
def create_ankle_constraint_from_toe_and_heel(skeleton, frames, frame_idx, ankle_joint, heel_joint, toe_joint,heel_offset, target_ground_height, heel_pos=None, toe_pos=None, is_swinging=False):
if toe_pos is None:
ct = skeleton.nodes[toe_joint].get_global_position(frames[frame_idx])
ct[1] = target_ground_height
else:
ct = toe_pos
if heel_pos is None:
ch = skeleton.nodes[heel_joint].get_global_position(frames[frame_idx])
ch[1] = target_ground_height
else:
ch = heel_pos
target_direction = normalize(ct - ch)
t = skeleton.nodes[toe_joint].get_global_position(frames[frame_idx])
h = skeleton.nodes[heel_joint].get_global_position(frames[frame_idx])
original_direction = normalize(t - h)
global_delta_q = quaternion_from_vector_to_vector(original_direction, target_direction)
global_delta_q = normalize(global_delta_q)
m = skeleton.nodes[heel_joint].get_global_matrix(frames[frame_idx])
m[:3, 3] = [0, 0, 0]
oq = quaternion_from_matrix(m)
oq = normalize(oq)
orientation = normalize(quaternion_multiply(global_delta_q, oq))
# set target ankle position based on the grounded heel and the global target orientation of the ankle
m = quaternion_matrix(orientation)[:3, :3]
target_heel_offset = np.dot(m, heel_offset)
ca = ch - target_heel_offset
print("set ankle constraint both", ch, ca, target_heel_offset, target_ground_height)
foot_state = FOOT_STATE_GROUNDED
if is_swinging:
foot_state = FOOT_STATE_SWINGING
return MotionGroundingConstraint(frame_idx, ankle_joint, ca, None, orientation, foot_state)
def interpolate_constraints(c1, c2):
p = (c1.position + c2.position)/2
o = quaternion_slerp(c1.orientation, c2.orientation, 0.5)
o = normalize(o)
return MotionGroundingConstraint(c1.frame_idx, c1.joint_name, p, None, o)
class MotionGroundingConstraint(object):
def __init__(self, frame_idx, joint_name, position, direction=None, orientation=None, foot_state=FOOT_STATE_GROUNDED):
self.frame_idx = frame_idx
self.joint_name = joint_name
self.position = position
self.direction = direction
self.orientation = orientation
self.toe_position = None
self.heel_position = None
self.global_toe_offset = None
self.foot_state = foot_state
def evaluate(self, skeleton, q_frame):
d = self.position - skeleton.nodes[self.joint_name].get_global_position(q_frame)
return np.dot(d, d)
class IKConstraintSet(object):
def __init__(self, frame_range, joint_names, positions):
self.frame_range = frame_range
self.joint_names = joint_names
self.constraints = []
for idx in range(frame_range[0], frame_range[1]):
for idx, joint_name in enumerate(joint_names):
c = MotionGroundingConstraint(idx, joint_name, positions[idx], None)
self.constraints.append(c)
def add_constraint(self, c):
self.constraints.append(c)
def evaluate(self, skeleton, q_frame):
error = 0
for c in self.constraints:
d = c.position - skeleton.nodes[c.joint_name].get_global_position(q_frame)
error += np.dot(d, d)
return error
def add_fixed_dofs_to_frame(skeleton, frame):
o = 3
full_frame = frame[:3].tolist()
for key, node in list(skeleton.nodes.items()):
if len(node.children) == 0:
continue
if not node.fixed:
full_frame += frame[o:o+4].tolist()
o += 4
else:
full_frame += node.rotation.tolist()
return full_frame
def extract_ik_chains(skeleton, damp_angle, damp_factor):
joints_map = skeleton.skeleton_model["joints"]
cos_map = skeleton.skeleton_model["cos_map"]
new_ik_chains = dict()
for j in IK_CHAINS_DEFAULT_SKELETON:
mapped_j = joints_map[j]
root_joint = IK_CHAINS_DEFAULT_SKELETON[j]["root"]
free_joint = IK_CHAINS_DEFAULT_SKELETON[j]["joint"]
if root_joint in joints_map and free_joint in joints_map:
mapped_free_joint = joints_map[free_joint]
if mapped_free_joint in cos_map:
data = copy(IK_CHAINS_DEFAULT_SKELETON[j])
data["root"] = joints_map[root_joint]
data["joint"] = mapped_free_joint
data["joint_axis"] = cos_map[mapped_free_joint]["x"]
data["end_effector_dir"] = cos_map[mapped_free_joint]["y"]
new_ik_chains[mapped_j] = AnalyticalLimbIK.init_from_dict(skeleton, mapped_j, data, damp_angle=damp_angle, damp_factor=damp_factor)
return new_ik_chains
class MotionGrounding(object):
def __init__(self, skeleton, ik_settings, skeleton_model, use_analytical_ik=True, damp_angle=None, damp_factor=None):
self.skeleton = skeleton
self._ik = NumericalInverseKinematicsExp(skeleton, ik_settings)
self._constraints = collections.OrderedDict()
self.transition_window = 10
self.root_smoothing_window = 20
self.translation_blend_window = 40
self._blend_ranges = collections.OrderedDict()
self.use_analytical_ik = use_analytical_ik
self.skeleton_model = skeleton_model
self.damp_angle = damp_angle
self.damp_factor = damp_factor
if "joints" in skeleton_model and "left_toe" in skeleton_model["joints"] and "right_toe" in skeleton_model["joints"]:
joints_map = skeleton_model["joints"]
self.ik_chains = extract_ik_chains(skeleton, self.damp_angle, self.damp_factor)
add_temporary_heels_to_skeleton(skeleton, joints_map["left_ankle"], joints_map["right_ankle"], joints_map["left_toe"], joints_map["right_toe"], "left_heel", "right_heel")
self.initialized = True
else:
self.ik_chains = dict()
self.initialized = False
def set_constraints(self, constraints):
self._constraints = constraints
def add_constraint(self, joint_name, frame_range, position, direction=None):
for frame_idx in range(*frame_range):
c = MotionGroundingConstraint(frame_idx, joint_name, position, direction)
if frame_idx not in self._constraints:
self._constraints[frame_idx] = []
self._constraints[frame_idx].append(c)
def add_blend_range(self, joint_names, frame_range):
if frame_range not in self._blend_ranges:
self._blend_ranges[frame_range] = []
for j in joint_names:
self._blend_ranges[frame_range].append(j)
def clear_constraints(self):
self._constraints = collections.OrderedDict()
def clear_blend_ranges(self):
self._blend_ranges = collections.OrderedDict()
def clear(self):
self.clear_constraints()
self.clear_blend_ranges()
def run(self, motion_vector, scene_interface=None):
new_frames = motion_vector.frames[:]
if scene_interface is not None:
self.shift_root_to_ground(new_frames, scene_interface)
self.shift_root_to_reach_constraints(new_frames)
if len(new_frames) > 1:
self.blend_at_transitions(new_frames)
if self.use_analytical_ik:
self.apply_analytical_ik(new_frames)
else:
self.apply_ik_constraints(new_frames)
if len(new_frames) > 1:
self.blend_at_transitions(new_frames)
return new_frames
def apply_on_frame(self, frame, scene_interface):
x = frame[0]
z = frame[2]
target_ground_height = scene_interface.get_height(x, z)
shift = target_ground_height - frame[1]
frame[1] += shift
#self.apply_analytical_ik_on_frame(frame, constraints)
return frame
def _blend_around_frame_range(self, frames, start, end, joint_names):
for joint_name in joint_names:
transition_start = max(start - self.transition_window, 0)
transition_end = min(end + self.transition_window, frames.shape[0]-1) - 1
forward_steps = start - transition_start
backward_steps = transition_end - end
if joint_name == self.skeleton.root:
if start > 0:
frames = smooth_translation_in_quat_frames(frames, start, self.translation_blend_window)
temp_frame = min(end + 1, frames.shape[0]-1)
frames = smooth_translation_in_quat_frames(frames, temp_frame, self.translation_blend_window)
idx = self._ik.skeleton.animated_joints.index(joint_name)*4+3
joint_parameter_indices = [idx, idx+1, idx+2, idx+3]
if start > 0:
create_transition_for_joints_using_slerp(frames, joint_parameter_indices, transition_start, start, forward_steps, BLEND_DIRECTION_FORWARD)
create_transition_for_joints_using_slerp(frames, joint_parameter_indices, end, transition_end, backward_steps, BLEND_DIRECTION_BACKWARD)
def apply_ik_constraints(self, frames):
for frame_idx, constraints in self._constraints.items():
if 0 <= frame_idx < len(frames):
frames[frame_idx] = self._ik.modify_frame(frames[frame_idx], constraints)
def shift_root_to_reach_constraints(self, frames):
root_positions = self.generate_root_positions_from_foot_constraints(frames)
root_positions = smooth_root_positions(root_positions, self.root_smoothing_window)
self.apply_root_constraints(frames, root_positions)
def generate_root_positions_from_foot_constraints(self, frames):
root_constraints = []
for frame_idx, constraints in self._constraints.items():
if 0 <= frame_idx < len(frames):
grounding_constraints = [c for c in constraints if c.foot_state==FOOT_STATE_GROUNDED]
n_constraints = len(grounding_constraints)
p = None
if n_constraints == 1:
p = self.generate_root_constraint_for_one_foot(frames[frame_idx], grounding_constraints[0])
elif n_constraints > 1:
p = self.generate_root_constraint_for_two_feet(frames[frame_idx], grounding_constraints[0], grounding_constraints[1])
if p is None:
p = frames[frame_idx, :3]
root_constraints.append(p)
return np.array(root_constraints)
def apply_root_constraints(self, frames, constraints):
for frame_idx, p in enumerate(constraints):
if p is not None:
frames[frame_idx][:3] = p
def generate_root_constraint_for_one_foot(self, frame, c):
pelvis = self.skeleton.skeleton_model["joints"]["pelvis"]
pelvis_pos = self.skeleton.nodes[pelvis].get_global_position(frame)
target_length = | np.linalg.norm(c.position - pelvis_pos) | numpy.linalg.norm |
from __future__ import print_function
from builtins import next
from builtins import str
from builtins import object
import re
import numpy as nm
import os.path as osp
import pyfits as pf
def scarray(li, scal=False):
if len(li) == 1 and scal:
return li[0]
else:
if type(li[0]) != str:
return nm.array(li)
return li
def read_array(fname, dirname):
fname = lookupfile(fname, dirname)
try:
pfits = pf.open(fname)
ii = 0
while pfits[ii].data is None:
ii += 1
return pfits[ii].data
except Exception:
return | nm.loadtxt(fname) | numpy.loadtxt |
import argparse
from pathlib import Path
import networkx as nx
import nxmetis
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from torch_geometric.data import Data, DataLoader, Batch
from torch_geometric.nn import SAGEConv, GATConv, GlobalAttention, graclus, avg_pool, global_mean_pool
from torch_geometric.utils import to_networkx, k_hop_subgraph, degree
import numpy as np
from numpy import random
import scipy
from scipy.sparse import coo_matrix
from scipy.io import mmread
from scipy.spatial import Delaunay
#import random_p
import copy
import math
import timeit
import os
from itertools import combinations
import ctypes
libscotch = ctypes.cdll.LoadLibrary('scotch/build/libSCOTCHWrapper.so')
# Networkx geometric Delaunay mesh with n random points in the unit square
def graph_delaunay_from_points(points):
mesh = Delaunay(points, qhull_options="QJ")
mesh_simp = mesh.simplices
edges = []
for i in range(len(mesh_simp)):
edges += combinations(mesh_simp[i], 2)
e = list(set(edges))
return nx.Graph(e)
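# Example usage (a small sketch; the point count is arbitrary):
#
#   pts = np.random.random_sample((100, 2))   # 100 random points in the unit square
#   g = graph_delaunay_from_points(pts)       # networkx graph of the Delaunay mesh
#   # g.number_of_nodes() == 100; edges connect points sharing a Delaunay triangle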
# Pytorch geometric Delaunay mesh with n random points in the unit square
def random_delaunay_graph(n):
points = np.random.random_sample((n, 2))
g = graph_delaunay_from_points(points)
adj_sparse = nx.to_scipy_sparse_matrix(g, format='coo')
row = adj_sparse.row
col = adj_sparse.col
one_hot = []
for i in range(g.number_of_nodes()):
one_hot.append([1., 0.])
edges = torch.tensor([row, col], dtype=torch.long)
nodes = torch.tensor(np.array(one_hot), dtype=torch.float)
graph_torch = Data(x=nodes, edge_index=edges)
return graph_torch
# Build a pytorch geometric graph with features [1,0] form a networkx graph
def torch_from_graph(g):
adj_sparse = nx.to_scipy_sparse_matrix(g, format='coo')
row = adj_sparse.row
col = adj_sparse.col
one_hot = []
for i in range(g.number_of_nodes()):
one_hot.append([1., 0.])
edges = torch.tensor([row, col], dtype=torch.long)
nodes = torch.tensor(np.array(one_hot), dtype=torch.float)
graph_torch = Data(x=nodes, edge_index=edges)
degs = np.sum(adj_sparse.todense(), axis=0)
first_vertices = np.where(degs == np.min(degs))[0]
first_vertex = | np.random.choice(first_vertices) | numpy.random.choice |
# coding: utf-8
"""
Chocoball detection
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import io
import os
import glob
import numpy as np
from PIL import Image
from io import BytesIO
import random
from chainercv.visualizations import vis_bbox
from chainercv.links import FasterRCNNVGG16
class ChocoballDetector:
class_file = 'data/classes.txt'
pretrain_model = 'model/snapshot_model.npz'
MAX_WIDTH = 500
def __init__(self):
self.getClasses()
self.setModel()
def getClasses(self):
classes = list()
with open(self.class_file) as fd:
for one_line in fd.readlines():
cl = one_line.split('\n')[0]
classes.append(cl)
self.classes = classes
return classes
def setModel(self):
self.model_frcnn = FasterRCNNVGG16(n_fg_class=len(self.classes),
pretrained_model=self.pretrain_model)
return 0
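# Example usage (a sketch; 'test.jpg' is a hypothetical file and the class/model
# paths are the class-level defaults above):
#
#   detector = ChocoballDetector()            # loads classes.txt and snapshot_model.npz
#   result = detector.detectChocoball(open('test.jpg', 'rb'))
#   # result follows the dict layout documented in detectChocoball below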
def detectChocoball(self, img):
"""
Count the number of ChocoBalls
Args:
img : jpeg image (binary)
Returns:
dict{'box':[[ymin,xmin,ymax,xmax]], 'objects':[object_id], 'scores':[score]}
"""
#img_pil = Image.open(BytesIO(img))
img_pil = Image.open(img)
img_arr = | np.asarray(img_pil) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 14:31:17 2015
@author: <NAME>.
Description:
This script does CPU and GPU matrix element time complexity
profiling. It has a function which applies the matrix element
analysis for a given set of parameters, profiles the code and
plots the time complexity results (with fit) and plots the matrix
elements from each case.
"""
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from my_timer import timer
from math import log
from scipy.optimize import curve_fit
def f_MEplaceholder(neval, mode):
# Placeholder integration instead of ME calc
result, error = (sp.integrate.quad(lambda x:
sp.special.jv(2.5, x), 0, neval) if mode == 'gpu'
else sp.integrate.quadrature(lambda x:
sp.special.jv(2.5, x), 0, neval))
return result, error
def flinear(N, mode):
"""
O(n) function
"""
y = np.asarray([i for i in range(N)])
np.asarray([i for i in range(N)])
np.asarray([i for i in range(N)])
return y, 1
def fsquare(N, mode):
"""
O(n^2) function
"""
for i in range(N):
for j in range(N):
y = i*j
return y,1
def algoAnalysis(fn, nMin, nMax, mode):
"""
Run timer and plot time complexity
"""
n = []
time_result = []
y_result = []
y_err = []
for i in [j*32 for j in range(nMin,nMax+1)]:
with timer() as t:
temp_result, temp_err = fn(i, mode)
time_result.append(t.msecs)
y_result.append(temp_result)
y_err.append(temp_err)
n.append(i)
return n, time_result, y_result, y_err
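# Example usage (a sketch; nMin/nMax are arbitrary and f_MEplaceholder stands in
# for the real matrix-element call):
#
#   n, t_ms, y, err = algoAnalysis(f_MEplaceholder, nMin=1, nMax=8, mode='gpu')
#   # n holds the evaluation counts (multiples of 32), t_ms the wall times in ms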
def plotAll(n, time_data, y_data, err_data):
n = np.asarray(n)
time_data = np.asarray(time_data)
y_data = np.asarray(y_data)
err_data = | np.asarray(err_data) | numpy.asarray |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tqdm
import torch
import pickle
import resource
import numpy as np
import matplotlib.pyplot as plt
from args import parse_args
from modelSummary import model_dict
from pytorchtools import load_from_file
from torch.utils.data import DataLoader
from helperfunctions import mypause, stackall_Dict
from loss import get_seg2ptLoss
from utils import get_nparams, get_predictions
from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048*10, rlimit[1]))
#%%
if __name__ == '__main__':
args = parse_args()
device=torch.device("cuda")
torch.cuda.manual_seed(12)
if torch.cuda.device_count() > 1:
print('Moving to a multiGPU setup.')
args.useMultiGPU = True
else:
args.useMultiGPU = False
torch.backends.cudnn.deterministic=False
if args.model not in model_dict:
print("Model not found.")
print("valid models are: {}".format(list(model_dict.keys())))
exit(1)
LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname)
path2model = os.path.join(LOGDIR, 'weights')
path2checkpoint = os.path.join(LOGDIR, 'checkpoints')
path2writer = os.path.join(LOGDIR, 'TB.lock')
path2op = os.path.join(os.getcwd(), 'op', str(args.curObj))
os.makedirs(LOGDIR, exist_ok=True)
os.makedirs(path2model, exist_ok=True)
os.makedirs(path2checkpoint, exist_ok=True)
os.makedirs(path2writer, exist_ok=True)
os.makedirs(path2op, exist_ok=True)
model = model_dict[args.model]
netDict = load_from_file([args.loadfile,
os.path.join(path2checkpoint, 'checkpoint.pt')])
startEp = netDict['epoch'] if 'epoch' in netDict.keys() else 0
if 'state_dict' in netDict.keys():
model.load_state_dict(netDict['state_dict'])
print('Parameters: {}'.format(get_nparams(model)))
model = model if not args.useMultiGPU else torch.nn.DataParallel(model)
model = model.to(device).to(args.prec)
f = open(os.path.join('curObjects',
'baseline',
'cond_'+str(args.curObj)+'.pkl'), 'rb')
_, _, testObj = pickle.load(f)
testObj.path2data = os.path.join(args.path2data, 'Datasets', 'All')
testObj.augFlag = False
testloader = DataLoader(testObj,
batch_size=args.batchsize,
shuffle=False,
num_workers=args.workers,
drop_last=False)
if args.disp:
fig, axs = plt.subplots(nrows=1, ncols=1)
#%%
accLoss = 0.0
imCounter = 0
ious = []
dists_pupil_latent = []
dists_pupil_seg = []
dists_iris_latent = []
dists_iris_seg = []
model.eval()
opDict = {'id':[], 'archNum': [], 'archName': [], 'code': [],
'scores':{'iou':[], 'lat_dst':[], 'seg_dst':[]},
'pred':{'pup_latent_c':[],
'pup_seg_c':[],
'iri_latent_c':[],
'iri_seg_c':[],
'mask':[]},
'gt':{'pup_c':[], 'mask':[]}}
with torch.no_grad():
for bt, batchdata in enumerate(tqdm.tqdm(testloader)):
img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
out_tup = model(img.to(device).to(args.prec),
labels.to(device).long(),
pupil_center.to(device).to(args.prec),
elNorm.to(device).to(args.prec),
spatialWeights.to(device).to(args.prec),
distMap.to(device).to(args.prec),
cond.to(device).to(args.prec),
imInfo[:, 2].to(device).to(torch.long),
0.5)
output, elOut, latent, loss = out_tup
latent_pupil_center = elOut[:, 0:2].detach().cpu().numpy()
latent_iris_center = elOut[:, 5:7].detach().cpu().numpy()
_, seg_pupil_center = get_seg2ptLoss(output[:, 2, ...].cpu(), pupil_center, temperature=4)
_, seg_iris_center = get_seg2ptLoss(-output[:, 0, ...].cpu(), iris_center, temperature=4)
loss = loss if args.useMultiGPU else loss.mean()
accLoss += loss.detach().cpu().item()
predict = get_predictions(output)
iou, iou_bySample = getSeg_metrics(labels.numpy(),
predict.numpy(),
cond[:, 1].numpy())[1:]
latent_pupil_dist, latent_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
latent_pupil_center,
cond[:,0].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_pupil_dist, seg_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
seg_pupil_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
latent_iris_dist, latent_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
latent_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_iris_dist, seg_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
seg_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
dists_pupil_latent.append(latent_pupil_dist)
dists_iris_latent.append(latent_iris_dist)
dists_pupil_seg.append(seg_pupil_dist)
dists_iris_seg.append(seg_iris_dist)
ious.append(iou)
pup_latent_c = unnormPts(latent_pupil_center,
img.shape[2:])
pup_seg_c = unnormPts(seg_pupil_center,
img.shape[2:])
iri_latent_c = unnormPts(latent_iris_center,
img.shape[2:])
iri_seg_c = unnormPts(seg_iris_center,
img.shape[2:])
dispI = generateImageGrid(img.numpy().squeeze(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_seg_c,
cond.numpy(),
override=True,
heatmaps=False)
for i in range(0, img.shape[0]):
archNum = testObj.imList[imCounter, 1]
opDict['id'].append(testObj.imList[imCounter, 0])
opDict['code'].append(latent[i,...].detach().cpu().numpy())
opDict['archNum'].append(archNum)
opDict['archName'].append(testObj.arch[archNum])
opDict['pred']['pup_latent_c'].append(pup_latent_c[i, :])
opDict['pred']['pup_seg_c'].append(pup_seg_c[i, :])
opDict['pred']['iri_latent_c'].append(iri_latent_c[i, :])
opDict['pred']['iri_seg_c'].append(iri_seg_c[i, :])
if args.test_save_op_masks:
opDict['pred']['mask'].append(predict[i,...].numpy().astype(np.uint8))
opDict['scores']['iou'].append(iou_bySample[i, ...])
opDict['scores']['lat_dst'].append(latent_pupil_dist_bySample[i, ...])
opDict['scores']['seg_dst'].append(seg_pupil_dist_bySample[i, ...])
opDict['gt']['pup_c'].append(pupil_center[i,...].numpy())
if args.test_save_op_masks:
opDict['gt']['mask'].append(labels[i,...].numpy().astype(np.uint8))
imCounter+=1
if args.disp:
if bt == 0:
h_im = plt.imshow(dispI.permute(1, 2, 0))
plt.pause(0.01)
else:
h_im.set_data(dispI.permute(1, 2, 0))
mypause(0.01)
opDict = stackall_Dict(opDict)
ious = np.stack(ious, axis=0)
ious = np.nanmean(ious, axis=0)
print('mIoU: {}. IoUs: {}'.format(np.mean(ious), ious))
print('Latent space PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_latent),
| np.nanstd(dists_pupil_latent) | numpy.nanstd |
import numpy as np
from sklearn.cluster import DBSCAN
from faster_particles.ppn_utils import crop as crop_util
from faster_particles.display_utils import extract_voxels
class CroppingAlgorithm(object):
"""
Base class for any cropping algorithm, they should inherit from it
and implement crop method (see below)
"""
def __init__(self, cfg, debug=False):
self.cfg = cfg
self.d = cfg.SLICE_SIZE # Patch or box/crop size
self.a = cfg.CORE_SIZE # Core size
self.N = cfg.IMAGE_SIZE
self._debug = debug
def crop(self, coords):
"""
coords is expected to have shape (N, 3), i.e. the list of non-zero voxel coordinates
Returns a list of patches centers and sizes (of cubes centered at the
patch centers)
"""
pass
def process(self, original_blob):
# FIXME cfg.SLICE_SIZE vs patch_size
patch_centers, patch_sizes = self.crop(original_blob['voxels'])
return self.extract(patch_centers, patch_sizes, original_blob)
def extract(self, patch_centers, patch_sizes, original_blob):
batch_blobs = []
for i in range(len(patch_centers)):
patch_center, patch_size = patch_centers[i], patch_sizes[i]
blob = {}
# Flip patch_center coordinates
# because gt_pixels coordinates are reversed
# FIXME here or before blob['data'] ??
patch_center = np.flipud(patch_center)
blob['data'], _ = crop_util(np.array([patch_center]),
self.cfg.SLICE_SIZE,
original_blob['data'], return_labels=False)
patch_center = patch_center.astype(int)
# print(patch_center, original_blob['data'][0, patch_center[0], patch_center[1], patch_center[2], 0], np.count_nonzero(blob['data']))
# assert np.count_nonzero(blob['data']) > 0
if 'labels' in original_blob:
blob['labels'], _ = crop_util(np.array([patch_center]),
self.cfg.SLICE_SIZE,
original_blob['labels'][..., np.newaxis], return_labels=False)
blob['labels'] = blob['labels'][..., 0]
# print(np.nonzero(blob['data']))
# print(np.nonzero(blob['labels']))
# assert np.array_equal(np.nonzero(blob['data']), np.nonzero(blob['labels']))
if 'weight' in original_blob:
blob['weight'], _ = crop_util(np.array([patch_center]),
self.cfg.SLICE_SIZE,
original_blob['weight'][..., np.newaxis], return_labels=False)
blob['weight'][blob['weight'] == 0.0] = 0.1
blob['weight'] = blob['weight'][..., 0]
# Select gt pixels
if 'gt_pixels' in original_blob:
indices = np.where(np.all(np.logical_and(
original_blob['gt_pixels'][:, :-1] >= patch_center - patch_size/2.0,
original_blob['gt_pixels'][:, :-1] < patch_center + patch_size/2.0), axis=1))
blob['gt_pixels'] = original_blob['gt_pixels'][indices]
blob['gt_pixels'][:, :-1] = blob['gt_pixels'][:, :-1] - (patch_center - patch_size / 2.0)
# Add artificial gt pixels
artificial_gt_pixels = self.add_gt_pixels(original_blob, blob, patch_center, self.cfg.SLICE_SIZE)
if artificial_gt_pixels.shape[0]:
blob['gt_pixels'] = np.concatenate([blob['gt_pixels'], artificial_gt_pixels], axis=0)
# Select voxels
# Flip patch_center coordinates back to normal
patch_center = np.flipud(patch_center)
if 'voxels' in original_blob:
voxels = original_blob['voxels']
blob['voxels'] = voxels[np.all(np.logical_and(
voxels >= patch_center - patch_size / 2.0,
voxels < patch_center + patch_size / 2.0), axis=1)]
blob['voxels'] = blob['voxels'] - (patch_center - patch_size / 2.0)
blob['entries'] = original_blob['entries']
# Crops for small UResNet
if self.cfg.NET == 'small_uresnet':
blob['crops'], blob['crops_labels'] = crop_util(
blob['gt_pixels'][:, :-1],
self.cfg.CROP_SIZE, blob['data'])
# FIXME FIXME FIXME
# Make sure there is at least one ground truth pixel in the patch (for training)
if self.cfg.NET not in ['ppn', 'ppn_ext', 'full'] or len(blob['gt_pixels']) > 0:
batch_blobs.append(blob)
return batch_blobs, patch_centers, patch_sizes
def compute_overlap(self, coords, patch_centers, sizes=None):
"""
Compute overlap dict: dict[x] gives the number of voxels which belong
to x patches.
"""
if sizes is None:
sizes = self.d/2.0
overlap = []
for voxel in coords:
overlap.append(np.sum(np.all(np.logical_and(
patch_centers-sizes <= voxel,
patch_centers + sizes >= voxel
), axis=1)))
return dict(zip(*np.unique(overlap, return_counts=True)))
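# Example usage (a sketch with hypothetical arrays; in practice coords comes from
# original_blob['voxels'] and patch_centers from self.crop()):
#
#   coords = np.array([[10, 10, 10], [50, 50, 50]])
#   patch_centers = np.array([[12, 12, 12], [48, 48, 48], [52, 52, 52]])
#   overlap = self.compute_overlap(coords, patch_centers, sizes=16)
#   # -> {1: 1, 2: 1}: one voxel covered by exactly one patch, one voxel by two patches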
def add_gt_pixels(self, original_blob, blob, patch_center, patch_size):
"""
Add artificial pixels after cropping
"""
# Case 1: crop boundaries is intersecting with data
nonzero_idx = np.array(np.where(blob['data'][0, ..., 0] > 0.0)).T # N x 3
border_idx = nonzero_idx[np.any(np.logical_or(nonzero_idx == 0, nonzero_idx == self.cfg.IMAGE_SIZE - 1), axis=1)]
# Case 2: crop is partially outside of original data (thus padded)
# if patch_center is within patch_size of boundaries of original blob
# boundary intesecting with data
padded_idx = nonzero_idx[np.any(np.logical_or(nonzero_idx + patch_center - patch_size / 2.0 >= self.cfg.IMAGE_SIZE - 2, nonzero_idx + patch_center - patch_size / 2.0 <= 1), axis=1)]
# dbscan on all found voxels from case 1 and 2
coords = np.concatenate([border_idx, padded_idx], axis=0)
artificial_gt_pixels = []
if coords.shape[0]:
db = DBSCAN(eps=10, min_samples=3).fit_predict(coords)
for v in np.unique(db):
cluster = coords[db == v]
artificial_gt_pixels.append(cluster[np.argmax(blob['data'][0, ..., 0][cluster.T[0], cluster.T[1], cluster.T[2]]), :])
artificial_gt_pixels = np.concatenate([artificial_gt_pixels, np.ones((len(artificial_gt_pixels), 1))], axis=1)
return np.array(artificial_gt_pixels)
def reconcile(self, batch_results, patch_centers, patch_sizes):
"""
Reconcile slices result together
using batch_results, batch_blobs, patch_centers and patch_sizes
"""
final_results = {}
if len(batch_results) == 0: # Empty batch
return final_results
# UResNet predictions
if 'predictions' and 'scores' and 'softmax' in batch_results[0]:
final_voxels = np.array([], dtype=np.int32).reshape(0, 3) # Shape N_voxels x dim
final_scores = np.array([], dtype=np.float32).reshape(0, self.cfg.NUM_CLASSES) # Shape N_voxels x num_classes
final_counts = np.array([], dtype=np.int32).reshape(0,) # Shape N_voxels x 1
for i, result in enumerate(batch_results):
# Extract voxel and voxel values
# Shape N_voxels x dim
v, values = extract_voxels(result['predictions'])
# Extract corresponding softmax scores
# Shape N_voxels x num_classes
scores = result['softmax'][v[:, 0], v[:, 1], v[:, 2], :]
# Restore original blob coordinates
v = (v + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0).astype(np.int64)
v = np.clip(v, 0, self.cfg.IMAGE_SIZE-1)
# indices are indices of the *first* occurrences of the unique values
# hence for doublons they are indices in final_voxels
# We assume the only overlap that can occur is between
# final_voxels and v, not inside these arrays themselves
n = final_voxels.shape[0]
final_voxels, indices, counts = np.unique(np.concatenate([final_voxels, v], axis=0), axis=0, return_index=True, return_counts=True)
final_scores = np.concatenate([final_scores, scores], axis=0)[indices]
lower_indices = indices[indices < n]
upper_indices = indices[indices >= n]
final_counts[lower_indices] += counts[lower_indices] - 1
final_counts = np.concatenate([final_counts, np.ones((upper_indices.shape[0],))], axis=0)
final_scores = final_scores / final_counts[:, np.newaxis] # Compute average
final_predictions = np.argmax(final_scores, axis=1)
final_results['predictions'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)
final_results['predictions'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_predictions
final_results['scores'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)
final_results['scores'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_scores[np.arange(final_scores.shape[0]), final_predictions]
final_results['softmax'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3 + (self.cfg.NUM_CLASSES,))
final_results['softmax'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2], :] = final_scores
final_results['predictions'] = final_results['predictions'][np.newaxis, ...]
# PPN
if 'im_proposals' and 'im_scores' and 'im_labels' and 'rois' in batch_results[0]:
# print(batch_results[0]['im_proposals'].shape, batch_results[0]['im_scores'].shape, batch_results[0]['im_labels'].shape, batch_results[0]['rois'].shape)
final_im_proposals = np.array([], dtype=np.float32).reshape(0, 3)
final_im_scores = np.array([], dtype=np.float32).reshape(0,)
final_im_labels = np.array([], dtype=np.int32).reshape(0,)
final_rois = np.array([], dtype=np.float32).reshape(0, 3)
for i, result in enumerate(batch_results):
im_proposals = result['im_proposals'] + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0
im_proposals = np.clip(im_proposals, 0, self.cfg.IMAGE_SIZE-1)
# print(final_im_proposals, im_proposals)
final_im_proposals = np.concatenate([final_im_proposals, im_proposals], axis=0)
final_im_scores = | np.concatenate([final_im_scores, result['im_scores']], axis=0) | numpy.concatenate |
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # allow importing files from the parent directory
import numpy as np
from common.functions import softmax, cross_entropy_error
from common.gradient import numerical_gradient
class simpleNet:
def __init__(self):
self.W = np.random.randn(2,3)
def predict(self, x):
return np.dot(x, self.W)
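# Hedged usage sketch (the values below are illustrative, not from the source):
# net = simpleNet()
# x = np.array([0.6, 0.9])
# p = net.predict(x)                               # class scores, shape (3,)
# t = np.array([0, 0, 1])                          # one-hot target
# loss = cross_entropy_error(softmax(p), t)
# dW = numerical_gradient(lambda w: cross_entropy_error(softmax(net.predict(x)), t), net.W)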
#!/usr/bin/env python3
from timeit import default_timer
import numpy as np
from ortools.linear_solver import pywraplp
import json
import argparse
from topology_parser import get_topology_matrix
parser = argparse.ArgumentParser(description="create transfer plan.")
parser.add_argument("mode", type=str, help="scatter, gather or all2all")
parser.add_argument("main_gpu", type=int, help="source for scatter or target for gather")
args=parser.parse_args()
modes = {"scatter":0, "gather":1, "all2all":2}
if args.mode in modes.keys():
mode = args.mode
else:
print("invalid mode")
parser.print_help()
raise SystemExit
main_gpu = args.main_gpu
capacities = get_topology_matrix()
bisection_width = 0
# dgx1 volta: 6 nvlink per gpu
# capacities = get_topology_matrix("dgx1_topology.txt")
# bisection_width = 6
# ps0001 pascal: 4 nvlink per gpu
# capacities = get_topology_matrix("ps0001_topology.txt")
# bisection_width = 4
# ps0001 pascal: 4 nvlink per gpu
# num_gpus = 4
# capacities = np.eye(num_gpus) * num_gpus
# capacities += np.array([[0,2,1,1],
# [2,0,1,1],
# [1,1,0,2],
# [1,1,2,0]])
# bisection_width = 4
# like ps0001 but volta: 6 nvlink per gpu
# num_gpus = 4
# capacities = np.eye(num_gpus) * num_gpus
# capacities += np.array([[0,2,2,2],
# [2,0,2,2],
# [2,2,0,2],
# [2,2,2,0]])
# bisection_width = 6
# half of dgx1 volta: 6 nvlink per gpu
# num_gpus = 4
# capacities = np.eye(num_gpus) * num_gpus
# capacities += np.array([[0,1,1,2],
# [1,0,2,1],
# [1,2,0,2],
# [2,1,2,0]])
# bisection_width = 5
# dgx1 volta: 6 nvlink per gpu
# num_gpus = 8
# capacities = np.eye(num_gpus) * num_gpus
# capacities += np.array([[0,1,1,2,2,0,0,0],
# [1,0,2,1,0,2,0,0],
# [1,2,0,2,0,0,1,0],
# [2,1,2,0,0,0,0,1],
# [2,0,0,0,0,1,1,2],
# [0,2,0,0,1,0,2,1],
# [0,0,1,0,1,2,0,2],
# [0,0,0,1,2,1,2,0]])
# bisection_width = 6
# like dgx1 volta: 6 nvlink per gpu, different ring structure
# num_gpus = 8
# capacities = np.eye(num_gpus) * num_gpus
# capacities += np.array([[0,2,1,1,2,0,0,0],
# [2,0,1,1,0,2,0,0],
# [1,1,0,2,0,0,2,0],
# [1,1,2,0,0,0,0,2],
# [2,0,0,0,0,1,1,2],
# [0,2,0,0,1,0,2,1],
# [0,0,2,0,1,2,0,1],
# [0,0,0,2,2,1,1,0]])
# bisection_width = 8
num_gpus = capacities.shape[0]
if bisection_width == 0:
bisection_width = np.sum(capacities[num_gpus//2:,:num_gpus//2]).astype(int)
main_degree = int(np.sum(capacities[main_gpu, :]) - capacities[main_gpu,main_gpu])
print("main:", main_gpu, "degree:", main_degree, "bisection width:", bisection_width)
print("topology:")
print(capacities)
max_capacity = np.max(capacities * (1-np.eye(num_gpus)))
print("max links:", max_capacity)
if max_capacity > 2:
print("topologies with more than 2 nvlinks at the same edge are not supported.")
raise SystemExit()
lengths = np.where(capacities <= max_capacity, max_capacity / capacities, 1)
# print("lengths:")
# print(lengths)
if modes[mode] == 0: # scatter
num_commodities = 1
parts_per_commodity = int(main_degree // np.gcd(main_degree, num_gpus-1))
# one gpu starts with one chunk of the commodity
source = main_gpu
commodities_out = np.ones(num_gpus) * parts_per_commodity
commodities_in = np.zeros(num_gpus)
commodities_in[source] += np.sum(commodities_out)
elif modes[mode] == 1: # gather
num_commodities = 1
parts_per_commodity = int(main_degree // np.gcd(main_degree, num_gpus-1))
# one gpu starts with all chunks of the commodity
target = main_gpu
commodities_in = np.ones(num_gpus) * parts_per_commodity
commodities_out = np.zeros(num_gpus)
commodities_out[target] += np.sum(commodities_in)
elif modes[mode] == 2: # all-to-all
num_commodities = num_gpus
parts_per_commodity = int(bisection_width // np.gcd(bisection_width, int(np.ceil(num_gpus/2)*np.floor(num_gpus/2))))
# each gpu starts with one of each commodity
commodities_in = np.ones((num_gpus,num_commodities)) * parts_per_commodity
commodities_out = np.diagflat( np.sum(commodities_in, axis=0) )
else:
raise SystemExit()
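# Worked example for the chunking rule above (numbers are illustrative): for a
# scatter from a main GPU with degree 4 in an 8-GPU topology,
# parts_per_commodity = 4 // gcd(4, 7) = 4, so every destination's share is cut
# into 4 chunks that can leave the main GPU over its 4 links in parallel.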
# parts_per_commodity = 1
capacities += np.eye(num_gpus)
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, List, Tuple, Any, Union
import warnings
import numpy as np
import pandas as pd
from floodlight.utils.types import Numeric
from floodlight.core.definitions import essential_events_columns, protected_columns
from floodlight.core.code import Code
@dataclass
class Events:
"""Event data fragment. Core class of floodlight.
Event data is stored in a `pandas` ``DataFrame``, where each row stores one event
with its different properties organized in columns. You may put whatever
information you like in these columns. Yet, the columns `"eID"` and `"gameclock"`
are mandatory to identify and time-locate events. Some special column names are
reserved for properties that follow conventions. These may be necessary and their
existence is checked for running particular analyses.
Parameters
----------
events: pd.DataFrame
DataFrame containing rows of events and columns of respective event properties.
direction: str, optional
Playing direction of players in data fragment, should be either
'lr' (left-to-right) or 'rl' (right-to-left).
Attributes
----------
essential: list
List of essential columns available for stored events.
protected: list
List of protected columns available for stored events.
custom: list
List of custom (i.e. non-essential and non-protected) columns available for
stored events.
essential_missing: list
List of missing essential columns.
essential_invalid: list
List of essential columns that violate the definitions.
protected_missing: list
List of missing protected columns.
protected_invalid: list
List of protected columns that violate the definitions.
"""
events: pd.DataFrame
direction: str = None
def __post_init__(self):
# check for missing essential columns
missing_columns = self.essential_missing
if missing_columns:
raise ValueError(
f"Floodlight Events object is missing the essential "
f"column(s) {missing_columns}!"
)
# warn if value ranges are violated
incorrect_columns = self.essential_invalid
if incorrect_columns:
for col in incorrect_columns:
warnings.warn(
f"Floodlight Events column {col} does not match the defined value"
f"range (from floodlight.core.definitions). You can pursue at this "
f"point, however, be aware that this may lead to unexpected "
f"behavior in the future."
)
def __str__(self):
return f"Floodlight Events object of shape {self.events.shape}"
def __len__(self):
return len(self.events)
def __getitem__(self, key):
return self.events[key]
def __setitem__(self, key, value):
self.events[key] = value
@property
def essential(self):
essential = [
col for col in self.events.columns if col in essential_events_columns
]
return essential
@property
def protected(self):
protected = [col for col in self.events.columns if col in protected_columns]
return protected
@property
def custom(self):
custom = [
col
for col in self.events.columns
if col not in essential_events_columns and col not in protected_columns
]
return custom
@property
def essential_missing(self):
missing_columns = [
col for col in essential_events_columns if col not in self.essential
]
return missing_columns
@property
def essential_invalid(self):
invalid_columns = [
col
for col in self.essential
if not self.column_values_in_range(col, essential_events_columns)
]
return invalid_columns
@property
def protected_missing(self):
missing_columns = [
col for col in protected_columns if col not in self.protected
]
return missing_columns
@property
def protected_invalid(self):
invalid_columns = [
col
for col in self.protected
if not self.column_values_in_range(col, protected_columns)
]
return invalid_columns
def column_values_in_range(self, col: str, definitions: Dict[str, Dict]) -> bool:
"""Check if values for a single column of the inner event DataFrame are in
correct range using the specifications from floodlight.core.definitions.
Parameters
----------
col: str
Column name of the inner event DataFrame to be checked
definitions: Dict
Dictionary (from floodlight.core.definitions) containing specifications for
the columns to be checked.
The definitions need to contain an entry for the column to be checked and
this entry needs to contain information about the value range in the form:
``definitions[col][value_range] = (min, max)``.
Returns
-------
bool
True if the checks for value range pass and False otherwise
Notes
-----
Non-integer results of this computation will always be rounded to the next
smaller integer.
"""
# skip if value range is not defined
if definitions[col]["value_range"] is None:
return True
# skip values that are None or NaN
col_nan_free = self.events[col].dropna()
# retrieve value range from definitions
min_val, max_val = definitions[col]["value_range"]
# check value range for remaining values
if not (min_val <= col_nan_free).all() & (col_nan_free <= max_val).all():
return False
# all checks passed
return True
def add_frameclock(self, framerate: int):
"""Add the column "frameclock", computed as the rounded multiplication of
gameclock and framerate, to the inner events DataFrame.
Parameters
----------
framerate: int
Temporal resolution of data in frames per second/Hertz.
"""
frameclock = np.full((len(self.events)), -1, dtype=int)
frameclock[:] = np.floor(self.events["gameclock"].values * framerate)
self.events["frameclock"] = frameclock
def select(
self, conditions: Tuple[str, Any] or List[Tuple[str, Any]]
) -> pd.DataFrame:
"""Returns a DataFrame containing all entries from the inner events DataFrame
that satisfy all given conditions.
Parameters
----------
conditions: Tuple or List of Tuples
A single or a list of conditions used for filtering. Each condition should
follow the form ``(column, value)``. If ``value`` is given as a variable
(can also be None), it is used to filter for an exact value. If given as a
tuple ``value = (min, max)`` that specifies a minimum and maximum value, it
is filtered for a value range.
For example, to filter all events that have the ``eID`` of ``"Pass"`` and
that happened within the first 1000 seconds of the segment, conditions
should look like:
``conditions = [("eID", "Pass"), ("gameclock", (0, 1000))]``
Returns
-------
filtered_events: pd.DataFrame
A view of the inner events DataFrame with rows fulfilling all criteria
specified in conditions. The DataFrame can be empty if no row fulfills all
specified criteria.
"""
filtered_events = self.events
# convert single non-list condition to list
if not isinstance(conditions, list):
conditions = [conditions]
# loop through and filter by conditions
for column, value in conditions:
# if the value is None filter for all entries with None, NaN or NA
if value is None:
filtered_events = filtered_events[filtered_events[column].isna()]
# check if a single value or a value range is given
else:
# value range: filter by minimum and maximum value
if isinstance(value, (list, tuple)):
min_val, max_val = value
filtered_events = filtered_events[
filtered_events[column] >= min_val
]
filtered_events = filtered_events[
filtered_events[column] <= max_val
]
# single value: filter by that value
else:
filtered_events = filtered_events[filtered_events[column] == value]
return filtered_events
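# Usage sketch mirroring the docstring example above:
# passes_first_1000s = events.select([("eID", "Pass"), ("gameclock", (0, 1000))])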
def translate(self, shift: Tuple[Numeric, Numeric]):
"""Translates data by shift vector.
Parameters
----------
shift : list or array-like
Shift vector of form v = (x, y). Any iterable data type with two numeric
entries is accepted.
"""
if "at_x" in self.protected and self.events["at_x"].dtype in [
"int64",
"float64",
]:
self.events["at_x"] = self.events["at_x"].map(lambda x: x + shift[0])
if "at_y" in self.protected and self.events["at_y"].dtype in [
"int64",
"float64",
]:
self.events["at_y"] = self.events["at_y"].map(lambda x: x + shift[1])
if "to_x" in self.protected and self.events["to_x"].dtype in [
"int64",
"float64",
]:
self.events["to_x"] = self.events["to_x"].map(lambda x: x + shift[0])
if "to_y" in self.protected and self.events["to_y"].dtype in [
"int64",
"float64",
]:
self.events["to_y"] = self.events["to_y"].map(lambda x: x + shift[1])
def scale(self, factor: float, axis: str = None):
"""Scales data by a given factor and optionally selected axis.
Parameters
----------
factor : float
Scaling factor.
axis : {None, 'x', 'y'}, optional
Name of scaling axis. If set to 'x' data is scaled on x-axis, if set to 'y'
data is scaled on y-axis. If None, data is scaled in both directions
(default).
"""
if axis not in ["x", "y", None]:
raise ValueError(f"Expected axis to be one of ('x', 'y', None), got {axis}")
if axis is None or axis == "x":
if "at_x" in self.protected and self.events["at_x"].dtype in [
"int64",
"float64",
]:
self.events["at_x"] = self.events["at_x"].map(lambda x: x * factor)
if "to_x" in self.protected and self.events["to_x"].dtype in [
"int64",
"float64",
]:
self.events["at_x"] = self.events["at_x"].map(lambda x: x * factor)
if axis is None or axis == "y":
if "at_y" in self.protected and self.events["at_y"].dtype in [
"int64",
"float64",
]:
self.events["at_y"] = self.events["at_y"].map(lambda x: x * factor)
if "to_y" in self.protected and self.events["to_y"].dtype in [
"int64",
"float64",
]:
self.events["to_y"] = self.events["to_y"].map(lambda x: x * factor)
def reflect(self, axis: str):
"""Reflects data on given `axis`.
Parameters
----------
axis : {'x', 'y'}
Name of reflection axis. If set to "x", data is reflected on x-axis,
if set to "y", data is reflected on y-axis.
"""
if axis == "x":
self.scale(factor=-1, axis="y")
elif axis == "y":
self.scale(factor=-1, axis="x")
else:
raise ValueError(f"Expected axis to be one of ('x', 'y'), got {axis}")
def rotate(self, alpha: float):
"""Rotates data on given angle 'alpha' around the origin.
Parameters
----------
alpha: float
Rotation angle in degrees. Alpha must be between -360 and 360.
If positive alpha, data is rotated in counter clockwise direction.
If negative, data is rotated in clockwise direction around the origin.
"""
if not (-360 <= alpha <= 360):
raise ValueError(
f"Expected alpha to be from -360 to 360, got {alpha} instead"
)
phi = np.radians(alpha)
cos = np.cos(phi)
sin = np.sin(phi)
# construct rotation matrix
r = np.array([[cos, -sin], [sin, cos]]).transpose()
if "at_x" in self.protected and self.events["at_x"].dtype in [
"int64",
"float64",
]:
if "at_y" in self.protected and self.events["at_y"].dtype in [
"int64",
"float64",
]:
self.events[["at_x", "at_y"]] = pd.DataFrame(
np.round(np.dot(self.events[["at_x", "at_y"]], r), 3)
)
if "to_x" in self.protected and self.events["to_x"].dtype in [
"int64",
"float64",
]:
if "to_y" in self.protected and self.events["to_y"].dtype in [
"int64",
"float64",
]:
self.events[["to_x", "to_y"]] = pd.DataFrame(
np.round(np.dot(self.events[["to_x", "to_y"]], r), 3)
)
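# Quick check of the rotation convention above (illustrative): with alpha=90,
# the matrix r maps the row vector (1, 0) to approximately (0, 1), i.e. a
# counter-clockwise rotation about the origin applied to the at_* and to_* pairs.
# events.rotate(90)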
def slice(
self,
start: float = None,
end: float = None,
slice_by="gameclock",
inplace: bool = False,
):
"""Return copy of object with events sliced in a time interval.
Intended columns for using this function are ``gameclock`` (total seconds) or
``frameclock``. However, also allows slicing by any other column that manifests
a temporal relation between events (e.g. ``minute``). Excludes all entries
without a valid entry in the specified column (e.g. None).
Parameters
----------
start : float, optional
Start frame or second of slice. Defaults to beginning of segment.
end : float, optional
End frame or second of slice (the end frame itself is excluded). Defaults to
the last event of the segment (inclusive).
slice_by: {'gameclock', 'frameclock'}, optional
Column used to slice the events. Defaults to ``gameclock``.
inplace: bool, optional
If set to ``False`` (default), a new object is returned, otherwise the
operation is performed in place on the called object.
Returns
-------
events_sliced: Union[Event, None]
"""
if slice_by not in self.events:
ValueError(f"Events object does not contain column {slice_by}!")
if start is None:
start = 0
if end is None:
end = np.nanmax(self.events[slice_by].values) + 1
sliced_data = self.events[self.events[slice_by] >= start].copy()
sliced_data = sliced_data[sliced_data[slice_by] < end]
sliced_data.reset_index(drop=True, inplace=True)
events_sliced = None
if inplace:
self.events = sliced_data
else:
events_sliced = Events(
events=sliced_data,
direction=deepcopy(self.direction),
)
return events_sliced
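# Usage sketch (the cut-off is an assumed example value): keep events from the
# first 45 minutes of play, measured on the gameclock in seconds.
# first_half = events.slice(start=0, end=45 * 60, slice_by="gameclock")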
def get_event_stream(
self,
fade: Union[int, None] = 0,
**kwargs,
) -> Code:
"""Generates a Code object containing the eIDs of all events at the
respective frame and optionally subsequent frames as defined by the fade
argument.
This function translates the object's DataFrame of temporally irregular events
to a continuous frame-wise representation. This can be especially helpful to
connect event data with spatiotemporal data, e.g., for filtering the latter
based on the former. Events overwrite preceding event's fade, and unfilled
values are set to np.nan.
Notes
------
Requires the DataFrame to contain the protected ``frameclock`` column.
Parameters
----------
fade: int, optional
Number of additional frames for which the Code object should stay at a
value after the event occurred. The value is overwritten if another event
occurs within the fade duration. If set to zero, the value is maintained
only for a single frame. If set to None, the value is maintained until
either the next event or until the end of the sequence. Defaults to 0.
kwargs:
Keyword arguments of the Code object ("name", "definitions", "framerate")
that are passed down to instantiate the returned event_stream.
Returns
-------
event_stream: Code
Generated continuous event stream describing the designated game state.
"""
if "frameclock" in self.protected_missing:
raise ValueError(
"Cannot create event stream from Events object missing "
"the protected column 'frameclock'. Consider calling "
"add_frameclock to the Events object first!"
)
if fade is not None and fade < 0:
raise ValueError(
f"Expected fade to be a positive integer or None, got {fade} instead."
)
sorted_events = self.events.sort_values("frameclock")
start = int(np.round(np.nanmin(sorted_events["frameclock"].values)))
end = int(np.round(np.nanmax(sorted_events["frameclock"].values))) + 1
code = np.full((end - start,), np.nan, dtype=object)
for _, event in sorted_events.iterrows():
if pd.isna(event["frameclock"]):
continue
frame = int(np.round(event["frameclock"]))
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 16 12:41:30 2022
@author: DanielT17
"""
# %% Imports
from crcengine import get_algorithm_params,algorithms_available,new,create
from logging import Formatter,DEBUG,INFO,WARNING,ERROR,CRITICAL,getLogger,StreamHandler
import numpy as np
from math import ceil
from collections import Counter
from itertools import product
# %% Formatter
class Custom_Formatter(Formatter):
"""
Description:
This function configures the logger output format.
Inputs:
logging.Formatter - logging - logging formatter base class.
Outputs:
formatter.format(record) - string - the log record formatted as a string.
"""
grey = "\x1b[38;20m"; green = "\x1b[32;1m"; yellow = "\x1b[33;20m"
red = "\x1b[31;20m"; bold_red = "\x1b[31;1m"; reset = "\x1b[0m"
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {DEBUG: grey + format + reset,
INFO: green + format + reset,
WARNING: yellow + format + reset,
ERROR: red + format + reset,
CRITICAL: bold_red + format + reset}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = Formatter(log_fmt)
return formatter.format(record)
def Logger_Object():
"""
Description:
This function creates and configurates the logger object.
Inputs:
None.
Outputs:
logger - logging module - a logging object configured.
"""
logger = getLogger("CRC reversing")
logger.setLevel(DEBUG)
ch = StreamHandler()
ch.setLevel(DEBUG)
ch.setFormatter(Custom_Formatter())
logger.addHandler(ch)
return logger
# %% Helper functions
def Swap(a,b):
'''
Description:
This function swaps two variables.
Inputs:
a,b - ints.
Outputs:
c,d - ints.
'''
c = b
d = a
return c,d
def Bitstring_To_Bytes(s,endian='big'):
'''
Description:
This function gets a bit string binary and turns it to bytes array.
Inputs:
s - string - binary string.
endian - str - big or little endian representation
Outputs:
return - byte array of s.
'''
return int(s, 2).to_bytes((len(s) + 7) // 8, byteorder=endian)
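# Quick worked example: Bitstring_To_Bytes('101100001111') == b'\x0b\x0f'
# (12 bits -> 2 bytes, big-endian by default).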
def Bytearray_To_Int(s,endian="big"):
'''
Description:
This function turn a byte array into an int.
Inputs:
s - byte array.
Outputs:
returns - int.
'''
return int.from_bytes(s, endian)
def Int_To_Bytearray(s,endian="big"):
'''
Description:
This function turns an int into a bytearray.
Inputs:
s - int.
Outputs:
returns - byte array.
'''
return s.to_bytes(ceil(s.bit_length()/8),endian)
def Remove_Zeros_From_Binary_String(string):
'''
Description:
This function removes leading (prepended) zeros from a binary string.
Inputs:
string - a string sequence of ones and zeros.
Outputs:
string - without leading zeros.
'''
counter = 0
for char in string:
if char == '0':
counter += 1
else:
break
return string[counter:]
def Turn_Bitstring_To_Numpy_Array_Of_Bits(string,crc_width):
'''
Description:
This function turns a bit string into a numpy array of size crc_width
where each arr[i] is equal to string[i]. A binary vector in GF(2).
Inputs:
string - string - a binary string.
crc_width - int - the crc polynomial width
Outputs:
arr - numpy array - vector version of the binary string in GF(2).
'''
arr = np.zeros((1,crc_width),dtype=np.uint8)
for i in range(crc_width):
arr[0,i] = int(string[i])
return arr
def Turn_Numpy_Array_Of_Bits_To_Bitstring(arr,crc_width):
'''
Description:
This function turns a numpy array of bits in GF(2) to a bit string.
Inputs:
arr - numpy array - a vector of bits in GF(2).
crc_width - int - the crc polynomial width
Outputs:
string - string - a binary string.
'''
string = ''
for i in range(crc_width):
string += str(arr[i])
return string
def Byte_Xor(ba1, ba2):
"""
Description:
This function computes the xor between two byte arrays.
Inputs:
ba1, ba2 - byte arrays - are byte arrays of the same size to be xored.
Outputs:
xored - byte array - A byte array with the xored result.
"""
xored = bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])
return xored
def Print_Crc_Parameters(crc_algorithm_name):
'''
Description:
This function prints the CRC algorithm parameters and returns the CRC width.
Inputs:
crc_algorithm_name - str - the name of the crc algorithm.
Outputs:
width - int - the width of the CRC polynomial in bits (the parameters are also printed).
'''
params = get_algorithm_params(crc_algorithm_name)
print('\nCyclic redundancy check parameters: \n')
for _,param in enumerate(params.items()):
if param[0] == 'poly':
print('poly: ' + str(hex(param[1])))
else:
print(str(param[0]) + ': ' + str(param[1]) + '.')
print('\n')
return params['width']
def Get_CRC(data,crc_algorithm):
'''
Description:
This functions computes the crc of some byte array named data, and returns
a byte array object named crc_bytes.
Inputs:
data - byte array - we want to calculate the crc of this input.
crc_algorithm - crcengine.calc._CrcLsbf - the crc object which will be
used for the calculation.
Outputs:
crc_bytes - byte array - the byte array of object of the resulting crc.
'''
crc = crc_algorithm(data)
length_in_bits = crc.bit_length()
crc_bytes = crc.to_bytes(ceil(length_in_bits/8),'big')
return crc_bytes
def Print_Packets(packets):
'''
Description:
This function prints packets entered by the user in hexadecimal.
Inputs:
packets - list - a list of packets.
Outputs:
None.
Prints the packets in hexadecimal as inputed by the user.
'''
print('\nPresenting packets inputed by user: \n')
for i in range(len(packets)):
print('Packet ' + str(i+1) + ': ' + packets[i].hex())
def reverse_poly(poly,order):
'''
Description:
Computes the bit-reversed polynomial representation.
Inputs:
poly - int - polynomial coefficients
order - int - polynomial degree
Outputs:
return - int - reversed polynomial representation
'''
len_string = order - len(bin(poly)[2:])
return int(bin(poly)[::-1][:-2],2) << len_string
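# Worked example: the CRC-32 polynomial 0x04C11DB7 with order 32 reverses to
# the well-known reflected form, reverse_poly(0x04C11DB7, 32) == 0xEDB88320.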
def recipolar_poly(poly,order):
'''
Description:
Computes the reciprocal polynomial representation: p(x) -> p(x^(-1)) * x^n.
Inputs:
poly - int - polynomial coefficients
order - int - polynomial degree
Outputs:
return - int - reciprocal polynomial representation
'''
temp = bin(poly)[::-1][:-2][1:]
temp = temp + '0'*(order-len(temp)-1) + '1'
return int(temp,2)
def Generate_All_Poly_Representations(poly,crc_width,enb_combinations=False):
'''
Description:
This function calculates all possible polynomial representations.
Inputs:
poly - int - polynomial coefficients
crc_width - int - polynomial degree
enb_combinations - boolean - if enabled returns a list of all possible
polynomial combinations.
Outputs:
estimated_reverse_poly,estimated_poly_recipolar,
estimated_poly_recipolar_reverese,estimated_reverse_poly_recipolar,
estimated_reverse_poly_recipolar_reverese - ints - polynomial representations.
'''
estimated_reverse_poly = reverse_poly(poly,crc_width)
estimated_poly_recipolar = recipolar_poly(poly,crc_width)
estimated_poly_recipolar_reverese = reverse_poly(poly,crc_width)
estimated_reverse_poly_recipolar = recipolar_poly(estimated_reverse_poly,crc_width)
estimated_reverse_poly_recipolar_reverese = reverse_poly(estimated_poly_recipolar,crc_width)
if enb_combinations:
ls = [poly,estimated_reverse_poly,estimated_poly_recipolar,estimated_poly_recipolar_reverese,estimated_reverse_poly_recipolar,estimated_reverse_poly_recipolar_reverese]
polys = []
for cur_poly in ls:
for i in range(4):
if i == 0: polys.append(cur_poly)
elif i == 1: polys.append(cur_poly+1)
elif i == 2: polys.append(cur_poly+2**(crc_width))
elif i == 3:
counter = 0;
hex_cur_poly = hex(cur_poly)[2:]
for j in reversed(hex_cur_poly):
if j == '0':
counter += 1
else:
break;
if counter != 0:
polys.append(int(hex_cur_poly[:-counter],16))
return polys
else:
return estimated_reverse_poly,estimated_poly_recipolar,estimated_poly_recipolar_reverese,estimated_reverse_poly_recipolar,estimated_reverse_poly_recipolar_reverese
def Print_All_Polynomial_Representations(poly,crc_width):
'''
Description:
This function prints all possible polynomial representations.
Inputs:
poly - int - polynomial coefficients
crc_width - int - polynomial degree
Outputs:
None.
Prints all possible polynomial representations.
'''
estimated_reverse_poly,estimated_poly_recipolar,estimated_poly_recipolar_reverese,estimated_reverse_poly_recipolar,estimated_reverse_poly_recipolar_reverese = Generate_All_Poly_Representations(poly,crc_width)
print('\n')
print('----------------------------------------')
print('Estimated CRC polynomial:')
print('Normal mode: ' + str(hex(poly)))
print('Reverse mode: ' + str(hex(estimated_reverse_poly)))
print('Recipolar mode: ' + str(hex(estimated_poly_recipolar)))
print('Reversed recipolar mode: ' + str(hex(estimated_poly_recipolar_reverese)))
print('Recipolar reverse mode: ' + str(hex(estimated_reverse_poly_recipolar)))
print('Reversed recipolar reverse mode: ' + str(hex(estimated_reverse_poly_recipolar_reverese)))
print('----------------------------------------\n')
def Ranking_Estimated_Polynomial(polys):
'''
Description:
This function gets a list of possible candidate polynomials, and outputs
the three (or two or one) most likely polynomials.
Inputs:
polys - list - list of integers which are candidate polynomials.
Outputs:
polys_best - list - list of the most likely polynomial to be a correct
polynomial, ranked from most likely to least likely.
occurrence - list - the probability (in this case percentage) of a polynomial
to be the correct polynomial according to the algorithm.
'''
polys = np.asarray(polys,dtype = object)
polys = polys[polys != 0]
values, counts = Unique(polys)
for i in reversed(range(3)):
try: inds = np.argpartition(counts, -(i+1))[-(i+1):]; break;
except: continue;
occurrence = []; polys_best = [];
for ind in inds:
occurrence.append(counts[ind]); polys_best.append(values[ind]);
occurrence = occurrence[::-1]; polys_best = polys_best[::-1]
occurrence = occurrence/np.sum(occurrence) * 100;
ranking = np.argsort(occurrence)[::-1];
occurrence = occurrence[ranking];
polys_best = [x for _,x in sorted(zip(list(ranking),polys_best))][::-1]
return polys_best,occurrence
def Print_Estimated_Polynomial_By_Ranking_After_Method(polys,occurrence,crc_width,enable_pre_text=True):
'''
Description:
This function gets a list of rankings (occurrences) and polynomials and
prints them in that order.
Inputs:
polys - list - list of possible candidate polynomials.
occurrence - numpy array - Ordered ranking of the polynomials.
crc_width - int - estimated polynomial degree
Outputs:
None. Prints polynomial in order of ranking.
'''
if enable_pre_text:
print('\n\nPrinting the three most likely polynomials: \n')
else:
print('\n\n\n\n\n')
print('-------------------------------------------------------------------------------------')
print('The list of polynomials we will continue to use in our XorIn estimation procedure:')
print('-------------------------------------------------------------------------------------\n\n')
for i in range(len(polys)):
print('\nProbability to be the right polynomial is: ' + str(np.round(occurrence[i],2)) + '%.')
Print_All_Polynomial_Representations(polys[i],crc_width)
if not enable_pre_text:
print('\n\n\n\n\n')
def Merge_By_Ranking_Polynomials(occurrence1,polys1,occurrence2,polys2):
'''
Description:
This function gets two lists of polynomials and two lists of their respective
rankings, then merges and rescores them with respect to unique values.
Inputs:
occurrence1,occurrence2 - numpy arrays - Ordered ranking of the polynomials.
polys1,polys2 - numpy arrays - A numpy array of the ordered polynomials.
Outputs:
polys_unique - list - list of the remaining ranked unique polynomials.
new_occurrences - numpy array - array of the ranking of the polynomials.
'''
occurrences = np.array(list(occurrence1) + list(occurrence2));
occurrences = occurrences/np.sum(occurrences)*100
ranking = np.argsort(occurrences)
occurrences = list(occurrences[ranking])[::-1]
polys = list(polys1) + list(polys2)
polys = [x for _,x in sorted(zip(list(ranking),polys))][::-1]
polys_unique,_ = Unique(polys)
new_occurrences = [];
for cur_poly in polys_unique:
indcies = np.where(cur_poly == np.array(polys))
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range, zip
import random
import numpy as np
from copy import deepcopy
from scipy.ndimage import map_coordinates
from scipy.ndimage.filters import gaussian_filter, gaussian_gradient_magnitude
from scipy.ndimage.morphology import grey_dilation
from skimage.transform import resize
from scipy.ndimage.measurements import label as lb
def generate_elastic_transform_coordinates(shape, alpha, sigma):
n_dim = len(shape)
offsets = []
for _ in range(n_dim):
offsets.append(gaussian_filter((np.random.random(shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
tmp = tuple([np.arange(i) for i in shape])
coords = np.meshgrid(*tmp, indexing='ij')
indices = [np.reshape(i + j, (-1, 1)) for i, j in zip(offsets, coords)]
return indices
def create_zero_centered_coordinate_mesh(shape):
tmp = tuple([np.arange(i) for i in shape])
coords = np.array(np.meshgrid(*tmp, indexing='ij')).astype(float)
for d in range(len(shape)):
coords[d] -= ((np.array(shape).astype(float) - 1) / 2.)[d]
return coords
def convert_seg_image_to_one_hot_encoding(image, classes=None):
'''
Takes as input an nd array of a label map (any dimension). Outputs a one hot encoding of the label map.
Example (3D): if input is of shape (x, y, z), the output will be of shape (n_classes, x, y, z)
'''
if classes is None:
classes = np.unique(image)
out_image = np.zeros([len(classes)]+list(image.shape), dtype=image.dtype)
for i, c in enumerate(classes):
out_image[i][image == c] = 1
return out_image
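# Usage sketch (toy label map): a 2x2 segmentation with classes {0, 1, 2}
# becomes a (3, 2, 2) one-hot volume, one channel per class.
# seg = np.array([[0, 1], [2, 1]])
# onehot = convert_seg_image_to_one_hot_encoding(seg)
# onehot[1] -> array([[0, 1], [0, 1]])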
def elastic_deform_coordinates(coordinates, alpha, sigma):
n_dim = len(coordinates)
offsets = []
for _ in range(n_dim):
offsets.append(
gaussian_filter((np.random.random(coordinates.shape[1:]) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
offsets = np.array(offsets)
indices = offsets + coordinates
return indices
def rotate_coords_3d(coords, angle_x, angle_y, angle_z):
rot_matrix = np.identity(len(coords))
rot_matrix = create_matrix_rotation_x_3d(angle_x, rot_matrix)
rot_matrix = create_matrix_rotation_y_3d(angle_y, rot_matrix)
rot_matrix = create_matrix_rotation_z_3d(angle_z, rot_matrix)
coords = np.dot(coords.reshape(len(coords), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)
return coords
def rotate_coords_2d(coords, angle):
rot_matrix = create_matrix_rotation_2d(angle)
coords = np.dot(coords.reshape(len(coords), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)
return coords
def scale_coords(coords, scale):
return coords * scale
def uncenter_coords(coords):
shp = coords.shape[1:]
coords = deepcopy(coords)
for d in range(coords.shape[0]):
coords[d] += (shp[d] - 1) / 2.
return coords
def interpolate_img(img, coords, order=3, mode='nearest', cval=0.0, is_seg=False):
if is_seg and order != 0:
unique_labels = np.unique(img)
result = np.zeros(coords.shape[1:], img.dtype)
for i, c in enumerate(unique_labels):
res_new = map_coordinates((img == c).astype(float), coords, order=order, mode=mode, cval=cval)
result[res_new >= 0.5] = c
return result
else:
return map_coordinates(img.astype(float), coords, order=order, mode=mode, cval=cval).astype(img.dtype)
def generate_noise(shape, alpha, sigma):
noise = np.random.random(shape) * 2 - 1
noise = gaussian_filter(noise, sigma, mode="constant", cval=0) * alpha
return noise
def find_entries_in_array(entries, myarray):
entries = np.array(entries)
values = np.arange(np.max(myarray) + 1)
lut = np.zeros(len(values), 'bool')
lut[entries.astype("int")] = True
return np.take(lut, myarray.astype(int))
def center_crop_3D_image(img, crop_size):
center = np.array(img.shape) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * len(img.shape)
else:
center_crop = crop_size
assert len(center_crop) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
return img[int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.),
int(center[2] - center_crop[2] / 2.):int(center[2] + center_crop[2] / 2.)]
def center_crop_3D_image_batched(img, crop_size):
# dim 0 is batch, dim 1 is channel, dim 2, 3 and 4 are x y z
center = np.array(img.shape[2:]) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * (len(img.shape) - 2)
else:
center_crop = crop_size
assert len(center_crop) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
return img[:, :, int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.),
int(center[2] - center_crop[2] / 2.):int(center[2] + center_crop[2] / 2.)]
def center_crop_2D_image(img, crop_size):
center = np.array(img.shape) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * len(img.shape)
else:
center_crop = crop_size
assert len(center_crop) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
return img[int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.)]
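# Usage sketch: cropping the central 2x2 patch from a 4x4 image keeps rows and
# columns 1:3.
# img = np.arange(16).reshape(4, 4)
# center_crop_2D_image(img, 2) -> array([[5, 6], [9, 10]])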
def center_crop_2D_image_batched(img, crop_size):
# dim 0 is batch, dim 1 is channel, dim 2 and 3 are x y
center = np.array(img.shape[2:]) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * (len(img.shape) - 2)
else:
center_crop = crop_size
assert len(center_crop) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
return img[:, :, int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.)]
def random_crop_3D_image(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * len(img.shape)
else:
assert len(crop_size) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
if crop_size[0] < img.shape[0]:
lb_x = np.random.randint(0, img.shape[0] - crop_size[0])
elif crop_size[0] == img.shape[0]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[1]:
lb_y = np.random.randint(0, img.shape[1] - crop_size[1])
elif crop_size[1] == img.shape[1]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
if crop_size[2] < img.shape[2]:
lb_z = np.random.randint(0, img.shape[2] - crop_size[2])
elif crop_size[2] == img.shape[2]:
lb_z = 0
else:
raise ValueError("crop_size[2] must be smaller or equal to the images z dimension")
return img[lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1], lb_z:lb_z + crop_size[2]]
def random_crop_3D_image_batched(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * (len(img.shape) - 2)
else:
assert len(crop_size) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
if crop_size[0] < img.shape[2]:
lb_x = np.random.randint(0, img.shape[2] - crop_size[0])
elif crop_size[0] == img.shape[2]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[3]:
lb_y = np.random.randint(0, img.shape[3] - crop_size[1])
elif crop_size[1] == img.shape[3]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
if crop_size[2] < img.shape[4]:
lb_z = np.random.randint(0, img.shape[4] - crop_size[2])
elif crop_size[2] == img.shape[4]:
lb_z = 0
else:
raise ValueError("crop_size[2] must be smaller or equal to the images z dimension")
return img[:, :, lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1], lb_z:lb_z + crop_size[2]]
def random_crop_2D_image(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * len(img.shape)
else:
assert len(crop_size) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
if crop_size[0] < img.shape[0]:
lb_x = np.random.randint(0, img.shape[0] - crop_size[0])
elif crop_size[0] == img.shape[0]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[1]:
lb_y = np.random.randint(0, img.shape[1] - crop_size[1])
elif crop_size[1] == img.shape[1]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
return img[lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1]]
def random_crop_2D_image_batched(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * (len(img.shape) - 2)
else:
assert len(crop_size) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
if crop_size[0] < img.shape[2]:
lb_x = np.random.randint(0, img.shape[2] - crop_size[0])
elif crop_size[0] == img.shape[2]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[3]:
lb_y = np.random.randint(0, img.shape[3] - crop_size[1])
elif crop_size[1] == img.shape[3]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
return img[:, :, lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1]]
def resize_image_by_padding(image, new_shape, pad_value=None):
shape = tuple(list(image.shape))
new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2, len(shape))), axis=0))
if pad_value is None:
if len(shape) == 2:
pad_value = image[0, 0]
elif len(shape) == 3:
pad_value = image[0, 0, 0]
else:
raise ValueError("Image must be either 2 or 3 dimensional")
res = np.ones(list(new_shape), dtype=image.dtype) * pad_value
start = np.array(new_shape) / 2. - np.array(shape) / 2.
if len(shape) == 2:
res[int(start[0]):int(start[0]) + int(shape[0]), int(start[1]):int(start[1]) + int(shape[1])] = image
elif len(shape) == 3:
res[int(start[0]):int(start[0]) + int(shape[0]), int(start[1]):int(start[1]) + int(shape[1]),
int(start[2]):int(start[2]) + int(shape[2])] = image
return res
def resize_image_by_padding_batched(image, new_shape, pad_value=None):
shape = tuple(list(image.shape[2:]))
new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2, len(shape))), axis=0))
if pad_value is None:
if len(shape) == 2:
pad_value = image[0, 0]
elif len(shape) == 3:
pad_value = image[0, 0, 0]
else:
raise ValueError("Image must be either 2 or 3 dimensional")
start = np.array(new_shape) / 2. - np.array(shape) / 2.
if len(shape) == 2:
res = np.ones((image.shape[0], image.shape[1], new_shape[0], new_shape[1]), dtype=image.dtype) * pad_value
res[:, :, int(start[0]):int(start[0]) + int(shape[0]), int(start[1]):int(start[1]) + int(shape[1])] = image[:,
:]
elif len(shape) == 3:
res = np.ones((image.shape[0], image.shape[1], new_shape[0], new_shape[1], new_shape[2]),
dtype=image.dtype) * pad_value
res[:, :, int(start[0]):int(start[0]) + int(shape[0]), int(start[1]):int(start[1]) + int(shape[1]),
int(start[2]):int(start[2]) + int(shape[2])] = image[:, :]
return res
def create_matrix_rotation_x_3d(angle, matrix=None):
rotation_x = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
if matrix is None:
return rotation_x
return np.dot(matrix, rotation_x)
def create_matrix_rotation_y_3d(angle, matrix=None):
rotation_y = np.array([[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
if matrix is None:
return rotation_y
return np.dot(matrix, rotation_y)
def create_matrix_rotation_z_3d(angle, matrix=None):
rotation_z = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
if matrix is None:
return rotation_z
return np.dot(matrix, rotation_z)
def create_matrix_rotation_2d(angle, matrix=None):
rotation = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
if matrix is None:
return rotation
return np.dot(matrix, rotation)
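# Quick check (illustrative): a 90 degree rotation matrix maps (1, 0) to
# approximately (0, 1).
# r = create_matrix_rotation_2d(np.pi / 2)
# np.allclose(np.dot(r, [1, 0]), [0, 1]) -> True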
def create_random_rotation(angle_x=(0, 2 * np.pi), angle_y=(0, 2 * np.pi), angle_z=(0, 2 * np.pi)):
return create_matrix_rotation_x_3d(np.random.uniform(*angle_x),
create_matrix_rotation_y_3d(np.random.uniform(*angle_y),
create_matrix_rotation_z_3d(
np.random.uniform(*angle_z))))
def illumination_jitter(img, u, s, sigma):
# img must have shape [....., c] where c is the color channel
alpha = np.random.normal(0, sigma, s.shape)
jitter = np.dot(u, alpha * s)
img2 = np.array(img)
for c in range(img.shape[0]):
img2[c] = img[c] + jitter[c]
return img2
def general_cc_var_num_channels(img, diff_order=0, mink_norm=1, sigma=1, mask_im=None, saturation_threshold=255,
dilation_size=3, clip_range=True):
# img must have first dim color channel! img[c, x, y(, z, ...)]
dim_img = len(img.shape[1:])
if clip_range:
minm = img.min()
maxm = img.max()
img_internal = np.array(img)
if mask_im is None:
mask_im = np.zeros(img_internal.shape[1:], dtype=bool)
img_dil = deepcopy(img_internal)
for c in range(img.shape[0]):
img_dil[c] = grey_dilation(img_internal[c], tuple([dilation_size] * dim_img))
mask_im = mask_im | np.any(img_dil >= saturation_threshold, axis=0)
if sigma != 0:
mask_im[:sigma, :] = 1
mask_im[mask_im.shape[0] - sigma:, :] = 1
mask_im[:, mask_im.shape[1] - sigma:] = 1
mask_im[:, :sigma] = 1
if dim_img == 3:
mask_im[:, :, mask_im.shape[2] - sigma:] = 1
mask_im[:, :, :sigma] = 1
output_img = deepcopy(img_internal)
if diff_order == 0 and sigma != 0:
for c in range(img_internal.shape[0]):
img_internal[c] = gaussian_filter(img_internal[c], sigma, diff_order)
elif diff_order == 1:
for c in range(img_internal.shape[0]):
img_internal[c] = gaussian_gradient_magnitude(img_internal[c], sigma)
elif diff_order > 1:
raise ValueError("diff_order can only be 0 or 1. 2 is not supported (ToDo, maybe)")
img_internal = np.abs(img_internal)
white_colors = []
if mink_norm != -1:
kleur = np.power(img_internal, mink_norm)
for c in range(kleur.shape[0]):
white_colors.append(np.power((kleur[c][mask_im != 1]).sum(), 1. / mink_norm))
else:
for c in range(img_internal.shape[0]):
white_colors.append(np.max(img_internal[c][mask_im != 1]))
som = np.sqrt(np.sum([i ** 2 for i in white_colors]))
white_colors = [i / som for i in white_colors]
for c in range(output_img.shape[0]):
output_img[c] /= (white_colors[c] * np.sqrt(3.))
if clip_range:
output_img[output_img < minm] = minm
output_img[output_img > maxm] = maxm
return white_colors, output_img
def convert_seg_to_bounding_box_coordinates(data_dict, dim, get_rois_from_seg_flag=False, class_specific_seg_flag=False):
'''
:param data_dict:
:param dim:
:param get_rois_from_seg_flag:
:return: coords (y1, x1, y2, x2)
'''
bb_target = []
roi_masks = []
roi_labels = []
out_seg = np.copy(data_dict['seg'])
for b in range(data_dict['seg'].shape[0]):
p_coords_list = []
p_roi_masks_list = []
p_roi_labels_list = []
if np.sum(data_dict['seg'][b] != 0) > 0:
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def user_bias_update(A, u, v, mu, c):
m = np.shape(A)[0]
n = np.shape(A)[1]
b = np.array([0.] * m)
for i in xrange(m):
for j in xrange(n):
b[i] += (-1. / n) * (np.dot(u[i], v[j].T) + c[j] + mu - A[i, j])
return b
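# The loops above implement the closed-form bias update of the ALS model,
# b_i = -(1 / n) * sum_j (u_i . v_j + c_j + mu - A_ij),
# i.e. the average residual of user i's ratings across all n movies.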
def user_vector_update(A, v, k, mu, b, c):
m = np.shape(A)[0]
n = np.shape(A)[1]
u = np.zeros([m, k])
v_matrix = np.dot(v.T, v)
for i in xrange(m):
right_side = np.zeros([k, ])
for j in xrange(n):
right_side += b[i] * v[j]
right_side += c[j] * v[j]
right_side += mu * v[j]
right_side -= A[i, j] * v[j]
u[i, :] = -np.dot(np.linalg.inv(v_matrix), right_side)
return u
def movie_bias_update(A, u, v, mu, b):
m = np.shape(A)[0]
n = np.shape(A)[1]
c = np.array([0.] * n)
for i in xrange(m):
for j in xrange(n):
c[j] += (-1. / m) * (np.dot(u[i], v[j].T) + b[i] + mu - A[i, j])
return c
def movie_vector_update(A, u, k, mu, b, c):
m = np.shape(A)[0]
n = np.shape(A)[1]
v = np.zeros([n, k])
u_matrix = np.dot(u.T, u)
for j in xrange(n):
right_side = np.zeros([k, ])
for i in xrange(m):
right_side += b[i] * u[i]
right_side += c[j] * u[i]
right_side += mu * u[i]
right_side -= A[i, j] * u[i]
v[j, :] = -np.dot(np.linalg.inv(u_matrix), right_side)
return v
def log_update(A, u, v, T, mu, b, c):
log_iter = 0
m = np.shape(A)[0]
n = np.shape(A)[1]
for i in xrange(m):
for j in xrange(n):
log_iter += (-1. / 2) * \
((np.dot(u[i], v[j].T) + b[i] + c[j] + mu - A[i, j])**2)
return log_iter
def alt_least_squares(A, k, T):
"""
Inputs:
A: input data
k: number of dimensions for movie vectors & user vectors
T: number of iterations
Output:
Log-likelihood function for each iteration
"""
# Calculate average rating in A
mu = np.mean(A["ratings"])
# Independently draw u_i and v_j vectors from multivariate normal
m = max(A["i"])
n = max(A["j"])
omega = len(A["i"])
# Total # of elements in matrix
A_matrix = np.zeros([m, n])
for l in xrange(omega):
A_matrix[A["i"][l] - 1][A["j"][l] - 1] = A["ratings"][l]
mean_vect = np.array([0] * k)
cov_matrix = (1. / k) * np.identity(k)
u = []
v = []
for i in xrange(m):
u.append(np.random.multivariate_normal(mean_vect, cov_matrix))
for j in xrange(n):
v.append(np.random.multivariate_normal(mean_vect, cov_matrix))
# Initalize b_i and c_j to 0
u = np.array(u)
import typing as tp
import flax.linen as nn
import flax_tools as ft
import jax
import jax.numpy as jnp
import numpy as np
import optax
class TestOptimizer:
def test_apply_updates(self):
key = jax.random.PRNGKey(0)
optax_optim = optax.adam(0.1)
optimizer = ft.Optimizer(optax_optim)
x = jnp.ones((2, 4))
linear = ft.ModuleManager.new(nn.Dense(3)).init(key, x)
optimizer = optimizer.init(linear["params"])
opt_state = optax_optim.init(linear["params"])
@jax.grad
def loss_fn(params):
return sum(jnp.mean(x**2) for x in jax.tree_leaves(params))
grads = loss_fn(linear["params"])
optax_params: tp.Dict[str, tp.Any]
optax_updates, opt_state = optax_optim.update(
grads, opt_state, linear["params"]
)
optax_params = optax.apply_updates(optax_updates, linear["params"])
optimizer_params, optimizer = optimizer.update(grads, linear["params"])
assert all(
np.allclose(a, b)
for a, b in zip(jax.tree_leaves(opt_state), jax.tree_leaves(optimizer))
)
assert all(
np.allclose(a, b)
"""Generate specific tensor network states and operators.
"""
import math
import functools
import itertools
import collections
from numbers import Integral
import numpy as np
import opt_einsum as oe
from ..core import make_immutable, ikron
from ..utils import deprecated, unique, concat
from ..gen.operators import (
spin_operator, eye, _gen_mbl_random_factors, ham_heis
)
from ..gen.rand import randn, choice, random_seed_fn, rand_phase
from .tensor_core import (Tensor, new_bond, TensorNetwork, rand_uuid,
tensor_direct_product)
from .array_ops import asarray, sensibly_scale
from .decomp import eigh
from .tensor_arbgeom import TensorNetworkGen, TensorNetworkGenVector
from .tensor_1d import MatrixProductState, MatrixProductOperator
from .tensor_2d import gen_2d_bonds, TensorNetwork2D
from .tensor_3d import TensorNetwork3D
from .tensor_1d_tebd import LocalHam1D
from .tensor_2d_tebd import LocalHam2D
@random_seed_fn
def rand_tensor(shape, inds, tags=None, dtype='float64', left_inds=None):
"""Generate a random tensor with specified shape and inds.
Parameters
----------
shape : sequence of int
Size of each dimension.
inds : sequence of str
Names of each dimension.
tags : sequence of str
Labels to tag this tensor with.
dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional
The underlying data type.
left_inds : sequence of str, optional
Which, if any, indices to group as 'left' indices of an effective
matrix. This can be useful, for example, when automatically applying
unitary constraints to impose a certain flow on a tensor network but at
the atomistic (Tensor) level.
Returns
-------
Tensor
"""
data = randn(shape, dtype=dtype)
return Tensor(data=data, inds=inds, tags=tags, left_inds=left_inds)
@random_seed_fn
def rand_phased(shape, inds, tags=None, dtype=complex):
"""Generate a random tensor with specified shape and inds, and randomly
'phased' (distributed on the unit circle) data, such that
``T.H @ T == T.norm()**2 == T.size``.
Parameters
----------
shape : sequence of int
Size of each dimension.
inds : sequence of str
Names of each dimension.
tags : sequence of str
Labels to tag this tensor with.
dtype : {'complex128', 'complex64'}, optional
The underlying data type - can only be complex.
Returns
-------
Tensor
"""
data = rand_phase(shape, dtype=dtype)
return Tensor(data=data, inds=inds, tags=tags)
def gen_unique_edges(edges):
seen = set()
for node_a, node_b in edges:
if node_b < node_a:
node_a, node_b = node_b, node_a
key = (node_a, node_b)
if key in seen:
continue
yield (node_a, node_b)
seen.add(key)
def TN_from_edges_and_fill_fn(
fill_fn,
edges,
D,
phys_dim=None,
site_tag_id='I{}',
site_ind_id='k{}',
):
"""Create a tensor network from a sequence of edges defining a graph,
and a 'fill' function that maps shapes to data.
Parameters
----------
fill_fn : callable
A function with signature ``fill_fn(shape) -> array``, used to fill
each tensor.
edges : sequence of tuple[hashable, hashable]
The graph edges, as a sequence of pairs of hashable objects, for
example integers, representing the nodes. You can redundantly specify
``(u, v)`` and ``(v, u)`` and only one edge will be added.
D : int
The bond dimension connecting tensors.
phys_dim : int, optional
If not ``None``, give each tensor a 'physical', free index of this size
at each node.
site_tag_id : str, optional
String with formatter to tag sites.
site_ind_id : str, optional
String with formatter to tag indices (if ``phys_dim`` specified).
Returns
-------
TensorNetworkGen or TensorNetworkGenVector
"""
terms = collections.defaultdict(list)
bonds = collections.defaultdict(rand_uuid)
for node_a, node_b in gen_unique_edges(edges):
bond = bonds[node_a, node_b]
# insert at 0 to exactly match geometry of old TN_rand_reg
terms[node_a].insert(0, bond)
terms[node_b].insert(0, bond)
ts = []
sites = []
for node, inds in sorted(terms.items(), key=lambda x: x[0]):
sites.append(node)
shape = [D] * len(inds)
if phys_dim is not None:
inds.append(site_ind_id.format(node))
shape.append(phys_dim)
data = fill_fn(shape)
tags = site_tag_id.format(node)
ts.append(Tensor(data=data, inds=inds, tags=tags))
tn = TensorNetwork(ts)
if phys_dim is not None:
tn.view_as_(
TensorNetworkGenVector, sites=sites,
site_tag_id=site_tag_id, site_ind_id=site_ind_id
)
else:
tn.view_as_(
TensorNetworkGen, sites=sites,
site_tag_id=site_tag_id
)
return tn
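# Hedged usage sketch (toy graph, random fill; the names are illustrative):
# edges = [(0, 1), (1, 2), (2, 0)]
# tn = TN_from_edges_and_fill_fn(lambda shape: np.random.normal(size=shape),
#                                edges, D=3, phys_dim=2)
# which gives one tensor per node, bond dimension 3, and a physical index 'k{node}'.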
def TN_from_edges_empty(
edges,
D,
phys_dim=None,
site_tag_id='I{}',
site_ind_id='k{}',
dtype='float64',
):
"""Create a tensor network from a sequence of edges defining a graph,
initialized with empty tensors.
Parameters
----------
edges : sequence of tuple[hashable, hashable]
The graph edges, as a sequence of pairs of hashable objects, for
example integers, representing the nodes. You can redundantly specify
``(u, v)`` and ``(v, u)`` and only one edge will be added.
D : int
The bond dimension connecting tensors.
phys_dim : int, optional
If not ``None``, give each tensor a 'physical', free index of this size
at each node.
site_tag_id : str, optional
String with formatter to tag sites.
site_ind_id : str, optional
String with formatter to tag indices (if ``phys_dim`` specified).
dtype : str, optional
The data type of the tensors.
Returns
-------
TensorNetworkGen or TensorNetworkGenVector
"""
def fill_fn(shape):
return np.empty(shape, dtype=dtype)
return TN_from_edges_and_fill_fn(
edges=edges, D=D, fill_fn=fill_fn, phys_dim=phys_dim,
site_tag_id=site_tag_id, site_ind_id=site_ind_id)
def TN_from_edges_with_value(
value,
edges,
D,
phys_dim=None,
site_tag_id='I{}',
site_ind_id='k{}',
dtype=None,
):
"""Create a tensor network from a sequence of edges defining a graph,
initialized with a constant value. This uses ``numpy.broadcast_to`` and
therefore essentially no memory.
Parameters
----------
value : scalar
The value to fill the tensors with.
edges : sequence of tuple[hashable, hashable]
The graph edges, as a sequence of pairs of hashable objects, for
example integers, representing the nodes. You can redundantly specify
``(u, v)`` and ``(v, u)`` and only one edge will be added.
D : int
The bond dimension connecting tensors.
phys_dim : int, optional
If not ``None``, give each tensor a 'physical', free index of this size
at each node.
site_tag_id : str, optional
String with formatter to tag sites.
site_ind_id : str, optional
String with formatter to tag indices (if ``phys_dim`` specified).
dtype : str, optional
The data type of the tensors.
Returns
-------
TensorNetworkGen or TensorNetworkGenVector
"""
element = np.array(value, dtype=dtype)
def fill_fn(shape):
return np.broadcast_to(element, shape)
return TN_from_edges_and_fill_fn(
edges=edges, D=D, fill_fn=fill_fn, phys_dim=phys_dim,
site_tag_id=site_tag_id, site_ind_id=site_ind_id)
def TN_rand_from_edges(
edges,
D,
phys_dim=None,
seed=None,
dtype='float64',
site_tag_id='I{}',
site_ind_id='k{}',
):
"""Create a random tensor network with geometry defined from a sequence
of edges defining a graph.
Parameters
----------
G : sequence of tuple[node, node]
The edges defining a graph, each element should be a pair of nodes
described by hashable objects.
D : int
The bond dimension connecting tensors.
phys_dim : int, optional
If not ``None``, give each tensor a 'physical', free index of this size
to mimic a wavefunction of ``len(G)`` sites.
seed : int, optional
A random seed.
site_tag_id : str, optional
String with formatter to tag sites.
site_ind_id : str, optional
String with formatter to tag indices (if ``phys_dim`` specified).
Returns
-------
TensorNetworkGen or TensorNetworkGenVector
"""
ts = {}
sites = tuple(sorted(set(concat(edges))))
for node in sites:
t = Tensor(tags=site_tag_id.format(node))
if phys_dim is not None:
t.new_ind(site_ind_id.format(node), size=phys_dim)
ts[node] = t
for node_a, node_b in gen_unique_edges(edges):
new_bond(ts[node_a], ts[node_b], size=D)
tn = TensorNetwork(ts.values())
tn.randomize_(seed=seed, dtype=dtype)
if phys_dim is not None:
tn.view_as_(
TensorNetworkGenVector, sites=sites,
site_tag_id=site_tag_id, site_ind_id=site_ind_id
)
else:
tn.view_as_(
TensorNetworkGen, sites=sites,
site_tag_id=site_tag_id
)
return tn
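# A minimal usage sketch for ``TN_rand_from_edges`` on a triangle graph; the
# sizes chosen below are arbitrary illustrative values, and it is wrapped in a
# helper so that importing this module does not execute it.
def _example_tn_rand_from_edges():
    edges = [(0, 1), (1, 2), (2, 0)]
    tn = TN_rand_from_edges(edges, D=3, phys_dim=2, seed=42)
    assert len(tn.tensors) == 3                        # one tensor per node
    # each tensor carries one physical index (2) and two bonds (3, 3)
    assert all(sorted(t.shape) == [2, 3, 3] for t in tn.tensors)
    return tn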
def TN_rand_reg(
n,
reg,
D,
phys_dim=None,
seed=None,
dtype='float64',
site_tag_id='I{}',
site_ind_id='k{}',
):
"""Create a random regular tensor network.
Parameters
----------
n : int
The number of tensors.
reg : int
The degree of the tensor network (how many tensors each tensor
connects to).
D : int
The bond dimension connecting tensors.
phys_dim : int, optional
If not ``None``, give each tensor a 'physical', free index of this size
to mimic a wavefunction of ``n`` sites.
seed : int, optional
A random seed.
site_tag_id : str, optional
String with formatter to tag sites.
site_ind_id : str, optional
String with formatter to tag indices (if ``phys_dim`` specified).
Returns
-------
    TensorNetworkGen or TensorNetworkGenVector
"""
import networkx as nx
G = nx.random_degree_sequence_graph([reg] * n, seed=seed)
return TN_rand_from_edges(
G.edges, D=D, phys_dim=phys_dim, seed=seed, dtype=dtype,
site_tag_id=site_tag_id, site_ind_id=site_ind_id)
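# A quick usage sketch for ``TN_rand_reg`` (requires ``networkx``); the sizes
# below are arbitrary illustrative choices, wrapped in a helper so the graph
# generation does not run on import.
def _example_tn_rand_reg():
    tn = TN_rand_reg(6, 3, D=2, phys_dim=2, seed=7)
    assert len(tn.tensors) == 6                        # one tensor per site
    return tn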
def TN2D_from_fill_fn(
fill_fn,
Lx,
Ly,
D,
cyclic=False,
site_tag_id='I{},{}',
row_tag_id='ROW{}',
col_tag_id='COL{}',
):
"""A scalar 2D lattice tensor network with tensors filled by a function.
Parameters
----------
fill_fn : callable
A function with signature ``fill_fn(shape) -> array``, used to fill
each tensor.
Lx : int
Length of side x.
Ly : int
Length of side y.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool), optional
Whether to use periodic boundary conditions. X and Y can be specified
separately using a tuple.
site_tag_id : str, optional
String specifier for naming convention of site tags.
row_tag_id : str, optional
String specifier for naming convention of row tags.
col_tag_id : str, optional
String specifier for naming convention of column tags.
Returns
-------
TensorNetwork2D
"""
try:
cyclic_x, cyclic_y = cyclic
except TypeError:
cyclic_x = cyclic_y = cyclic
ts = []
bonds = collections.defaultdict(rand_uuid)
for i, j in itertools.product(range(Lx), range(Ly)):
directions = ""
inds = []
if j > 0 or cyclic_y:
directions += 'l'
inds.append(bonds[(i, (j - 1) % Ly), (i, j)])
if j < Ly - 1 or cyclic_y:
directions += 'r'
inds.append(bonds[(i, j), (i, (j + 1) % Ly)])
if i < Lx - 1 or cyclic_x:
directions += 'u'
inds.append(bonds[(i, j), ((i + 1) % Lx, j)])
if i > 0 or cyclic_x:
directions += 'd'
inds.append(bonds[((i - 1) % Lx, j), (i, j)])
shape = (D,) * len(inds)
data = fill_fn(shape)
tags = [site_tag_id.format(i, j),
row_tag_id.format(i), col_tag_id.format(j)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
tn = TensorNetwork(ts)
return tn.view_as_(
TensorNetwork2D,
Lx=Lx, Ly=Ly,
site_tag_id=site_tag_id,
row_tag_id=row_tag_id,
col_tag_id=col_tag_id,
)
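# A small sketch of ``TN2D_from_fill_fn`` with a trivial all-ones fill
# function; the commented contraction assumes quimb's ``tn ^ all`` syntax and
# the open-boundary bond count Lx * (Ly - 1) + (Lx - 1) * Ly.
def _example_tn2d_ones(Lx=3, Ly=3, D=2):
    tn = TN2D_from_fill_fn(np.ones, Lx, Ly, D)
    assert len(tn.tensors) == Lx * Ly
    # an all-ones network contracts to D**n_bonds, i.e. 2**12 here:
    # assert (tn ^ all) == D ** (Lx * (Ly - 1) + (Lx - 1) * Ly)
    return tn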
def TN2D_empty(
Lx,
Ly,
D,
cyclic=False,
site_tag_id='I{},{}',
row_tag_id='ROW{}',
col_tag_id='COL{}',
dtype='float64',
):
"""A scalar 2D lattice tensor network initialized with empty tensors.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool), optional
Whether to use periodic boundary conditions. X and Y can be specified
separately using a tuple.
site_tag_id : str, optional
String specifier for naming convention of site tags.
row_tag_id : str, optional
String specifier for naming convention of row tags.
col_tag_id : str, optional
String specifier for naming convention of column tags.
dtype : str, optional
The data type of the tensors.
Returns
-------
TensorNetwork2D
"""
def fill_fn(shape):
return np.empty(shape, dtype=dtype)
return TN2D_from_fill_fn(
fill_fn, Lx=Lx, Ly=Ly, D=D, cyclic=cyclic,
site_tag_id=site_tag_id, row_tag_id=row_tag_id, col_tag_id=col_tag_id,
)
def TN2D_with_value(
value,
Lx,
Ly,
D,
cyclic=False,
site_tag_id='I{},{}',
row_tag_id='ROW{}',
col_tag_id='COL{}',
dtype=None,
):
"""A scalar 2D lattice tensor network with every element set to ``value``.
    This uses ``numpy.broadcast_to`` and therefore requires essentially no
    extra memory.
Parameters
----------
value : scalar
The value to fill the tensors with.
Lx : int
Length of side x.
Ly : int
Length of side y.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool), optional
Whether to use periodic boundary conditions. X and Y can be specified
separately using a tuple.
site_tag_id : str, optional
String specifier for naming convention of site tags.
row_tag_id : str, optional
String specifier for naming convention of row tags.
col_tag_id : str, optional
String specifier for naming convention of column tags.
dtype : str, optional
The data type of the tensors.
Returns
-------
TensorNetwork2D
"""
element = np.array(value, dtype=dtype)
def fill_fn(shape):
return np.broadcast_to(element, shape)
return TN2D_from_fill_fn(
fill_fn, Lx=Lx, Ly=Ly, D=D, cyclic=cyclic,
site_tag_id=site_tag_id, row_tag_id=row_tag_id, col_tag_id=col_tag_id,
)
@random_seed_fn
def TN2D_rand(
Lx,
Ly,
D,
cyclic=False,
site_tag_id='I{},{}',
row_tag_id='ROW{}',
col_tag_id='COL{}',
dtype='float64',
):
"""A random scalar 2D lattice tensor network.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool), optional
Whether to use periodic boundary conditions. X and Y can be specified
separately using a tuple.
site_tag_id : str, optional
String specifier for naming convention of site tags.
row_tag_id : str, optional
String specifier for naming convention of row tags.
col_tag_id : str, optional
String specifier for naming convention of column tags.
dtype : dtype, optional
Data type of the random arrays.
seed : int, optional
A random seed.
Returns
-------
TensorNetwork2D
"""
def fill_fn(shape):
return randn(shape, dtype=dtype)
return TN2D_from_fill_fn(
fill_fn, Lx=Lx, Ly=Ly, D=D, cyclic=cyclic,
site_tag_id=site_tag_id, row_tag_id=row_tag_id, col_tag_id=col_tag_id,
)
def TN3D_from_fill_fn(
fill_fn,
Lx,
Ly,
Lz,
D,
cyclic=False,
site_tag_id='I{},{},{}',
x_tag_id='X{}',
y_tag_id='Y{}',
z_tag_id='Z{}',
):
"""A scalar 3D lattice tensor network with tensors filled by a function.
Parameters
----------
fill_fn : callable
A function with signature ``fill_fn(shape) -> array``, used to fill
each tensor.
Lx : int
Length of side x.
Ly : int
Length of side y.
Lz : int
Length of side z.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool, bool), optional
Whether to use periodic boundary conditions. X, Y and Z can be
specified separately using a tuple.
site_tag_id : str, optional
String formatter specifying how to label each site.
    x_tag_id : str, optional
        String specifier for naming convention of x-slice tags.
    y_tag_id : str, optional
        String specifier for naming convention of y-slice tags.
    z_tag_id : str, optional
        String specifier for naming convention of z-slice tags.
Returns
-------
TensorNetwork3D
"""
try:
cyclic_x, cyclic_y, cyclic_z = cyclic
except TypeError:
cyclic_x = cyclic_y = cyclic_z = cyclic
ts = []
bonds = collections.defaultdict(rand_uuid)
for i, j, k in itertools.product(range(Lx), range(Ly), range(Lz)):
directions = ""
inds = []
if k > 0 or cyclic_z:
directions += 'b'
inds.append(bonds[(i, j, (k - 1) % Lz), (i, j, k)])
if k < Lz - 1 or cyclic_z:
directions += 'a'
inds.append(bonds[(i, j, k), (i, j, (k + 1) % Lz)])
if j > 0 or cyclic_y:
directions += 'l'
inds.append(bonds[(i, (j - 1) % Ly, k), (i, j, k)])
if j < Ly - 1 or cyclic_y:
directions += 'r'
inds.append(bonds[(i, j, k), (i, (j + 1) % Ly, k)])
if i < Lx - 1 or cyclic_x:
directions += 'u'
inds.append(bonds[(i, j, k), ((i + 1) % Lx, j, k)])
if i > 0 or cyclic_x:
directions += 'd'
inds.append(bonds[((i - 1) % Lx, j, k), (i, j, k)])
shape = (D,) * len(inds)
data = fill_fn(shape)
tags = [site_tag_id.format(i, j, k), x_tag_id.format(i),
y_tag_id.format(j), z_tag_id.format(k)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
tn = TensorNetwork(ts)
return tn.view_as_(
TensorNetwork3D,
Lx=Lx, Ly=Ly, Lz=Lz,
site_tag_id=site_tag_id,
x_tag_id=x_tag_id,
y_tag_id=y_tag_id,
z_tag_id=z_tag_id,
)
def TN3D_empty(
Lx,
Ly,
Lz,
D,
cyclic=False,
site_tag_id='I{},{},{}',
x_tag_id='X{}',
y_tag_id='Y{}',
z_tag_id='Z{}',
dtype='float64',
):
"""A scalar 3D lattice tensor network initialized with empty tensors.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
Lz : int
Length of side z.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool, bool), optional
Whether to use periodic boundary conditions. X, Y and Z can be
specified separately using a tuple.
site_tag_id : str, optional
String formatter specifying how to label each site.
    dtype : dtype, optional
        Data type of the tensors.
Returns
-------
TensorNetwork3D
"""
def fill_fn(shape):
return np.empty(shape, dtype=dtype)
return TN3D_from_fill_fn(
fill_fn, Lx, Ly, Lz, D,
cyclic=cyclic, site_tag_id=site_tag_id,
x_tag_id=x_tag_id, y_tag_id=y_tag_id, z_tag_id=z_tag_id,
)
def TN3D_with_value(
value,
Lx,
Ly,
Lz,
D,
cyclic=False,
site_tag_id='I{},{},{}',
x_tag_id='X{}',
y_tag_id='Y{}',
z_tag_id='Z{}',
dtype=None,
):
"""A scalar 2D lattice tensor network with every element set to ``value``.
This uses ``numpy.broadcast_to`` and therefore essentially no memory.
Parameters
----------
value : scalar
The value to fill the tensors with.
Lx : int
Length of side x.
Ly : int
Length of side y.
Lz : int
Length of side z.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool, bool), optional
Whether to use periodic boundary conditions. X, Y and Z can be
specified separately using a tuple.
site_tag_id : str, optional
String formatter specifying how to label each site.
    dtype : dtype, optional
        Data type of the tensors.
Returns
-------
TensorNetwork3D
"""
element = np.array(value, dtype=dtype)
def fill_fn(shape):
return np.broadcast_to(element, shape)
return TN3D_from_fill_fn(
fill_fn, Lx, Ly, Lz, D,
cyclic=cyclic, site_tag_id=site_tag_id,
x_tag_id=x_tag_id, y_tag_id=y_tag_id, z_tag_id=z_tag_id,
)
@random_seed_fn
def TN3D_rand(
Lx,
Ly,
Lz,
D,
cyclic=False,
site_tag_id='I{},{},{}',
x_tag_id='X{}',
y_tag_id='Y{}',
z_tag_id='Z{}',
dtype='float64',
):
"""A random scalar 3D lattice tensor network.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
Lz : int
Length of side z.
D : int
The bond dimension connecting sites.
cyclic : bool or (bool, bool, bool), optional
Whether to use periodic boundary conditions. X, Y and Z can be
specified separately using a tuple.
site_tag_id : str, optional
String formatter specifying how to label each site.
dtype : dtype, optional
Data type of the random arrays.
seed : int, optional
Random seed.
Returns
-------
    TensorNetwork3D
"""
def fill_fn(shape):
return randn(shape, dtype=dtype)
return TN3D_from_fill_fn(
fill_fn, Lx, Ly, Lz, D,
cyclic=cyclic, site_tag_id=site_tag_id,
x_tag_id=x_tag_id, y_tag_id=y_tag_id, z_tag_id=z_tag_id,
)
# ---------------------------- classical models ----------------------------- #
@functools.lru_cache(128)
def classical_ising_S_matrix(beta, j=1.0):
"""The interaction term for the classical ising model.
"""
S = np.array(
[[math.exp(+j * beta), math.exp(-j * beta)],
[math.exp(-j * beta), math.exp(+j * beta)]])
make_immutable(S)
return S
@functools.lru_cache(128)
def classical_ising_H_matrix(beta, h=0.0):
"""The magnetic field term for the classical ising model.
"""
H = np.array([math.exp(-beta * h), math.exp(beta * h)])
make_immutable(H)
return H
@functools.lru_cache(128)
def classical_ising_sqrtS_matrix(beta, j=1.0, asymm=None):
"""The sqrt factorized interaction term for the classical ising model.
If ``j`` is negative you can supply ``asymm='l'`` or ``'r'`` to
keep the matrix real, but it must be paired with the opposite in a tensor
network.
"""
if (j < 0.0) and (asymm is not None):
Slr = eigh(classical_ising_S_matrix(beta=beta, j=j))
S_1_2 = {
'l': Slr[0], 'lT': Slr[0].T,
'r': Slr[-1], 'rT': Slr[-1].T,
}[asymm]
else:
S_1_2 = np.array(
[[math.cosh(j * beta)**0.5 + math.sinh(j * beta)**0.5,
math.cosh(j * beta)**0.5 - math.sinh(j * beta)**0.5],
[math.cosh(j * beta)**0.5 - math.sinh(j * beta)**0.5,
math.cosh(j * beta)**0.5 + math.sinh(j * beta)**0.5]]
) / 2**0.5
make_immutable(S_1_2)
return S_1_2
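# A quick self-consistency sketch: for j > 0 the symmetric square-root factor
# reproduces the full interaction matrix, since c**2 + s**2 = exp(+j*beta) and
# c**2 - s**2 = exp(-j*beta) for c = cosh(j*beta)**0.5, s = sinh(j*beta)**0.5.
def _check_classical_ising_sqrtS(beta=0.3, j=1.0):
    S = classical_ising_S_matrix(beta, j)
    S_1_2 = classical_ising_sqrtS_matrix(beta, j)
    assert np.allclose(S_1_2 @ S_1_2.T, S)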
@functools.lru_cache(128)
def classical_ising_T_matrix(
beta,
j=1.0,
h=0.0,
directions='lrud',
asymm=None,
):
"""The single effective TN site for the classical ising model.
"""
try:
js = tuple(j)
except TypeError:
js = (j,) * len(directions)
try:
asymms = tuple(asymm)
except TypeError:
asymms = (asymm,) * len(directions)
arrays = (
[
classical_ising_sqrtS_matrix(beta=beta, j=j, asymm=a)
for j, a in zip(js, asymms)
] +
[classical_ising_H_matrix(beta, h)]
)
lhs = ",".join(f'i{x}' for x in directions)
eq = lhs + ",i->" + directions
return oe.contract(eq, *arrays)
def HTN2D_classical_ising_partition_function(
Lx,
Ly,
beta,
h=0.0,
j=1.0,
ind_id='s{},{}',
cyclic=False,
):
"""Hyper tensor network representation of the 2D classical ising model
partition function. The indices will be shared by 4 or 5 tensors depending
on whether ``h`` is non-zero. As opposed to the 'normal' tensor network,
here each classical spin is still a single index, which is easier to
contract exactly.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
beta : float
The inverse temperature.
h : float, optional
The magnetic field strength.
j : float, optional
The interaction strength, positive being *ferromagnetic*.
cyclic : bool or (bool, bool), optional
Whether to use periodic boundary conditions. X and Y can be specified
separately using a tuple.
ind_id : str, optional
How to label the indices i.e. ``ind_id.format(i, j)``, each of which
corresponds to a single classical spin.
Returns
-------
TensorNetwork
See Also
--------
TN2D_classical_ising_partition_function
"""
try:
cyclic_x, cyclic_y = cyclic
except TypeError:
cyclic_x = cyclic_y = cyclic
if callable(j):
j_factory = j
else:
def j_factory(node_a, node_b):
return j
ts = []
for ni, nj in itertools.product(range(Lx), range(Ly)):
if ni < Lx - 1 or cyclic_x:
node_a, node_b = (ni, nj), ((ni + 1) % Lx, nj)
inds = ind_id.format(*node_a), ind_id.format(*node_b)
data = classical_ising_S_matrix(
beta=beta, j=j_factory(node_a, node_b))
ts.append(Tensor(data, inds=inds))
if nj < Ly - 1 or cyclic_y:
node_a, node_b = (ni, nj), (ni, (nj + 1) % Ly)
inds = ind_id.format(*node_a), ind_id.format(*node_b)
data = classical_ising_S_matrix(
beta=beta, j=j_factory(node_a, node_b))
ts.append(Tensor(data, inds=inds))
if h != 0.0:
data = classical_ising_H_matrix(beta=beta, h=h)
ts.append(Tensor(data, inds=(ind_id.format(ni, nj),)))
return TensorNetwork(ts)
def HTN3D_classical_ising_partition_function(
Lx,
Ly,
Lz,
beta,
j=1.0,
h=0.0,
cyclic=False,
ind_id='s{},{},{}',
):
"""Hyper tensor network representation of the 3D classical ising model
partition function. The indices will be shared by 6 or 7 tensors depending
on whether ``h`` is non-zero. As opposed to the 'normal' tensor network,
here each classical spin is still a single index, which is easier to
contract exactly.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
Lz : int
Length of side z.
beta : float
The inverse temperature.
j : float, optional
The interaction strength, positive being *ferromagnetic*.
h : float, optional
The magnetic field strength.
cyclic : bool or (bool, bool, bool), optional
Whether to use periodic boundary conditions. X, Y and Z can be
specified separately using a tuple.
ind_id : str, optional
How to label the indices i.e. ``ind_id.format(i, j, k)``, each of which
corresponds to a single classical spin.
Returns
-------
TensorNetwork
See Also
--------
TN3D_classical_ising_partition_function
"""
try:
cyclic_x, cyclic_y, cyclic_z = cyclic
except TypeError:
cyclic_x = cyclic_y = cyclic_z = cyclic
if callable(j):
j_factory = j
else:
def j_factory(node_a, node_b):
return j
ts = []
for ni, nj, nk in itertools.product(range(Lx), range(Ly), range(Lz)):
if ni < Lx - 1 or cyclic_x:
node_a, node_b = (ni, nj, nk), ((ni + 1) % Lx, nj, nk)
inds = (ind_id.format(*node_a), ind_id.format(*node_b))
data = classical_ising_S_matrix(
beta=beta, j=j_factory(node_a, node_b))
ts.append(Tensor(data, inds=inds))
if nj < Ly - 1 or cyclic_y:
node_a, node_b = (ni, nj, nk), (ni, (nj + 1) % Ly, nk)
inds = (ind_id.format(*node_a), ind_id.format(*node_b))
data = classical_ising_S_matrix(
beta=beta, j=j_factory(node_a, node_b))
ts.append(Tensor(data, inds=inds))
if nk < Lz - 1 or cyclic_z:
node_a, node_b = (ni, nj, nk), (ni, nj, (nk + 1) % Lz)
inds = (ind_id.format(*node_a), ind_id.format(*node_b))
data = classical_ising_S_matrix(
beta=beta, j=j_factory(node_a, node_b))
ts.append(Tensor(data, inds=inds))
if h != 0.0:
data = classical_ising_H_matrix(beta=beta, h=h)
ts.append(Tensor(data, inds=(ind_id.format(ni, nj, nk),)))
return TensorNetwork(ts)
def TN2D_classical_ising_partition_function(
Lx,
Ly,
beta,
j=1.0,
h=0.0,
cyclic=False,
site_tag_id='I{},{}',
row_tag_id='ROW{}',
col_tag_id='COL{}',
):
"""The tensor network representation of the 2D classical ising model
partition function.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
beta : float
The inverse temperature.
j : float, optional
The interaction strength, positive being *ferromagnetic*.
h : float, optional
The magnetic field strength.
cyclic : bool or (bool, bool), optional
Whether to use periodic boundary conditions. X and Y can be specified
separately using a tuple.
site_tag_id : str, optional
String specifier for naming convention of site tags.
row_tag_id : str, optional
String specifier for naming convention of row tags.
col_tag_id : str, optional
String specifier for naming convention of column tags.
Returns
-------
TensorNetwork2D
See Also
--------
HTN2D_classical_ising_partition_function
"""
try:
cyclic_x, cyclic_y = cyclic
except TypeError:
cyclic_x = cyclic_y = cyclic
if callable(j):
j_factory = j
else:
def j_factory(node_a, node_b):
return j
ts = []
bonds = collections.defaultdict(rand_uuid)
for ni, nj in itertools.product(range(Lx), range(Ly)):
directions = ""
inds = []
js = ()
asymms = ()
for inbounds, pair, direction in [
(nj > 0 or cyclic_y, ((ni, (nj - 1) % Ly), (ni, nj)), 'l'),
(nj < Ly - 1 or cyclic_y, ((ni, nj), (ni, (nj + 1) % Ly)), 'r'),
(ni < Lx - 1 or cyclic_x, ((ni, nj), ((ni + 1) % Lx, nj)), 'u'),
(ni > 0 or cyclic_x, (((ni - 1) % Lx, nj), (ni, nj)), 'd'),
]:
if inbounds:
js += (j_factory(*pair),)
directions += direction
# this is logic for handling negative j without imag tensors
# i.e. add the left factor if the first instance of bond, right
# factor if second. If j > 0.0 this doesn't matter anyhow
asymms += ('l' if pair not in bonds else 'rT',)
inds.append(bonds[pair])
ts.append(Tensor(
data=classical_ising_T_matrix(
beta=beta, directions=directions, j=js, h=h, asymm=asymms,
),
inds=inds,
tags=[site_tag_id.format(ni, nj),
row_tag_id.format(ni),
col_tag_id.format(nj)]))
tn = TensorNetwork(ts)
return tn.view_as_(
TensorNetwork2D,
Lx=Lx, Ly=Ly,
site_tag_id=site_tag_id,
row_tag_id=row_tag_id,
col_tag_id=col_tag_id,
)
def TN3D_classical_ising_partition_function(
Lx,
Ly,
Lz,
beta,
j=1.0,
h=0.0,
cyclic=False,
site_tag_id='I{},{},{}',
x_tag_id='X{}',
y_tag_id='Y{}',
z_tag_id='Z{}',
):
"""Tensor network representation of the 3D classical ising model
partition function.
Parameters
----------
Lx : int
Length of side x.
Ly : int
Length of side y.
Lz : int
Length of side z.
beta : float
The inverse temperature.
j : float, optional
The interaction strength, positive being *ferromagnetic*.
h : float, optional
The magnetic field strength.
cyclic : bool or (bool, bool, bool), optional
Whether to use periodic boundary conditions. X, Y and Z can be
specified separately using a tuple.
site_tag_id : str, optional
String formatter specifying how to label each site.
Returns
-------
    TensorNetwork3D
See Also
--------
HTN3D_classical_ising_partition_function
"""
try:
cyclic_x, cyclic_y, cyclic_z = cyclic
except TypeError:
cyclic_x = cyclic_y = cyclic_z = cyclic
if callable(j):
j_factory = j
else:
def j_factory(node_a, node_b):
return j
ts = []
bonds = collections.defaultdict(rand_uuid)
for ni, nj, nk in itertools.product(range(Lx), range(Ly), range(Lz)):
directions = ""
inds = []
js = ()
asymms = ()
for inbounds, pair, direction in [
(nk > 0 or cyclic_z,
((ni, nj, (nk - 1) % Lz), (ni, nj, nk)), 'b'),
(nk < Lz - 1 or cyclic_z,
((ni, nj, nk), (ni, nj, (nk + 1) % Lz)), 'a'),
(nj > 0 or cyclic_y,
((ni, (nj - 1) % Ly, nk), (ni, nj, nk)), 'l'),
(nj < Ly - 1 or cyclic_y,
((ni, nj, nk), (ni, (nj + 1) % Ly, nk)), 'r'),
(ni < Lx - 1 or cyclic_x,
((ni, nj, nk), ((ni + 1) % Lx, nj, nk)), 'u'),
(ni > 0 or cyclic_x,
(((ni - 1) % Lx, nj, nk), (ni, nj, nk)), 'd'),
]:
if inbounds:
js += (j_factory(*pair),)
directions += direction
# this is logic for handling negative j without imag tensors
# i.e. add the left factor if the first instance of bond, right
# factor if second. If j > 0.0 this doesn't matter anyhow
asymms += ('l' if pair not in bonds else 'rT',)
inds.append(bonds[pair])
ts.append(Tensor(
data=classical_ising_T_matrix(
beta=beta, directions=directions, j=js, h=h, asymm=asymms,
),
inds=inds,
tags=[
site_tag_id.format(ni, nj, nk),
x_tag_id.format(ni),
y_tag_id.format(nj),
z_tag_id.format(nk),
],
))
tn = TensorNetwork(ts)
return tn.view_as_(
TensorNetwork3D,
Lx=Lx, Ly=Ly, Lz=Lz,
site_tag_id=site_tag_id,
x_tag_id=x_tag_id,
y_tag_id=y_tag_id,
z_tag_id=z_tag_id,
)
def HTN_classical_partition_function_from_edges(
edges,
beta,
j=1.0,
h=0.0,
site_ind_id="s{}",
site_tag_id="I{}",
bond_tag_id="B{},{}",
):
"""Build a hyper tensor network representation of a classical ising model
partition function by specifying graph edges. There will be a single
tensor *per interaction* rather than per site, as well as a single tensor
for each site, if ``h != 0.0``.
Parameters
----------
edges : sequence of tuple[hashable, hashable]
The graph edges, as a sequence of pairs of hashable objects, for
example integers, representing the nodes. You can redundantly specify
``(u, v)`` and ``(v, u)`` and only one edge will be added.
    beta : float
The inverse temperature.
j : float, or callable, optional
The interaction strength, positive being *ferromagnetic*. If a
callable should have the signature ``j(node_a, node_b)`` and return
a float.
h : float, or callable, optional
The magnetic field strength. If a callable should have the
signature ``h(node)`` and return a float.
site_ind_id : str, optional
A string formatter for naming tensor indices like
``site_ind_id.format(node)``.
site_tag_id : str, optional
A string formatter for naming tensor tags like
``site_tag_id.format(node)``.
bond_tag_id : str, optional
A string formatter for naming tensor tags like
``bond_tag_id.format(node_a, node_b)``.
Returns
-------
TensorNetwork
"""
if callable(j):
j_factory = j
else:
def j_factory(node_a, node_b):
return j
ts = []
for node_a, node_b in gen_unique_edges(edges):
data = classical_ising_S_matrix(beta=beta, j=j_factory(node_a, node_b))
inds = [site_ind_id.format(node_a),
site_ind_id.format(node_b)]
tags = [bond_tag_id.format(node_a, node_b),
site_tag_id.format(node_a),
site_tag_id.format(node_b)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
if h != 0.0:
if callable(h):
h_factory = h
else:
def h_factory(node):
return h
for node in unique(concat(edges)):
data = classical_ising_H_matrix(beta, h=h_factory(node))
inds = [site_ind_id.format(node)]
tags = [site_tag_id.format(node)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
return TensorNetwork(ts)
def TN_classical_partition_function_from_edges(
edges,
beta,
j=1.0,
h=0.0,
site_tag_id="I{}",
bond_ind_id="b{},{}",
):
"""Build a regular tensor network representation of a classical ising model
partition function by specifying graph edges. There will be a single
tensor per site.
Parameters
----------
edges : sequence of tuple[hashable, hashable]
The graph edges, as a sequence of pairs of hashable objects, for
example integers, representing the nodes. You can redundantly specify
``(u, v)`` and ``(v, u)`` and only one edge will be added.
    beta : float
The inverse temperature.
j : float, or callable, optional
The interaction strength, positive being *ferromagnetic*. If a
callable should have the signature ``j(node_a, node_b)`` and return
a float.
h : float, or callable, optional
The magnetic field strength. If a callable should have the
signature ``h(node)`` and return a float.
    site_tag_id : str, optional
        A string formatter for naming tensor tags like
        ``site_tag_id.format(node)``.
    bond_ind_id : str, optional
        A string formatter for naming the indices between tensors like
        ``bond_ind_id.format(node_a, node_b)``.
Returns
-------
TensorNetwork
"""
if callable(j):
j_factory = j
else:
def j_factory(node_a, node_b):
return j
to_contract = collections.defaultdict(list)
ts = []
for node_a, node_b in gen_unique_edges(edges):
j_ab = j_factory(node_a, node_b)
bond_ab = bond_ind_id.format(node_a, node_b)
# left tensor factor
data = classical_ising_sqrtS_matrix(beta=beta, j=j_ab, asymm='l')
inds = [f's{node_a}', bond_ab]
tags = [site_tag_id.format(node_a)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
# right tensor factor
data = classical_ising_sqrtS_matrix(beta=beta, j=j_ab, asymm='r')
inds = [bond_ab, f's{node_b}']
tags = [site_tag_id.format(node_b)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
to_contract[f's{node_a}'].append(bond_ab)
to_contract[f's{node_b}'].append(bond_ab)
sites = tuple(sorted(set(concat(edges))))
if h != 0.0:
if callable(h):
h_factory = h
else:
def h_factory(node):
return h
for node in sites:
data = classical_ising_H_matrix(beta, h=h_factory(node))
inds = [f's{node}']
tags = [site_tag_id.format(node)]
ts.append(Tensor(data=data, inds=inds, tags=tags))
to_contract[f's{node}'].extend(())
tn = TensorNetwork(ts)
for ind, output_inds in to_contract.items():
tn.contract_ind(ind, output_inds=output_inds)
tn.view_as_(TensorNetworkGen, sites=sites, site_tag_id=site_tag_id)
return tn
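# A brute-force cross-check of the edge-based partition function on a tiny
# graph. The exact enumeration below uses only the Boltzmann weights encoded
# by the matrices above; the commented line assumes quimb's ``tn ^ all``
# full-contraction syntax.
def _check_partition_function_from_edges(beta=0.44, j=1.0, h=0.1):
    edges = [(0, 1), (1, 2), (2, 0)]                   # a triangle
    sites = sorted(set(concat(edges)))
    Z = 0.0
    for spins in itertools.product((-1, +1), repeat=len(sites)):
        s = dict(zip(sites, spins))
        energy = j * sum(s[a] * s[b] for a, b in edges) + h * sum(spins)
        Z += math.exp(beta * energy)
    tn = TN_classical_partition_function_from_edges(edges, beta=beta, j=j, h=h)
    # assert np.isclose(tn ^ all, Z)
    return Z, tn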
@functools.lru_cache(128)
def dimer_data(d, cover_count=1, dtype=float):
shape = [2] * d
x = np.zeros(shape, dtype=dtype)
index_sum = np.indices(shape).sum(axis=0)
x[index_sum == cover_count] = 1
make_immutable(x)
return x
def TN_dimer_covering_from_edges(
edges,
cover_count=1,
site_tag_id="I{}",
bond_ind_id="b{},{}",
dtype=float,
):
"""Make a tensor network from sequence of graph edges that counts the
number of ways to cover the graph exactly with dimers. See
https://arxiv.org/abs/1805.10598 for the construction.
Parameters
----------
edges : sequence of tuple
The edges, each item should be a pair of hashable objects describing
nodes linked.
cover_count : int, optional
The exact number of times each node must be 'covered'. For example
1 for a standard dimer covering or 2 for 'ice rules'.
    site_tag_id : str, optional
        A string formatter for naming tensor tags like
        ``site_tag_id.format(node)``.
    bond_ind_id : str, optional
        A string formatter for naming the indices between tensors like
        ``bond_ind_id.format(node_a, node_b)``.
Returns
-------
TensorNetwork
"""
nodes2inds = collections.defaultdict(list)
for ni, nj in edges:
bond = bond_ind_id.format(ni, nj)
nodes2inds[ni].append(bond)
nodes2inds[nj].append(bond)
ts = []
for node, inds in nodes2inds.items():
data = dimer_data(len(inds), cover_count=cover_count, dtype=dtype)
tag = site_tag_id.format(node)
ts.append(Tensor(data, inds=inds, tags=tag))
tn = TensorNetwork(ts)
sites = tuple(sorted(nodes2inds))
tn.view_as_(TensorNetworkGen, sites=sites, site_tag_id=site_tag_id)
return tn
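# A small illustration of the dimer construction: ``dimer_data(3)`` is 1
# exactly when one incident edge is occupied, and the 4-cycle graph has
# exactly two perfect matchings, so the network should contract to 2 (the
# commented line assumes quimb's ``tn ^ all`` syntax).
def _example_dimer_covering():
    x = dimer_data(3)
    assert x[1, 0, 0] == x[0, 1, 0] == x[0, 0, 1] == 1 and x.sum() == 3
    tn = TN_dimer_covering_from_edges([(0, 1), (1, 2), (2, 3), (3, 0)])
    # assert (tn ^ all) == 2
    return tn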
# --------------------------------------------------------------------------- #
# Weighted Model Counting #
# --------------------------------------------------------------------------- #
def clause_negmask(clause):
return int("".join('0' if x > 0 else '1' for x in clause), 2)
@functools.lru_cache(128)
def or_clause_data(ndim, m=0, dtype=float, q=2):
"""Get the array representing satisfiability of ``ndim`` clauses with
unsatisfied condition encoded in ``m``.
"""
shape = [q] * ndim
t = np.ones(shape, dtype=dtype)
t[np.unravel_index(m, shape)] = 0
return t
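# A short illustration of the clause encoding: for (x1 OR NOT x2 OR x3) the
# only unsatisfying assignment is (x1, x2, x3) = (0, 1, 0), whose bit pattern
# '010' is the mask 2, and the OR tensor is all ones except at that entry.
def _example_clause_encoding():
    clause = (1, -2, 3)
    m = clause_negmask(clause)
    assert m == 2
    t = or_clause_data(len(clause), m=m)
    assert t.shape == (2, 2, 2) and t[0, 1, 0] == 0 and t.sum() == 7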
def or_clause_tensor(ndim, m, inds, tags=None):
"""Get the tensor representing satisfiability of ``ndim`` clauses with
unsatisfied condition encoded in ``m`` labelled by ``inds`` and ``tags``.
"""
data = or_clause_data(ndim, m=m)
return Tensor(data=data, inds=inds, tags=tags)
def or_clause_mps_tensors(ndim, m, inds, tags=None):
"""Get the set of MPS tensors representing satisfiability of ``ndim``
clauses with unsatisfied condition encoded in ``m`` labelled by ``inds``
and ``tags``.
"""
mps = (
MPS_computational_state('+' * ndim, tags=tags) * (2**(ndim / 2)) -
MPS_computational_state(f'{m:0>{ndim}b}', tags=tags)
)
mps.reindex_({
mps.site_ind(i): ind
for i, ind in enumerate(inds)
})
return mps.tensors
@functools.lru_cache(2**10)
def or_clause_parafac_data(ndim, m):
"""Get the set of PARAFAC arrays representing satisfiability of ``ndim``
clauses with unsatisfied condition encoded in ``m``.
"""
inds = [f'k{i}' for i in range(ndim)]
bond = 'b'
pfc_ones = np.ones((2, 1))
pfc_up = np.array([[1.], [0.]])
pfc_dn = np.array([[0.], [1.]])
ts_ones = [Tensor(data=pfc_ones, inds=[ix, bond]) for ix in inds]
bmask = f'{m:0>{ndim}b}'
ts_mask = [Tensor(data=(pfc_dn if b == '1' else pfc_up), inds=[ix, bond])
for ix, b in zip(inds, bmask)]
# just need to multiply a single mask tensor by -1
ts_mask[0] *= -1
ts = [tensor_direct_product(t1, t2, sum_inds=(ix,))
for ix, t1, t2 in zip(inds, ts_ones, ts_mask)]
return tuple(t.data for t in ts)
def clause_parafac_tensors(ndim, m, inds, tags=None):
"""Get the set of PARAFAC tensors representing satisfiability of ``ndim``
clauses with unsatisfied condition encoded in ``m`` labelled by ``inds``
and ``tags``.
"""
bond = rand_uuid()
return [Tensor(x, inds=[ix, bond], tags=tags)
for x, ix in zip(or_clause_parafac_data(ndim, m), inds)]
def HTN_from_cnf(fname, mode='parafac'):
"""Create a hyper tensor network from a '.cnf' or '.wcnf' file - i.e. a
model counting or weighted model counting instance specification.
Parameters
----------
fname : str
Path to a '.cnf' or '.wcnf' file.
mode : {'parafac', 'mps', 'dense', int}, optional
How to represent the clauses:
* 'parafac' - `N` rank-2 tensors connected by a single hyper index.
You could further call :meth:`hyperinds_resolve` for more options
to convert the hyper index into a (decomposed) COPY-tensor.
* 'mps' - `N` rank-3 tensors connected along a 1D line.
* 'dense' - contract the hyper index.
* int - use the 'parafac' mode, but only if the length of a clause
is larger than this threshold.
Returns
-------
htn : TensorNetwork
"""
ts = []
weights = {}
weighted = set()
clause_counter = 1
with open(fname, 'r') as f:
for line in f:
args = line.split()
# global info, don't need
if args[0] == 'p':
# num_vars = int(args[2])
# num_clauses = int(args[3])
continue
# translate mc2021 style weight to normal
if args[:3] == ['c', 'p', 'weight']:
args = ('w', *args[3:5])
# variable weight
if args[0] == 'w':
sgn_var, w = args[1:]
sgn_var = int(sgn_var)
sgn = '-' if sgn_var < 0 else '+'
var = str(abs(sgn_var))
w = float(w)
weights[var, sgn] = w
weighted.add(var)
continue
# ignore empty lines, other comments and info line
if (not args) or (args == ['0']) or (args[0][0] in 'c%'):
continue
# clause tensor
clause = tuple(map(int, filter(None, args[:-1])))
# encode the OR statement with possible negations as int
m = clause_negmask(clause)
inds = [str(abs(var)) for var in clause]
tag = f'CLAUSE{clause_counter}'
if (
# parafac mode
(mode == 'parafac' and len(inds) > 2) or
# parafac above cutoff size mode
(isinstance(mode, int) and len(inds) > mode)
):
ts.extend(clause_parafac_tensors(len(inds), m, inds, tag))
elif mode == 'mps' and len(inds) > 2:
ts.extend(or_clause_mps_tensors(len(inds), m, inds, tag))
else:
# dense
ts.append(or_clause_tensor(len(inds), m, inds, tag))
clause_counter += 1
for var in sorted(weighted):
wp_specified = (var, '+') in weights
wm_specified = (var, '-') in weights
if wp_specified and wm_specified:
wp, wm = weights[var, '+'], weights[var, '-']
elif wp_specified:
wp = weights[var, '+']
wm = 1 - wp
elif wm_specified:
wm = weights[var, '-']
wp = 1 - wm
ts.append(Tensor([wm, wp], inds=[var], tags=[f'VAR{var}']))
return TensorNetwork(ts, virtual=True)
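# A hypothetical end-to-end sketch for ``HTN_from_cnf``: the temporary file
# and the commented contraction call are illustrative assumptions, not part
# of the original API.
def _example_htn_from_cnf():
    import os
    import tempfile
    cnf = "p cnf 2 1\n1 2 0\n"                         # single clause (x1 OR x2)
    with tempfile.NamedTemporaryFile('w', suffix='.cnf', delete=False) as f:
        f.write(cnf)
        fname = f.name
    try:
        htn = HTN_from_cnf(fname)
        # the clause has 3 satisfying assignments, so contracting every index
        # (e.g. ``htn.contract(all, output_inds=())``) should evaluate to 3
    finally:
        os.remove(fname)
    return htn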
# --------------------------------------------------------------------------- #
# MPSs #
# --------------------------------------------------------------------------- #
@random_seed_fn
def MPS_rand_state(L, bond_dim, phys_dim=2, normalize=True, cyclic=False,
dtype='float64', trans_invar=False, **mps_opts):
"""Generate a random matrix product state.
Parameters
----------
L : int
The number of sites.
bond_dim : int
The bond dimension.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
normalize : bool, optional
Whether to normalize the state.
cyclic : bool, optional
Generate a MPS with periodic boundary conditions or not, default is
open boundary conditions.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
trans_invar : bool (optional)
Whether to generate a translationally invariant state,
requires cyclic=True.
mps_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductState`.
"""
if trans_invar and not cyclic:
raise ValueError("State cannot be translationally invariant with open "
"boundary conditions.")
# check for site varying physical dimensions
if isinstance(phys_dim, Integral):
phys_dims = itertools.repeat(phys_dim)
else:
phys_dims = itertools.cycle(phys_dim)
cyc_dim = (bond_dim,) if cyclic else ()
def gen_shapes():
yield (*cyc_dim, bond_dim, next(phys_dims))
for _ in range(L - 2):
yield (bond_dim, bond_dim, next(phys_dims))
yield (bond_dim, *cyc_dim, next(phys_dims))
def gen_data(shape):
return randn(shape, dtype=dtype)
if trans_invar:
array = sensibly_scale(gen_data(next(gen_shapes())))
arrays = (array for _ in range(L))
else:
arrays = map(sensibly_scale, map(gen_data, gen_shapes()))
rmps = MatrixProductState(arrays, **mps_opts)
if normalize == 'left':
rmps.left_canonize(normalize=True)
    elif normalize == 'right':
        rmps.right_canonize(normalize=True)
elif normalize:
rmps /= (rmps.H @ rmps)**0.5
return rmps
def MPS_product_state(arrays, cyclic=False, **mps_opts):
"""Generate a product state in MatrixProductState form, i,e,
with bond dimension 1, from single site vectors described by ``arrays``.
"""
cyc_dim = (1,) if cyclic else ()
def gen_array_shapes():
yield (*cyc_dim, 1, -1)
for _ in range(len(arrays) - 2):
yield (1, 1, -1)
yield (*cyc_dim, 1, -1)
mps_arrays = (asarray(array).reshape(*shape)
for array, shape in zip(arrays, gen_array_shapes()))
return MatrixProductState(mps_arrays, shape='lrp', **mps_opts)
def MPS_computational_state(binary, dtype='float64', cyclic=False, **mps_opts):
"""A computational basis state in Matrix Product State form.
Parameters
----------
binary : str or sequence of int
String specifying the state, e.g. ``'00101010111'`` or ``[0, 0, 1]``.
cyclic : bool, optional
Generate a MPS with periodic boundary conditions or not, default open
boundary conditions.
mps_opts
Supplied to MatrixProductState constructor.
"""
array_map = {
'0': np.array([1., 0.], dtype=dtype),
'1': np.array([0., 1.], dtype=dtype),
'+': np.array([2**-0.5, 2**-0.5], dtype=dtype),
'-': np.array([2**-0.5, -2**-0.5], dtype=dtype),
}
def gen_arrays():
for s in binary:
yield array_map[str(s)]
return MPS_product_state(tuple(gen_arrays()), cyclic=cyclic, **mps_opts)
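# A minimal usage sketch: computational basis states are product states and
# hence already normalized (overlap taken with the usual ``psi.H @ psi``).
def _example_computational_state():
    psi = MPS_computational_state('0110')
    assert psi.L == 4
    assert abs(psi.H @ psi - 1.0) < 1e-12
    return psi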
def MPS_neel_state(L, down_first=False, dtype='float64', **mps_opts):
"""Generate the neel state in Matrix Product State form.
Parameters
----------
L : int
The number of spins.
down_first : bool, optional
Whether to start with '1' (down) or '0' (up) first.
mps_opts
Supplied to MatrixProductState constructor.
"""
binary_str = "01" * (L // 2) + (L % 2 == 1) * "0"
if down_first:
binary_str = "1" + binary_str[:-1]
return MPS_computational_state(binary_str, dtype=dtype, **mps_opts)
def MPS_ghz_state(L, dtype='float64', **mps_opts):
"""Build the chi=2 OBC MPS representation of the GHZ state.
Parameters
----------
L : int
Number of qubits.
dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional
The underlying data type.
mps_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductState`.
"""
def gen_arrays():
yield 2**-0.5 * np.array([[1., 0.],
[0., 1.]]).astype(dtype)
for i in range(1, L - 1):
yield np.array([[[1., 0.],
[0., 0.]],
[[0., 0.],
[0., 1.]]]).astype(dtype)
yield np.array([[1., 0.],
[0., 1.]]).astype(dtype)
return MatrixProductState(gen_arrays(), **mps_opts)
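# A small sanity sketch: the construction above encodes
# 2**-0.5 * (|00...0> + |11...1>), which is already normalized.
def _example_ghz_state(L=5):
    ghz = MPS_ghz_state(L)
    assert ghz.L == L
    assert abs(ghz.H @ ghz - 1.0) < 1e-12
    return ghz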
def MPS_w_state(L, dtype='float64', **mps_opts):
"""Build the chi=2 OBC MPS representation of the W state.
Parameters
----------
L : int
Number of qubits.
dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional
The underlying data type.
mps_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductState`.
"""
def gen_arrays():
yield (np.array([[1., 0.],
[0., 1.]]) / L ** 0.5).astype(dtype)
for i in range(1, L - 1):
yield np.array([[[1., 0.],
[0., 1.]],
[[0., 0.],
[1., 0.]]]).astype(dtype)
yield np.array([[0., 1.],
[1., 0.]]).astype(dtype)
return MatrixProductState(gen_arrays(), **mps_opts)
@random_seed_fn
def MPS_rand_computational_state(L, dtype='float64', **mps_opts):
"""Generate a random computation basis state, like '01101001010'.
Parameters
----------
L : int
The number of qubits.
seed : int, optional
The seed to use.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
mps_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductState`.
"""
cstr = (choice(('0', '1')) for _ in range(L))
return MPS_computational_state(cstr, dtype=dtype, **mps_opts)
def MPS_zero_state(L, bond_dim=1, phys_dim=2, cyclic=False,
dtype='float64', **mps_opts):
"""The all-zeros MPS state, of given bond-dimension.
Parameters
----------
L : int
The number of sites.
bond_dim : int, optional
The bond dimension, defaults to 1.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
cyclic : bool, optional
Generate a MPS with periodic boundary conditions or not, default is
open boundary conditions.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
mps_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductState`.
"""
cyc_dim = (bond_dim,) if cyclic else ()
def gen_arrays():
yield np.zeros((*cyc_dim, bond_dim, phys_dim), dtype=dtype)
for _ in range(L - 2):
yield np.zeros((bond_dim, bond_dim, phys_dim), dtype=dtype)
yield np.zeros((bond_dim, *cyc_dim, phys_dim), dtype=dtype)
return MatrixProductState(gen_arrays(), **mps_opts)
def MPS_sampler(L, dtype=complex, squeeze=True, **mps_opts):
"""A product state for sampling tensor network traces. Seen as a vector it
    has the required property that ``psi.H @ psi == d`` always for Hilbert
space size ``d``.
"""
arrays = [rand_phase(2, dtype=dtype) for _ in range(L)]
psi = MPS_product_state(arrays, **mps_opts)
if squeeze:
psi.squeeze_()
return psi
# --------------------------------------------------------------------------- #
# MPOs #
# --------------------------------------------------------------------------- #
def MPO_identity(L, phys_dim=2, dtype='float64', cyclic=False, **mpo_opts):
"""Generate an identity MPO of size ``L``.
Parameters
----------
L : int
The number of sites.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
cyclic : bool, optional
Generate a MPO with periodic boundary conditions or not, default is
open boundary conditions.
mpo_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
"""
II = np.identity(phys_dim, dtype=dtype)
cyc_dim = (1,) if cyclic else ()
def gen_arrays():
yield II.reshape(*cyc_dim, 1, phys_dim, phys_dim)
for _ in range(L - 2):
yield II.reshape(1, 1, phys_dim, phys_dim)
yield II.reshape(1, *cyc_dim, phys_dim, phys_dim)
return MatrixProductOperator(gen_arrays(), **mpo_opts)
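# A brief sketch: the identity MPO should act trivially, its dense form being
# the 2**L dimensional identity matrix (the commented check assumes the
# standard ``.to_dense()`` method of ``MatrixProductOperator``).
def _example_mpo_identity(L=3):
    I = MPO_identity(L)
    assert I.L == L
    # assert np.allclose(I.to_dense(), np.identity(2 ** L))
    return I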
def MPO_identity_like(mpo, **mpo_opts):
"""Return an identity matrix operator with the same physical index and
inds/tags as ``mpo``.
"""
return MPO_identity(L=mpo.L, phys_dim=mpo.phys_dim(), dtype=mpo.dtype,
site_tag_id=mpo.site_tag_id, cyclic=mpo.cyclic,
upper_ind_id=mpo.upper_ind_id,
lower_ind_id=mpo.lower_ind_id, **mpo_opts)
def MPO_zeros(L, phys_dim=2, dtype='float64', cyclic=False, **mpo_opts):
"""Generate a zeros MPO of size ``L``.
Parameters
----------
L : int
The number of sites.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
cyclic : bool, optional
Generate a MPO with periodic boundary conditions or not, default is
open boundary conditions.
mpo_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
"""
cyc_dim = (1,) if cyclic else ()
def gen_arrays():
yield np.zeros((*cyc_dim, 1, phys_dim, phys_dim), dtype=dtype)
for _ in range(L - 2):
yield np.zeros((1, 1, phys_dim, phys_dim), dtype=dtype)
yield np.zeros((1, *cyc_dim, phys_dim, phys_dim), dtype=dtype)
return MatrixProductOperator(gen_arrays(), **mpo_opts)
def MPO_zeros_like(mpo, **mpo_opts):
"""Return a zeros matrix operator with the same physical index and
inds/tags as ``mpo``.
"""
return MPO_zeros(L=mpo.L, phys_dim=mpo.phys_dim(),
dtype=mpo.dtype, site_tag_id=mpo.site_tag_id,
upper_ind_id=mpo.upper_ind_id, cyclic=mpo.cyclic,
lower_ind_id=mpo.lower_ind_id, **mpo_opts)
@random_seed_fn
def MPO_rand(L, bond_dim, phys_dim=2, normalize=True, cyclic=False,
herm=False, dtype='float64', **mpo_opts):
"""Generate a random matrix product state.
Parameters
----------
L : int
The number of sites.
bond_dim : int
The bond dimension.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
normalize : bool, optional
Whether to normalize the operator such that ``trace(A.H @ A) == 1``.
cyclic : bool, optional
Generate a MPO with periodic boundary conditions or not, default is
open boundary conditions.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
herm : bool, optional
Whether to make the matrix hermitian (or symmetric if real) or not.
mpo_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
"""
cyc_shp = (bond_dim,) if cyclic else ()
shapes = [(*cyc_shp, bond_dim, phys_dim, phys_dim),
*((bond_dim, bond_dim, phys_dim, phys_dim),) * (L - 2),
(bond_dim, *cyc_shp, phys_dim, phys_dim)]
def gen_data(shape):
data = randn(shape, dtype=dtype)
if not herm:
return data
trans = (0, 2, 1) if len(shape) == 3 else (0, 1, 3, 2)
return data + data.transpose(*trans).conj()
arrays = map(sensibly_scale, map(gen_data, shapes))
rmpo = MatrixProductOperator(arrays, **mpo_opts)
if normalize:
rmpo /= (rmpo.H @ rmpo)**0.5
return rmpo
@random_seed_fn
def MPO_rand_herm(L, bond_dim, phys_dim=2, normalize=True,
dtype='float64', **mpo_opts):
"""Generate a random hermitian matrix product operator.
See :class:`~quimb.tensor.tensor_gen.MPO_rand`.
"""
return MPO_rand(L, bond_dim, phys_dim=phys_dim, normalize=normalize,
dtype=dtype, herm=True, **mpo_opts)
# ---------------------------- MPO hamiltonians ----------------------------- #
def maybe_make_real(X):
"""Check if ``X`` is real, if so, convert to contiguous array.
"""
if np.allclose(X.imag, | np.zeros_like(X) | numpy.zeros_like |
from __future__ import annotations
import typing
from typing_extensions import TypedDict
from ctc import evm
from ctc import rpc
from ctc import spec
from . import coracle_spec
class FeiPcvStats(TypedDict):
pcv: int
user_fei: int
protocol_equity: int
valid: bool
async def async_get_pcv_stats(
block: spec.BlockNumberReference | None = None,
wrapper: bool = False,
provider: spec.ProviderSpec = None,
) -> FeiPcvStats:
if block is None:
block = 'latest'
if block is not None:
block = await evm.async_block_number_to_int(block=block)
to_address = coracle_spec.get_coracle_address(
wrapper,
block=block,
)
result = await rpc.async_eth_call(
function_name='pcvStats',
block_number=block,
provider=provider,
to_address=to_address,
)
return {
'pcv': result[0],
'user_fei': result[1],
'protocol_equity': result[2],
'valid': result[3],
}
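# A hypothetical usage sketch: the block number below is an arbitrary
# illustrative choice, and any provider/network configuration is assumed to
# be set up elsewhere.
async def _example_pcv_stats() -> FeiPcvStats:
    # from synchronous code: ``asyncio.run(_example_pcv_stats())``
    stats = await async_get_pcv_stats(block=14_000_000)
    return stats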
async def async_get_pcv_stats_by_block(
blocks: typing.Sequence[spec.BlockNumberReference],
wrapper: bool = False,
provider: spec.ProviderSpec = None,
nullify_invalid: bool = True,
) -> spec.DataFrame:
import asyncio
import numpy as np
if blocks is not None:
blocks = await evm.async_block_numbers_to_int(blocks=blocks)
# assemble kwargs
provider = rpc.get_provider(provider)
if provider['chunk_size'] is None:
provider['chunk_size'] = 1
async def _wrapped_call(
block: spec.BlockNumberReference, to_address: spec.Address
) -> typing.Sequence[None | list[typing.Any]]:
try:
return await rpc.async_eth_call(
function_name='pcvStats',
block_number=block,
provider=provider,
to_address=to_address,
)
except spec.RpcException as e:
invalid_message = 'execution reverted: chainlink is down'
if (
nullify_invalid
and len(e.args) > 0
and e.args[0].endswith(invalid_message)
):
return [None] * 4
else:
raise e
coroutines = []
for block in blocks:
to_address = coracle_spec.get_coracle_address(
wrapper,
block=block,
)
coroutine = _wrapped_call(block, to_address)
coroutines.append(coroutine)
result = await asyncio.gather(*coroutines)
# arrange results
transpose = list(zip(*result))
data = {}
keys = ['pcv', 'user_fei', 'protocol_equity', 'valid']
for k, key in enumerate(keys):
data[key] = transpose[k]
as_array = {
'pcv': | np.array(data['pcv'], dtype=float) | numpy.array |
"""
Copyright (C) 2019, Monash University, Geoscience Australia
Copyright (C) 2018, <NAME>
Bluecap is released under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The project uses third party components which may have different licenses.
Please refer to individual components for more details.
"""
# external libraries
import numpy as np
import pylab as pl
# Managers
from .OreBodyDataManager import OreBodyDataManager
from .MiningSystemDataManager import MiningSystemDataManager
from .ProcessingSystemDataManager import ProcessingSystemDataManager
from .EconomicDataManager import EconomicDataManager
from .InfrastructureDataManager import InfrastructureDataManager
# Functions
from Functions.FunctionManager import FunctionManager
# IO
from IO.XML import HasChild,GetChild,AddChild
from IO.XML import GetAttributeValue, SetAttributeString
class MineDataManager():
def __init__(self):
"""
Create an empty mine data manager and default variables.
"""
self.mineLatLong = np.array([0.0,0.0])
self.theOreBody = OreBodyDataManager()
self.theMiningSystem = MiningSystemDataManager()
self.theProcessingSystem = ProcessingSystemDataManager()
self.theEconomicDataManager = EconomicDataManager()
self.theInfrastructureManager = InfrastructureDataManager()
def ParseXMLNode(self, mineDataNode):
"""
Generate Mine Data Manager data from xml tree node.
"""
# Location
if(HasChild(mineDataNode,"Location")):
locNode = GetChild(mineDataNode,"Location")
self.mineLatLong[0] = GetAttributeValue(locNode,"lat")
self.mineLatLong[1] = GetAttributeValue(locNode,"long")
# Orebody
if(HasChild(mineDataNode,"Orebody")):
orebodyNode = GetChild(mineDataNode,"Orebody")
self.theOreBody.ParseXMLNode(orebodyNode)
theFunctionManager = FunctionManager()
if( (self.theOreBody.cover < 0.0 ) and (theFunctionManager.HasFunction("DepthOfCover") ) ):
self.theOreBody.cover = theFunctionManager.GetFunction("DepthOfCover").f( self.mineLatLong )
print("Cover set to: ", self.theOreBody.cover)
# Infrastructure
if(HasChild(mineDataNode,"Infrastructure")):
infrastructureNode = GetChild(mineDataNode,"Infrastructure")
self.theInfrastructureManager.ParseXMLNode(infrastructureNode)
# Economics
if(HasChild(mineDataNode,"Economics")):
economicsNode = GetChild(mineDataNode,"Economics")
self.theEconomicDataManager.ParseXMLNode(economicsNode)
def WriteXMLNode(self, node):
"""
Write problem to xml node
"""
# Location
locNode = AddChild(node,"Location")
SetAttributeString(locNode,"lat",self.mineLatLong[0])
SetAttributeString(locNode,"long",self.mineLatLong[1])
# Orebody
orebodyNode = AddChild(node,"Orebody")
self.theOreBody.WriteXMLNode(orebodyNode)
# Economics Node
economicsNode = AddChild(node,"Economics")
self.theEconomicDataManager.WriteXMLNode(economicsNode)
# Mining System Node
miningNode = AddChild(node,"Mining")
self.theMiningSystem.WriteXMLNode(miningNode)
# Processing System Node
processingNode = AddChild(node,"Processing")
self.theProcessingSystem.WriteXMLNode(processingNode)
# Infrastructure Node
infrastructureNode = AddChild(node,"Infrastructure")
self.theInfrastructureManager.WriteXMLNode(infrastructureNode)
return node
def SetMineType(self,mineType):
self.theMiningSystem.mineType = mineType
def CaculateMineProductionAndValue(self,problemManager):
"""
Determine after tax NPV for the mine
"""
# Mining Model
self.DetermineMiningSystem(problemManager)
# Processing Model
self.DetermineProcessingSystem(problemManager)
# G&A Model
self.CalculateGandAExpenses(problemManager)
# Infrastructure Model
self.CalculateInfrastructureCosts(problemManager)
# Cash flow
self.CalculateBeforeTaxCashFlow(problemManager)
self.CalculateTaxes(problemManager)
self.CalculateAfterTaxCashFlow(problemManager)
# EconomicIndicators
self.CalculateEconomicIndicators(problemManager)
value = self.theEconomicDataManager.atNPV
return value
def SetMiningMethod(self,miningMethod):
"""
Set the mining method (mine production and value need to be determined separately)
"""
self.theMiningSystem.miningMethod = miningMethod
def SetCoverDepth(self,cover):
self.theOreBody.cover = cover
return None
def DetermineMiningSystem(self,problemManager):
self.theMiningSystem.DetermineMiningSystem(problemManager,self)
return self.theMiningSystem
def DetermineProcessingSystem(self,problemManager):
self.theProcessingSystem.DetermineProcessingSystem(problemManager,self)
return self.theProcessingSystem
# G&A Model
def CalculateGandAExpenses(self,problemManager):
"""
General and administrative costs are estimated based on a fixed percentage of the overall mining and processing costs.
"""
self.theEconomicDataManager.CalculateGandAExpenses(problemManager,self)
return self.theEconomicDataManager.GandAOpex
# Infrastructure Model
def CalculateInfrastructureCosts(self,problemManager):
self.theInfrastructureManager.DetermineDistanceToInfrastructure(problemManager,self)
self.theInfrastructureManager.CalculateInfrastructureExpenses(problemManager,self)
return None
# Cash flow
def CalculateBeforeTaxCashFlow(self,problemManager):
self.theEconomicDataManager.CalculateBeforeTaxCashFlow(problemManager,self)
return self.theEconomicDataManager.btNCF
def CalculateTaxes(self,problemManager):
self.theEconomicDataManager.CalculateTaxes(problemManager,self)
return self.theEconomicDataManager.taxes
def CalculateAfterTaxCashFlow(self,problemManager):
self.theEconomicDataManager.CalculateAfterTaxCashFlow(problemManager,self)
return self.theEconomicDataManager.atNCF
# EconomicIndicators
def CalculateEconomicIndicators(self,problemManager):
self.theEconomicDataManager.CalculateBeforeTaxNPV(problemManager,self)
self.theEconomicDataManager.CalculateAfterTaxNPV(problemManager,self)
return self.theEconomicDataManager.atNPV
def PlotResults(self):
pl.plot(self.theEconomicDataManager.btNCF/1e6,"b-",label="btNCF")
pl.plot(self.theEconomicDataManager.royalties/1e6,"r--",label="Royalties")
pl.plot(self.theEconomicDataManager.taxes/1e6,"r-",label="Income tax")
pl.plot(self.theEconomicDataManager.atNCF/1e6,'g-',label="atNCF")
pl.legend()
pl.figure()
pl.plot(self.theMiningSystem.materialMined/1e6,"k-",label="total mined")
pl.plot(self.theMiningSystem.oreMined/1e6,"b-",label="ore")
pl.plot(self.theMiningSystem.wasteMined/1e6,"r-",label="waste")
pl.plot(self.theProcessingSystem.oreProcessed/1e6,"g-",label="processed")
pl.plot(self.theProcessingSystem.concentrateProduced/1e6,"g--",label="concentrate")
pl.legend(loc="lower right")
pl.figure()
inflation = (1.0+self.theEconomicDataManager.inflation)**np.array( list(range( self.theMiningSystem.mineLife)) )
pl.plot(self.theEconomicDataManager.btNCF/1e6,"b-",label="btNCF(MMAUD)")
pl.plot(self.theEconomicDataManager.revenue/1e6,"g-",label="Revenue")
pl.plot(inflation*self.theMiningSystem.miningCapex/1e6,"r--",label="Mining Startup")
pl.plot(inflation*self.theMiningSystem.miningOpex/1e6,"r-",label="Mining Sustaining")
pl.plot(inflation*self.theProcessingSystem.processingCapex/1e6,"m--",label="Processing Startup")
pl.plot(inflation*self.theProcessingSystem.processingOpex/1e6,"m-",label="Processing Sustaining")
pl.plot(inflation*self.theEconomicDataManager.GandAOpex/1e6,"r:",label="G&A")
pl.plot(inflation*self.theInfrastructureManager.infrastructureCapex/1e6,"k--",label="Infrastructure Startup")
pl.plot(inflation*self.theInfrastructureManager.infrastructureOpex/1e6,"k-",label="Infrastructure Sustaining")
pl.legend(loc="lower right")
pl.show()
def RecordResults(self,problemManager):
fid = open(problemManager.outputPrefix + ".txt","wt")
fid.write("#BTNCF:\n")
np.savetxt(fid,self.theEconomicDataManager.btNCF,newline=" ")
fid.write("\n")
fid.write("#Royalties:\n")
np.savetxt(fid,self.theEconomicDataManager.royalties,newline=" ")
fid.write("\n")
fid.write("#taxes:\n")
np.savetxt(fid,self.theEconomicDataManager.taxes,newline=" ")
fid.write("\n")
fid.write("#ATNCF:\n")
np.savetxt(fid,self.theEconomicDataManager.atNCF,newline=" ")
fid.write("\n")
fid.write("#Total Mined:\n")
np.savetxt(fid,self.theMiningSystem.materialMined,newline=" ")
fid.write("\n")
fid.write("#Ore Mined:\n")
np.savetxt(fid,self.theMiningSystem.oreMined,newline=" ")
fid.write("\n")
fid.write("#Waste Mined:\n")
np.savetxt(fid,self.theMiningSystem.wasteMined,newline=" ")
fid.write("\n")
fid.write("#Ore processed:\n")
np.savetxt(fid,self.theProcessingSystem.oreProcessed,newline=" ")
fid.write("\n")
fid.write("#Concentrate:\n")
np.savetxt(fid,self.theProcessingSystem.concentrateProduced,newline=" ")
fid.write("\n")
fid.write("#Revenue:\n")
np.savetxt(fid,self.theEconomicDataManager.revenue,newline=" ")
fid.write("\n")
fid.write("#Mining Startup:\n")
np.savetxt(fid,self.theMiningSystem.miningCapex,newline=" ")
fid.write("\n")
fid.write("#Mining Sustaining:\n")
np.savetxt(fid,self.theMiningSystem.miningOpex,newline=" ")
fid.write("\n")
fid.write("#Processing Startup:\n")
np.savetxt(fid,self.theProcessingSystem.processingCapex,newline=" ")
fid.write("\n")
fid.write("#Processing Sustaining:\n")
| np.savetxt(fid,self.theProcessingSystem.processingOpex,newline=" ") | numpy.savetxt |
"""@package Methods.Machine.NotchEvenDist.build_geometry
NotchEvenDist build_geometry method
@date Created on 03-09-2019 15:47
@author sebastian_g
@todo: intermediate solution, unittest it only if needed
"""
from pyleecan.Classes.Arc1 import Arc1
import numpy as np
def build_geometry(self, alpha_begin, alpha_end, label=""):
"""Compute the curve (Line) needed to plot the object.
The ending point of a curve is the starting point of the next curve in
the list
Parameters
----------
self : NotchEvenDist
A NotchEvenDist object
Returns
-------
    curve_list: list
        A list of every individual notch's lines
"""
Rbo = self.get_Rbo()
alphaM = np.array([])
alpha0 = np.array([])
alpha1 = np.array([])
_line_list = list()
# collect all notches data
for idx, shape in enumerate(self.notch_shape):
Zs = shape.Zs
ang_open = self.notch_shape[idx].comp_angle_opening()
aM = np.linspace(0, Zs, Zs, endpoint=False) * 2 * np.pi / Zs + self.alpha[idx]
a0 = aM - ang_open / 2
a1 = aM + ang_open / 2
sid = | np.ones_like(alphaM) | numpy.ones_like |
from __future__ import absolute_import, division, print_function
import inspect
import itertools
import os
import warnings
from collections import defaultdict
from contextlib import contextmanager
from numbers import Number
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import scanpy as sc
import scipy as sp
import tensorflow as tf
from anndata._core.aligned_mapping import AxisArrays
from bigarray import MmapArrayWriter
from scipy.sparse import issparse
from scipy.stats import pearsonr, spearmanr
from six import string_types
from sklearn.cluster import MiniBatchKMeans, SpectralClustering
from sklearn.decomposition import IncrementalPCA
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import (mutual_info_classif,
mutual_info_regression)
from sklearn.mixture import GaussianMixture
from odin import visual as vs
from odin.search import diagonal_linear_assignment
from odin.stats import (describe, is_discrete, sparsity_percentage,
train_valid_test_split)
from odin.utils import (MPI, IndexedList, as_tuple, batching, cache_memory,
catch_warnings_ignore, cpu_count, is_primitive)
from odin.utils.crypto import md5_checksum
from sisua.data._single_cell_base import BATCH_SIZE, _OMICbase
from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC
from sisua.data.utils import (apply_artificial_corruption, get_library_size,
is_binary_dtype, is_categorical_dtype,
standardize_protein_name)
from sisua.label_threshold import ProbabilisticEmbedding
# ===========================================================================
# Helper
# ===========================================================================
def _threshold(x, nmin=2, nmax=5):
if x.ndim == 1:
x = x[:, np.newaxis]
models = []
aic = []
for n_components in range(int(nmin), int(nmax)):
gmm = GaussianMixture(n_components=n_components, random_state=1)
gmm.fit(x)
models.append(gmm)
aic.append(gmm.aic(x))
  # select the best model (AIC: lower is better)
  gmm = models[np.argmin(aic)]
  y = gmm.predict(x)
  idx = np.argmax(gmm.means_.ravel())
  return (y == idx).astype(bool)
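# Minimal usage sketch for the helper above (hypothetical data; it relies on the
# numpy and GaussianMixture imports already at the top of this module and is
# defined but never called, so nothing runs at import time).
def _demo_threshold():
  rng = np.random.RandomState(0)
  x = np.concatenate([rng.normal(0., 1., 500), rng.normal(5., 1., 500)])
  mask = _threshold(x, nmin=2, nmax=5)  # True for points in the high-mean component
  return mask.sum(), mask.size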
# ===========================================================================
# Main
# ===========================================================================
class _OMICanalyzer(_OMICbase):
def get_x_probs(self, omic=None):
r""" Return the probability embedding of an OMIC """
return self.probabilistic_embedding(omic=omic)[1]
def get_x_bins(self, omic=None):
r""" Return the binary embedding of an OMIC """
return self.probabilistic_embedding(omic=omic)[2]
# ******************** transformation ******************** #
def corrupt(self,
omic=None,
dropout_rate=0.2,
retain_rate=0.2,
distribution='binomial',
inplace=True,
seed=1):
r"""
omic : `OMIC`, which omic type will be corrupted
dropout_rate : scalar (0.0 - 1.0), (default=0.25)
how many entries (in percent) be selected for corruption.
retain_rate : scalar (0.0 - 1.0), (default=0.2)
how much percent of counts retained their original values.
distribution : {'binomial', 'uniform} (default='binomial')
omic : `sisua.data.OMIC`, which OMIC type will be corrupted
inplace : `bool` (default=True). Perform computation inplace or return
new `SingleCellOMIC` with the corrupted data.
seed : `int` (default=8). Seed for the random state.
"""
if omic is None:
omic = self.current_omic
om = self if inplace else self.copy()
om._record('corrupt', locals())
if not (0. < retain_rate < 1. or 0. < dropout_rate < 1.):
return om
for o in omic:
apply_artificial_corruption(om.numpy(o),
dropout=dropout_rate,
retain_rate=retain_rate,
distribution=distribution,
copy=False,
seed=seed)
om._calculate_statistics(o)
return om
def filter_highly_variable_genes(self,
min_disp: float = 1.0,
max_disp: float = np.inf,
min_mean: float = 0.01,
max_mean: float = 8,
n_top_genes: int = 1000,
n_bins: int = 20,
flavor: str = 'seurat',
inplace: bool = True):
r""" Annotate highly variable genes [Satija15]_ [Zheng17]_.
https://www.rdocumentation.org/packages/Seurat/versions/2.3.4/topics/FindVariableGenes
`Expects logarithmized data`.
Depending on `flavor`, this reproduces the R-implementations of Seurat
[Satija15]_ and Cell Ranger [Zheng17]_.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Arguments:
      min_disp : `float`, optional (default=1.0)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      max_disp : `float`, optional (default=`np.inf`)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      min_mean : `float`, optional (default=0.01)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      max_mean : `float`, optional (default=8)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      n_top_genes : {`float`, `int`, `None`}, optional (default=1000)
        Number of highly-variable genes to keep; if the value is in (0, 1],
        it is interpreted as a fraction of the genes to keep.
n_bins : `int`, optional (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1.
flavor : `{'seurat', 'cell_ranger'}`, optional (default='seurat')
Choose the flavor for computing normalized dispersion. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
inplace : `bool` (default=True)
if False, copy the `SingleCellOMIC` and apply the vargene filter.
Returns:
New `SingleCellOMIC` with filtered features if `applying_filter=True`
else assign `SingleCellOMIC.highly_variable_features` with following
attributes.
highly_variable : bool
boolean indicator of highly-variable genes
**means**
means per gene
**dispersions**
dispersions per gene
**dispersions_norm**
normalized dispersions per gene
Notes:
Proxy to `scanpy.pp.highly_variable_genes`. It is recommended to do
`log1p` normalization before if `flavor='seurat'`.
"""
flavor = str(flavor).lower()
if n_top_genes is not None:
if 0. < n_top_genes < 1.:
n_top_genes = int(n_top_genes * self.n_vars)
# prepare the data
# this function will take the exponential of X all the time,
# so non-logarithmzed data might led to overflow
omics = self if inplace else self.copy()
omics._record('filter_highly_variable_genes', locals())
sc.pp.highly_variable_genes(omics,
min_disp=min_disp,
max_disp=max_disp,
min_mean=min_mean,
max_mean=max_mean,
n_top_genes=n_top_genes,
n_bins=int(n_bins),
flavor=flavor,
subset=True,
inplace=False)
omics._name += '_vargene'
omics._n_vars = omics._X.shape[1]
# recalculate library info
omics._calculate_statistics()
return omics
def filter_genes(self,
min_counts=None,
max_counts=None,
min_cells=None,
max_cells=None,
inplace=True):
r""" Filter features (columns) based on number of rows or counts.
Keep columns that have at least ``[min_counts, max_counts]``
or are expressed in at least ``[min_row_counts, max_row_counts]``
Arguments:
min_counts : {int, None} (default=None)
Minimum number of counts required for a gene to pass filtering.
max_counts : {int, None} (default=None)
Maximum number of counts required for a gene to pass filtering.
min_cells : {int, None} (default=None)
Minimum number of cells expressed required for a feature to pass filtering.
max_cells : {int, None} (default=None)
Maximum number of cells expressed required for a feature to pass filtering.
inplace : `bool` (default=True)
if False, return new `SingleCellOMIC` with the filtered
genes applied
Returns:
if `applying_filter=False` annotates the `SingleCellOMIC`, otherwise,
return new `SingleCellOMIC` with the new subset of genes
gene_subset : `numpy.ndarray`
Boolean index mask that does filtering. `True` means that the
gene is kept. `False` means the gene is removed.
number_per_gene : `numpy.ndarray`
Depending on what was thresholded (`counts` or `cells`), the array
stores `n_counts` or `n_cells` per gene.
Note:
Proxy method to Scanpy preprocessing
"""
omics = self if inplace else self.copy()
omics._record('filter_genes', locals())
sc.pp.filter_genes(omics,
min_counts=min_counts,
max_counts=max_counts,
min_cells=min_cells,
max_cells=max_cells,
inplace=True)
omics._name += '_filtergene'
omics._n_vars = omics._X.shape[1]
# recalculate library info
omics._calculate_statistics()
return omics
def filter_cells(self,
min_counts=None,
max_counts=None,
min_genes=None,
max_genes=None,
inplace=True):
r""" Filter examples (rows) based on number of features or counts.
Keep rows that have at least ``[min_counts, max_counts]``
or are expressed in at least ``[min_col_counts, max_col_counts]``
Arguments:
min_counts : {int, None} (default=None)
Minimum number of counts required for a cell to pass filtering.
max_counts : {int, None} (default=None)
Maximum number of counts required for a cell to pass filtering.
min_genes : {int, None} (default=None)
Minimum number of genes expressed required for a cell to pass filtering.
max_genes : {int, None} (default=None)
Maximum number of genes expressed required for a cell to pass filtering.
inplace : `bool` (default=True)
if False, return new `SingleCellOMIC` with the filtered
cells applied
Returns:
if `applying_filter=False` annotates the `SingleCellOMIC`, otherwise,
return new `SingleCellOMIC` with the new subset of cells
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
number_per_cell : numpy.ndarray
        Depending on what was thresholded (``counts`` or ``genes``), the array
        stores ``n_counts`` or ``n_genes`` per cell.
Note:
Proxy method to Scanpy preprocessing
"""
# scanpy messed up here, the obs was not updated with the new indices
cells_subset, number_per_cell = sc.pp.filter_cells(self,
min_counts=min_counts,
max_counts=max_counts,
min_genes=min_genes,
max_genes=max_genes,
inplace=False)
omics = self if inplace else self.copy()
omics._record('filter_cells', locals())
omics.apply_indices(cells_subset, observation=True)
omics._name += '_filtercell'
# recalculate library info
omics._calculate_statistics()
return omics
def probabilistic_embedding(self,
omic=None,
n_components_per_class=2,
positive_component=1,
log_norm=True,
clip_quartile=0.,
remove_zeros=True,
ci_threshold=-0.68,
seed=1,
pbe: Optional[ProbabilisticEmbedding] = None):
r""" Fit a GMM on each feature column to get the probability or binary
representation of the features
Return:
`ProbabilisticEmbedding` model
np.ndarray : probabilities X
np.ndarray : binary X
Arguments:
pbe : {`sisua.ProbabilisticEmbedding`, `None`}, optional pretrained
instance of `ProbabilisticEmbedding`
"""
if omic is None:
omic = self.current_omic
self._record('probabilistic_embedding', locals())
# We turn-off default log_norm here since the data can be normalized
# separately in advance.
omic = OMIC.parse(omic)
X = self.numpy(omic)
if X.shape[1] >= 100:
warnings.warn("%d GMM will be trained!" % self.shape[1])
name = omic.name
pbe_name = '%s_pbe' % name
prob_name = '%s_prob' % name
bin_name = '%s_bin' % name
label_name = self.get_labels_name(name)
if is_binary_dtype(X):
X_prob = X
X_bin = X
self.uns[pbe_name] = None
else:
if pbe is None:
if pbe_name not in self.uns:
pbe = ProbabilisticEmbedding(
n_components_per_class=n_components_per_class,
positive_component=positive_component,
log_norm=log_norm,
clip_quartile=clip_quartile,
remove_zeros=remove_zeros,
ci_threshold=ci_threshold,
random_state=seed)
with catch_warnings_ignore(ConvergenceWarning):
pbe.fit(X)
self.uns[pbe_name] = pbe
else:
pbe = self.uns[pbe_name]
else:
assert isinstance(pbe, ProbabilisticEmbedding), \
'pbe, if given, must be instance of sisua.ProbabilisticEmbedding'
# make prediction
X_prob = np.clip(pbe.predict_proba(X), 0. + 1e-8, 1. - 1e-8)
X_bin = pbe.predict(X)
# store the data
if prob_name not in self.obsm:
self.obsm[prob_name] = X_prob
if label_name not in self.obs and name + '_var' in self.uns:
omic_id = self.get_var(name).index
labels = [omic_id[i] for i in np.argmax(self.obsm[prob_name], axis=1)]
self.obs[label_name] = pd.Categorical(labels)
if bin_name not in self.obsm:
self.obsm[bin_name] = X_bin
return pbe, self.obsm[prob_name], self.obsm[bin_name]
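  # Usage sketch (kept as comments so nothing runs at class-definition time;
  # `sco` denotes a hypothetical SingleCellOMIC instance and OMIC.proteomic an
  # assumed OMIC type):
  #   pbe, X_prob, X_bin = sco.probabilistic_embedding(omic=OMIC.proteomic)
  #   # X_prob : per-cell positive-component probabilities in (0, 1)
  #   # X_bin  : the thresholded binary version of the same matrix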
def dimension_reduce(self,
omic=None,
n_components=100,
algo='pca',
random_state=1):
r""" Perform dimension reduction on given OMIC data. """
if omic is None:
omic = self.current_omic
self._record('dimension_reduce', locals())
algo = str(algo).lower().strip()
assert algo in ('pca', 'tsne', 'umap'), \
"Only support algorithm: 'pca', 'tsne', 'umap'; but given: '{algo}'"
omic = OMIC.parse(omic)
name = f"{omic.name}_{algo}"
## already transformed
if name in self.obsm:
return self.obsm[name] if n_components is None else \
self.obsm[name][:, :int(n_components)]
X = self.numpy(omic)
n_components = min(n_components, X.shape[1])
### train new PCA model
if algo == 'pca':
X_ = np.empty(shape=(X.shape[0], n_components), dtype=X.dtype)
model = IncrementalPCA(n_components=n_components)
# fitting
for start, end in batching(BATCH_SIZE, n=X.shape[0]):
chunk = X[start:end]
chunk = chunk.toarray() if issparse(chunk) else chunk
model.partial_fit(chunk)
# transforming
for start, end in batching(BATCH_SIZE, n=X.shape[0]):
chunk = X[start:end]
chunk = chunk.toarray() if issparse(chunk) else chunk
X_[start:end] = model.transform(chunk)
### TSNE
elif algo == 'tsne':
from odin.ml import fast_tsne
X_ = fast_tsne(X, n_components=n_components, return_model=False)
model = None
## UMAP
elif algo == 'umap':
try:
import cuml
method = 'rapids'
except ImportError:
method = 'umap'
connectivities, distances, nn = self.neighbors(omic,
method='umap',
random_state=random_state)
self.uns['neighbors'] = nn
self.obsp['connectivities'] = connectivities
self.obsp['distances'] = distances
with catch_warnings_ignore(UserWarning):
sc.tl.umap(self, method=method, random_state=random_state, copy=False)
X_ = self.obsm['X_umap']
model = self.uns['umap']
del self.obsm['X_umap']
del self.uns['umap']
del self.uns['neighbors']
del self.obsp['connectivities']
del self.obsp['distances']
## store and return the result
self.obsm[name] = X_
# the model could be None, in case of t-SNE
self.uns[name] = model
return self.obsm[name] if n_components is None else \
self.obsm[name][:, :int(n_components)]
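  # Usage sketch (comments only; `sco` is a hypothetical instance). The reduced
  # array is cached in `.obsm["<omic>_<algo>"]`, so repeated calls are cheap:
  #   X_pca = sco.dimension_reduce(omic=OMIC.transcriptomic, n_components=50, algo='pca')
  #   X_umap = sco.dimension_reduce(omic=OMIC.transcriptomic, algo='umap')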
def expm1(self, omic=None, inplace=True):
if omic is None:
omic = self.current_omic
om = self if inplace else self.copy()
om._record('expm1', locals())
_expm1 = lambda x: (np.expm1(x.data, out=x.data)
if issparse(x) else np.expm1(x, out=x))
X = om.numpy(omic)
for s, e in batching(n=self.n_obs, batch_size=BATCH_SIZE):
X[s:e] = _expm1(X[s:e])
om._calculate_statistics(omic)
return om
def normalize(self,
omic=None,
total=False,
log1p=False,
scale=False,
target_sum=None,
exclude_highly_expressed=False,
max_fraction=0.05,
max_value=None,
inplace=True):
r""" If ``exclude_highly_expressed=True``, very highly expressed genes are
excluded from the computation of the normalization factor (size factor)
for each cell. This is meaningful as these can strongly influence
the resulting normalized values for all other genes [1]_.
Arguments:
total : bool (default=False). Normalize counts per cell.
log1p : bool (default=False). Logarithmize the data matrix.
scale : bool (default=False). Scale data to unit variance and zero mean.
target_sum : {float, None} (default=None)
If None, after normalization, each observation (cell) has a
total count equal to the median of total counts for
observations (cells) before normalization.
exclude_highly_expressed : bool (default=False)
Exclude (very) highly expressed genes for the computation of the
normalization factor (size factor) for each cell. A gene is considered
highly expressed, if it has more than ``max_fraction`` of the total counts
in at least one cell. The not-excluded genes will sum up to
``target_sum``.
max_fraction : bool (default=0.05)
If ``exclude_highly_expressed=True``, consider cells as highly expressed
that have more counts than ``max_fraction`` of the original total counts
in at least one cell.
max_value : `float` or `None`, optional (default=`None`)
Clip (truncate) to this value after scaling. If `None`, do not clip.
inplace : `bool` (default=True)
if False, return new `SingleCellOMIC` with the filtered
cells applied
References:
Weinreb et al. (2016), SPRING: a kinetic interface for visualizing
high dimensional single-cell expression data, bioRxiv.
Note:
Proxy to `scanpy.pp.normalize_total`, `scanpy.pp.log1p` and
`scanpy.pp.scale`
"""
if omic is None:
omic = self.current_omic
om = self if inplace else self.copy()
om._record('normalize', locals())
if omic != OMIC.transcriptomic:
org_X = om._X
om._X = om.numpy(omic)
if total:
sc.pp.normalize_total(om,
target_sum=target_sum,
exclude_highly_expressed=exclude_highly_expressed,
max_fraction=max_fraction,
inplace=True)
# since the total counts is normalized, store the old library size
om._name += '_total'
if log1p:
sc.pp.log1p(om, chunked=True, chunk_size=BATCH_SIZE, copy=False)
om._name += '_log1p'
del om.uns['log1p']
# scaling may result negative total counts
if scale:
sc.pp.scale(om, zero_center=True, max_value=max_value, copy=False)
om._name += '_scale'
if omic != OMIC.transcriptomic:
om.obsm[omic.name] = om.X
om._X = org_X
om._calculate_statistics(omic)
return om
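  # Usage sketch (comments only; `sco` is a hypothetical instance). A common
  # recipe is total-count normalization followed by log1p, i.e. the scanpy
  # calls proxied above:
  #   sco.normalize(omic=OMIC.transcriptomic, total=True, log1p=True, scale=False)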
# ******************** metrics ******************** #
def neighbors(self,
omic=None,
n_neighbors=12,
n_pcs=100,
knn=True,
method='umap',
metric='euclidean',
random_state=1):
r"""\
Compute a neighborhood graph of observations [McInnes18]_.
The neighbor search efficiency of this heavily relies on UMAP [McInnes18]_,
which also provides a method for estimating connectivities of data points -
the connectivity of the manifold (`method=='umap'`). If `method=='gauss'`,
connectivities are computed according to [Coifman05]_, in the adaption of
[Haghverdi16]_.
Arguments:
n_neighbors : `int` (default=12)
The size of local neighborhood (in terms of number of neighboring data
points) used for manifold approximation. Larger values result in more
global views of the manifold, while smaller values result in more local
data being preserved. In general values should be in the range 2 to 100.
If `knn` is `True`, number of nearest neighbors to be searched. If `knn`
is `False`, a Gaussian kernel width is set to the distance of the
`n_neighbors` neighbor.
n_pcs : {`int`, `None`} (default=None)
Use this many PCs. If n_pcs==0 use .X if use_rep is None.
if n_pcs==None, use obsm['X_pca'].
use_rep : {`None`, ‘X’} or any key for .obsm, optional (default=None)
Use the indicated representation. If None, the representation is
chosen automatically: for .n_vars < 50, .X is used, otherwise
‘X_pca’ is used. If ‘X_pca’ is not present, it’s computed with
default parameters.
knn : `bool` (default=True)
If `True`, use a hard threshold to restrict the number of neighbors to
`n_neighbors`, that is, consider a knn graph. Otherwise, use a Gaussian
Kernel to assign low weights to neighbors more distant than the
`n_neighbors` nearest neighbor.
method : {{'umap', 'gauss', `rapids`}} (default: `'umap'`)
Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_
with adaptive width [Haghverdi16]_) for computing connectivities.
Use 'rapids' for the RAPIDS implementation of UMAP (experimental, GPU
only).
metric : {`str`, `callable`} (default='euclidean')
A known metric’s name or a callable that returns a distance.
Returns:
returns neighbors object with the following:
**[OMIC]_connectivities** : sparse matrix (dtype `float32`)
Weighted adjacency matrix of the neighborhood graph of data
points. Weights should be interpreted as connectivities.
**[OMIC]_distances** : sparse matrix (dtype `float32`)
Instead of decaying weights, this stores distances for each pair of
neighbors.
**[OMIC]_neighbors** : dictionary
configuration and params of fitted k-NN.
"""
if omic is None:
omic = self.current_omic
self._record('neighbors', locals())
omic = OMIC.parse(omic)
name = f"{omic.name}_neighbors"
if name not in self.uns:
omic_name = omic.name
if self.get_dim(omic) > 100:
self.dimension_reduce(omic, algo='pca', random_state=random_state)
omic_name = omic.name + '_pca'
with catch_warnings_ignore(Warning):
obj = sc.pp.neighbors(self,
n_neighbors=n_neighbors,
knn=knn,
method=method,
metric=metric,
n_pcs=int(n_pcs),
use_rep=omic_name,
random_state=random_state,
copy=True)
self.uns[name] = obj.uns['neighbors']
self.obsp[f"{omic.name}_connectivities"] = obj.obsp['connectivities']
self.obsp[f"{omic.name}_distances"] = obj.obsp['distances']
del obj
return (self.obsp[f"{omic.name}_connectivities"],
self.obsp[f"{omic.name}_distances"], self.uns[name])
def clustering(self,
omic=None,
n_clusters=None,
n_init='auto',
algo='kmeans',
matching_labels=True,
return_key=False,
random_state=1):
r""" Perform clustering for given OMIC type, the cluster labels will be
assigned to `obs` with key "{omic}_{algo}{n_clusters}"
Arguments:
algo : {'kmeans', 'knn', 'pca', 'tsne', 'umap'}.
Clustering algorithm, in case algo in ('pca', 'tsne', 'umap'),
perform dimension reduction before clustering.
matching_labels : a Boolean. Matching OMIC var_names to appropriate
clusters, only when `n_clusters` is string or OMIC type.
return_key : a Boolean. If True, return the name of the labels
stored in `.obs` instead of the labels array.
"""
if omic is None:
omic = self.current_omic
self._record('clustering', locals())
## clustering algorithm
algo = str(algo).strip().lower()
## input data
omic = OMIC.parse(omic)
cluster_omic = None
if n_clusters is None:
cluster_omic = omic
n_clusters = self.get_dim(omic)
elif isinstance(n_clusters, Number):
n_clusters = int(n_clusters)
else:
cluster_omic = OMIC.parse(n_clusters)
n_clusters = self.get_dim(cluster_omic)
n_clusters = int(n_clusters)
n_init = int(n_init) if isinstance(n_init, Number) else \
int(n_clusters) * 3
## check if output already extracted
output_name = f"{omic.name}_{algo}{n_clusters}"
if output_name in self.obs:
return output_name if return_key else self.obs[output_name]
## warning
if n_clusters > 50:
warnings.warn(
f"Found omic type:{cluster_omic} with {n_clusters} clusters")
## fit KMeans
if algo in ('pca', 'tsne', 'umap', 'kmeans'):
if algo in ('pca', 'tsne', 'umap'):
X = self.dimension_reduce(omic=omic, n_components=100, algo=algo)
else:
X = self.numpy(omic)
model = MiniBatchKMeans(n_clusters=int(n_clusters),
max_iter=1000,
n_init=int(n_init),
compute_labels=False,
batch_size=BATCH_SIZE,
random_state=random_state)
# better suffering the batch
for s, e in batching(BATCH_SIZE, self.n_obs, seed=random_state):
x = X[s:e]
model.partial_fit(x)
# make prediction
labels = []
for s, e in batching(BATCH_SIZE, self.n_obs):
x = X[s:e]
labels.append(model.predict(x))
labels = np.concatenate(labels, axis=0)
## fit KNN
elif algo == 'knn':
connectivities, distances, nn = self.neighbors(omic)
n_neighbors = min(nn['params']['n_neighbors'],
np.min(np.sum(connectivities > 0, axis=1)))
model = SpectralClustering(n_clusters=n_clusters,
random_state=random_state,
n_init=n_init,
affinity='precomputed_nearest_neighbors',
n_neighbors=n_neighbors)
labels = model.fit_predict(connectivities)
else:
raise NotImplementedError(algo)
## correlation matrix
if cluster_omic is not None and matching_labels:
_, X, _ = self.probabilistic_embedding(cluster_omic)
# omic-cluster correlation matrix
corr = np.empty(shape=(X.shape[1], n_clusters), dtype=np.float32)
for i, x in enumerate(X.T):
for lab in range(n_clusters):
mask = labels == lab
corr[i, lab] = np.sum(x[mask])
ids = diagonal_linear_assignment(corr)
varnames = self.get_var_names(cluster_omic)
labels_to_omic = {lab: name for lab, name, in zip(ids, varnames)}
labels = np.array([labels_to_omic[i] for i in labels])
## saving data and model
self.obs[output_name] = pd.Categorical(labels)
# self.uns[output_name] = model
return output_name if return_key else labels
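  # Usage sketch (comments only; `sco` is a hypothetical instance). Cluster the
  # transcriptome into one group per protein marker and let the correlation /
  # linear-assignment step above rename cluster ids to marker names:
  #   labels = sco.clustering(omic=OMIC.transcriptomic, n_clusters=OMIC.proteomic,
  #                           algo='kmeans', matching_labels=True)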
def louvain(self,
omic=None,
resolution=None,
restrict_to=None,
adjacency=None,
flavor='vtraag',
directed=True,
use_weights=False,
partition_type=None,
partition_kwargs={},
random_state=1):
r"""Cluster cells into subgroups [Blondel08]_ [Levine15]_ [Traag17]_.
Cluster cells using the Louvain algorithm [Blondel08]_ in the implementation
of [Traag17]_. The Louvain algorithm has been proposed for single-cell
analysis by [Levine15]_.
    This requires having run :func:`~scanpy.pp.neighbors` or
    `~scanpy.external.pp.bbknn` first,
    or explicitly passing an ``adjacency`` matrix.
Arguments:
resolution
For the default flavor (``'vtraag'``), you can provide a resolution
(higher resolution means finding more and smaller clusters),
which defaults to 1.0. See “Time as a resolution parameter” in [Lambiotte09]_.
restrict_to
Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain ``(obs_key, list_of_categories)``.
key_added
Key under which to add the cluster labels. (default: ``'louvain'``)
adjacency
Sparse adjacency matrix of the graph, defaults to
``adata.uns['neighbors']['connectivities']``.
flavor : {``'vtraag'``, ``'igraph'``}
        Choose between two packages for computing the clustering.
``'vtraag'`` is much more powerful, and the default.
directed
Interpret the ``adjacency`` matrix as directed graph?
use_weights
Use weights from knn graph.
partition_type
Type of partition to use.
Only a valid argument if ``flavor`` is ``'vtraag'``.
partition_kwargs
Key word arguments to pass to partitioning,
if ``vtraag`` method is being used.
random_state : Change the initialization of the optimization.
Return:
array `[n_samples]` : louvain community indices
array `[n_samples]` : decoded louvain community labels
"""
if omic is None:
omic = self.current_omic
self._record('louvain', locals())
try:
import louvain
except ImportError:
raise ImportError("pip install louvain>=0.6 python-igraph")
omic = OMIC.parse(omic)
output_name = omic.name + '_louvain'
if output_name not in self.obs:
with catch_warnings_ignore(Warning):
connectivities, distances, nn = self.neighbors(omic)
self.uns["neighbors"] = nn
self.obsp["connectivities"] = connectivities
self.obsp["distances"] = distances
sc.tl.louvain(self,
resolution=resolution,
random_state=random_state,
restrict_to=restrict_to,
key_added=output_name,
adjacency=adjacency,
flavor=flavor,
directed=directed,
use_weights=use_weights,
partition_type=partition_type,
partition_kwargs=partition_kwargs,
copy=False)
del self.uns['neighbors']
del self.obsp["connectivities"]
del self.obsp["distances"]
model = self.uns['louvain']
del self.uns['louvain']
self.uns[output_name] = model
y = self.obs[output_name].to_numpy().astype(np.float32)
### decode louvain community into labels
output_labels = f"{output_name}_labels"
if output_labels not in self.obs:
var_names = self.get_var_names(omic)
# mapping community_index -> confident value for each variables
confidence = defaultdict(float)
for i, x in zip(y, self.get_x_probs(omic=omic)):
confidence[int(i)] += x
# thresholding the variables
labels = {}
for community, x in confidence.items():
labels[community] = '_'.join(var_names[_threshold(x, 2, 5)])
# store in obs
self.obs[output_labels] = np.array([labels[i] for i in y])
### return
y_labels = self.obs[output_labels].to_numpy()
return y, y_labels
# ******************** Genes metrics and ranking ******************** #
def top_vars(self, n_vars=100, return_indices=False):
r""" The genes that are highly variated, high dispersion, less dropout
(i.e. smallest counts of zero-values), and appeared in most cells
will be returned.
Arguments:
return_indices : a Boolean. If True, return the index of top genes,
otherwise, return the genes' ID.
"""
self.calculate_quality_metrics()
fnorm = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))
# prepare data
n_cells = fnorm(self.var['n_cells'].values)
zeros = fnorm(self.var['pct_dropout'].values)
dispersion = fnorm(self.var['dispersions'].values)
# higher is better TODO: check again what is the best strategy here
rating = n_cells + (1. - zeros) + dispersion
ids = np.argsort(rating)[::-1]
# indexing the genes
genes = | np.arange(self.n_vars, dtype=np.int64) | numpy.arange |
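# --- Illustrative sketch (synthetic numbers, independent of the class above): the
# rating used by top_vars, i.e. min-max normalise each per-gene quality metric and
# rank genes by n_cells + (1 - dropout) + dispersion, highest first.
import numpy as np

rng = np.random.RandomState(1)
n_cells_demo = rng.randint(10, 1000, size=20).astype(float)
dropout_demo = rng.uniform(0., 1., size=20)
dispersion_demo = rng.uniform(0., 2., size=20)
fnorm_demo = lambda x: (x - x.min()) / (x.max() - x.min())
rating_demo = fnorm_demo(n_cells_demo) + (1. - fnorm_demo(dropout_demo)) + fnorm_demo(dispersion_demo)
top5_demo = np.argsort(rating_demo)[::-1][:5]   # indices of the 5 highest-rated genes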
'''
Defines dataset classes for various used datasets, some are not used anymore
Contains test functions for volume visualization and slice visualization
Author: <NAME>
https://github.com/dscarmo
'''
import os
from os.path import join as add_path
import glob
import pickle
from sys import argv
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import cv2 as cv
import h5py
import collections
import nibabel as nib
import psutil
import time
import copy
import json
import torch
import torch.utils.data as data
from torch.utils.data import ConcatDataset
import torchvision
from transforms import ToTensor, ToFloat32, Compose, RandomAffine, Intensity, Noisify, SoftTarget, CenterCrop
import multiprocessing as mp
from multiprocessing import Lock, Process, Queue, Manager
from utils import normalizeMri, viewnii, myrotate, int_to_onehot, chunks, HALF_MULTI_TASK_NCHANNELS, MULTI_TASK_NCHANNELS
from utils import half_multi_task_labels, limit_multi_labels, imagePrint, type_assert, one_hot, get_slice, ITKManager, split_l_r
from nathip import NatHIP, get_group
cla_lock = Lock()
adni_lock = Lock()
orientations = ["sagital", "coronal", "axial"] # original data orientations
DEFAULT_PATH = "../data"
VALID_MODES = ['train', 'validation', 'test']
# Post migration paths
default_datapath = os.path.join("/home", "diedre", "Dropbox", "bigdata", "mni_hip_data")
default_adni = os.path.join("/home", "diedre", "Dropbox", "bigdata", "manual_selection_rotated", "isometric")
mni_adni = os.path.join("/home", "diedre", "Dropbox", "bigdata", "manual_selection_rotated", "raw2mni")
mni_harp = os.path.join("/home", "diedre", "Dropbox", "bigdata", "harp", "mniharp")
default_harp = os.path.join("/home", "diedre", "Dropbox", "bigdata", "harp")
multitask_path = os.path.join("/home", "diedre", "Dropbox", "bigdata", "Hippocampus", "volbrain", "PACIENTES_E_CONTROLES")
multitask_hip_processed = os.path.join("/home", "diedre", "Dropbox", "bigdata", "Hippocampus", "processed")
multitask_hip_processed_slices = os.path.join("/home", "diedre", "Dropbox", "bigdata", "Hippocampus", "processed_slices")
HARP_CLASSES = ["cn", "mci", "ad"]
class DTISegDataset(data.Dataset):
'''
Abstracts segmentation with DTI data
'''
def __init__(self, mode, path=DEFAULT_PATH, transform=None, verbose=True, orientation=None, zero_background=True,
balance="balanced_205", norm_type="zero_to_plus", split=(0.8, 0.2), overhide_folder_name=None,
limit_masks=False, patch_size=64, register_strategy='v01', use_t1=False, displasia=False, t1_separate=False):
'''
path: folder containing data
mode: one of ['train', 'validation', 'test'], test will return volumes, other will return patches
transform: list of transforms to apply
        verbose: whether to print a lot of stuff or not
        orientation: one of ['sagital', 'coronal', 'axial'], or None when using test mode
'''
super(DTISegDataset, self).__init__()
assert mode in VALID_MODES, "mode {} should be one of {}".format(mode, VALID_MODES)
assert orientation in orientations or orientation is None, "orientation {} should be one of {}".format(orientation,
orientations)
if mode == 'test':
assert orientation is None, "test mode does not support orientation other than None"
assert balance == "test", "in test mode, balance does not matter, use balance='test'"
assert norm_type in ["zero_to_plus", "minus_to_plus", "mixed"], "norm type {} not support".format(norm_type)
assert balance in ["test", "2020", "205", "51010", "355", "51515", "510"], "norm type {} not support".format(balance)
assert np.array(split).sum() == 1, "split makes no sense, should sum to 1"
assert patch_size in [32, 64]
assert register_strategy in ["v01", "v02"]
assert use_t1 in [False, "t1only", "t1dti"]
self.use_t1 = use_t1
self.mode = mode
self.orientation = orientation
self.zero_background = zero_background
self.transform = transform
self.verbose = verbose
self.limit_masks = limit_masks
self.displasia = displasia
self.t1_separate = t1_separate
separator = os.sep
path_tokens = path.split(separator)
folder_name = None
if overhide_folder_name is not None:
folder_name = overhide_folder_name
else:
if self.displasia:
if mode == "test":
folder_name = "01"
elif balance == "510":
if norm_type == "zero_to_plus":
if orientation == "sagital":
folder_name = "01"
elif orientation == "coronal":
folder_name = "02"
elif orientation == "axial":
folder_name = "03"
else:
if register_strategy == "v01":
if balance == "2020":
folder_name = "00"
elif balance == "205" or balance == "test":
if norm_type == "zero_to_plus":
folder_name = "01"
elif norm_type == "minus_to_plus":
folder_name = "02"
elif norm_type == "mixed":
folder_name = "03"
elif norm_type == "zero_to_plus":
if patch_size == 32:
if balance == "51010":
folder_name = "04"
elif balance == "355":
folder_name = "05"
elif patch_size == 64:
if balance == "51010":
folder_name = "06"
elif balance == "355":
folder_name = "07"
elif register_strategy == "v02":
if balance == "test":
folder_name = "04"
elif norm_type == "zero_to_plus":
if patch_size == 64:
if balance == "355":
folder_name = "16"
elif balance == "51010":
folder_name = "18"
elif balance == "51515":
if orientation == "sagital":
if limit_masks:
folder_name = "23"
else:
# folder_name = "19"
folder_name = "20"
elif orientation == "coronal":
folder_name = "21"
elif orientation == "axial":
folder_name = "22"
elif patch_size == 32:
if balance == "355":
folder_name = "14"
elif balance == "51010":
folder_name = "15"
if folder_name is None:
raise ValueError("Unsupported combination of patch_size, balance, norm_type and register_strategy: "
"{} {} {} {}".format(patch_size, balance, norm_type, register_strategy))
self.folder_name = folder_name
if self.displasia:
if self.mode == "test":
pre_folder = "Displasia/test"
else:
pre_folder = "Displasia/patches"
elif self.mode == "test":
pre_folder = "TestData"
else:
pre_folder = "patches"
if path_tokens[0] == "..": # Work around relative pathing
glob_args = [os.path.dirname(os.getcwd())] + path_tokens[1:] + [pre_folder, folder_name, "*.npz"]
self.items = glob.glob(os.path.join(*glob_args))
else:
            glob_args = [path, pre_folder, folder_name, "*.npz"]  # list, so the pattern can be swapped to "*.txt" below
self.items = glob.glob(os.path.join(*glob_args))
glob_args[-1] = "*.txt"
try:
print(glob_args)
readme_path = glob.glob(os.path.join(*glob_args))[0]
except IndexError:
print("Readme file for dataset not found.")
data_folder = os.path.join(*glob_args[:-1])
if self.mode != "test":
print(os.path.join(data_folder, self.mode + ".pkl"))
if os.path.isfile(os.path.join(data_folder, self.mode + ".pkl")):
with open(os.path.join(data_folder, self.mode + '.pkl'), 'rb') as saved_items:
self.items = pickle.load(saved_items)
for i, v in enumerate(self.items):
self.items[i] = os.path.join(data_folder, os.path.basename(self.items[i])) # support different folders
else:
print("PKL items file not saved, creating new ones...")
stop_point = int(len(self.items)*split[0])
print("Dividing dataset in point: {}".format(stop_point))
with open(os.path.join(data_folder, 'train.pkl'), 'wb') as to_save_items:
pickle.dump(self.items[:stop_point], to_save_items)
with open(os.path.join(data_folder, 'validation.pkl'), 'wb') as to_save_items:
pickle.dump(self.items[stop_point:], to_save_items)
if self.mode == "train":
self.items = self.items[:stop_point]
elif self.mode == "validation":
self.items = self.items[stop_point:]
print("DTISegDataset initialized with nitems: {}, mode: {}, path: {}, transform: {}, "
"orientation: {}, zero_background: {}, "
"balance: {}, norm_type: {}, limit_masks: {}"
"folder_name: {}".format(len(self.items), mode, path, transform, orientation,
zero_background, balance, norm_type, limit_masks, folder_name))
with open(readme_path) as readme_file:
readme = readme_file.read()
print(('-'*20 + "\nREADME: {}\n" + '-'*20).format(readme))
def __len__(self):
'''
Returns number of items in the dataset
'''
return len(self.items)
def __getitem__(self, i):
'''
Returns input data and target
'''
if self.verbose:
print("Dataset returning {}".format(self.items[i]))
npz = np.load(self.items[i])
if self.mode == "test":
dti, target, t1 = (npz["DTI_measures"], npz["mask_onehot"], npz["T1"])
else:
dti, target, t1 = (npz["DTI_measures"], npz[(self.mode != "test")*"patch_" + "mask_onehot"],
npz[(self.mode != "test")*"patch_" + "T1"])
if self.displasia:
t2 = npz["test_T2"] if self.mode == "test" else npz["patch_T2"]
if self.use_t1 == "t1only":
data = | np.zeros((1,) + t1.shape, dtype=t1.dtype) | numpy.zeros |
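# --- Illustrative sketch (synthetic shapes): the "t1only" branch above allocates a
# zero-filled single-channel array shaped like the T1 patch and copies the T1 data
# into it; the same idea with made-up arrays.
import numpy as np

t1_demo = np.random.rand(64, 64, 64).astype(np.float32)    # hypothetical T1 patch
data_demo = np.zeros((1,) + t1_demo.shape, dtype=t1_demo.dtype)
data_demo[0] = t1_demo                                      # channel 0 holds the T1 volume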
import pdb
import torch
import sys # NOQA
sys.path.insert(0, '..') # NOQA: E402
import numpy as np
import argparse
import torch.multiprocessing as mp
import os
import glob
import copy
import math
import pathlib
from logger.logger import Logger
import matplotlib
import matplotlib.pyplot as plt
import datetime, time
#from debugtools import compile_results
from utils import step_wrapper, reset_wrapper
import copy
import pygame
from alternateController.potential_field_controller import PotentialFieldController as PFController
from alternateController.social_forces_controller import SocialForcesController
from rlmethods.b_actor_critic import ActorCritic
from rlmethods.b_actor_critic import Policy
from tqdm import tqdm
from envs.drone_data_utils import classify_pedestrians
from envs.drone_data_utils import get_pedestrians_in_viscinity
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser()
#general arguments
parser.add_argument('--render', action='store_true', help="show the env.")
parser.add_argument('--num-trajs', type=int, default=50)
parser.add_argument('--max-ep-length', type=int, default=600, help='Max length of a single episode.')
parser.add_argument('--feat-extractor', type=str, default=None, help='The name of the \
feature extractor to be used in the experiment.')
parser.add_argument('--run-exact', action='store_true')
parser.add_argument('--subject', type=int, default=None)
parser.add_argument('--seed', type=int, default=789)
parser.add_argument('--on-server', action='store_true')
#**************************************************************************#
#arguments related to the environment
parser.add_argument('--annotation-file', type=str,
default='../envs/expert_datasets/\
university_students/annotation/processed/frame_skip_1/\
students003_processed_corrected.txt', help='The location of the annotation file to \
be used to run the environment.')
parser.add_argument('--reward-path' , type=str, nargs='?', default= None)
parser.add_argument('--reward-net-hidden-dims', nargs="*", type=int, default=[128])
#**************************************************************************#
#agent related arguments
parser.add_argument('--agent-type', type=str, default='Potential_field', help='The type of agent to be used to \
in the environment. It can be either a RL/IRL agent, or an alternative controller agent. \
Different agents will then have different arguments.')
#arguments for a network based agent
parser.add_argument('--policy-path', type=str, nargs='?', default=None)
parser.add_argument('--policy-net-hidden-dims', nargs="*", type=int, default=[128])
#arguments for a potential field agent
'''
/home/abhisek/Study/Robotics/deepirl/experiments/results/Beluga/IRL Runs/
Drone_environment_univ_students003_DroneFeatureRisk_updated_risk_v2_general_3kiter2019-09-27 10:24:41-reg-0-seed-8788-lr-0.001/
saved-models/17.pt
'''
#argument for some other agent
#*************************************************************************#
#parameters for information collector
parser.add_argument('--save-plots', action='store_true', default=False)
parser.add_argument('--store-results', action='store_true', default=False)
parser.add_argument('--save-folder', type=str, default=None, help= 'The name of the folder to \
store experiment related information.')
#************************************************************************#
parser.add_argument('--reward-analysis', action='store_true', default=False)
parser.add_argument('--crash-analysis', action='store_true', default=False)
parser.add_argument('--plain-run', action='store_true', default=True)
def check_parameters(args):
if args.agent_type=='Policy_network':
if args.policy_path is None or args.policy_net_hidden_dims is None:
print("Please provide correct information to load a policy network.")
exit()
if args.feat_extractor is None:
print("Please provide a feature extractor to continue.")
exit()
if args.reward_analysis:
if args.reward_path is None or args.reward_net_hidden_dims is None:
print("Please provide reward network details to perform reward analysis.")
exit()
#**************************************************
thresh1 = 10
thresh2 = 15
step_size = 2
agent_width = 10
obs_width = 10
grid_size = 3
#**************************************************
ts=time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
args = parser.parse_args()
#checks if all the parameters are in order
check_parameters(args)
if args.on_server:
matplotlib.use('Agg')
os.environ['SDL_VIDEODRIVER'] = 'dummy'
#*************************************************
#initialize information collector
from envs.drone_env_utils import InformationCollector
info_collector = InformationCollector(run_info=args.agent_type,
thresh=thresh2*step_size,
plot_info=args.save_plots,
store_info=args.store_results,
)
#*************************************************
#initialize environment
from envs.gridworld_drone import GridWorldDrone
consider_heading = True
np.random.seed(args.seed)
env = GridWorldDrone(display=args.render, is_onehot=False,
seed=args.seed, obstacles=None,
show_trail=True,
is_random=False,
subject=args.subject,
annotation_file=args.annotation_file,
tick_speed=60,
obs_width=10,
step_size=step_size,
agent_width=agent_width,
external_control=True,
replace_subject=args.run_exact,
show_comparison=True,
consider_heading=consider_heading,
show_orientation=True,
rows=576, cols=720, width=grid_size)
print('Environment initalized successfully.')
#*************************************************
#initialize the feature extractor
from featureExtractor.drone_feature_extractor import DroneFeatureSAM1, DroneFeatureMinimal
from featureExtractor.drone_feature_extractor import DroneFeatureOccup, DroneFeatureRisk
from featureExtractor.drone_feature_extractor import DroneFeatureRisk_v2, DroneFeatureRisk_speed, DroneFeatureRisk_speedv2
if args.feat_extractor == 'DroneFeatureSAM1':
feat_ext = DroneFeatureSAM1(agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureOccup':
feat_ext = DroneFeatureOccup(agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
window_size=window_size)
if args.feat_extractor == 'DroneFeatureRisk':
feat_ext = DroneFeatureRisk(agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
show_agent_persp=True,
thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureRisk_v2':
feat_ext = DroneFeatureRisk_v2(agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
show_agent_persp=False,
thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureRisk_speed':
feat_ext = DroneFeatureRisk_speed(agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
show_agent_persp=True,
thresh1=thresh1, thresh2=thresh2)
if args.feat_extractor == 'DroneFeatureRisk_speedv2':
feat_ext = DroneFeatureRisk_speedv2(agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
thresh1=18, thresh2=30)
#*************************************************
#initialize the agent
if args.agent_type == 'Policy_network':
#initialize the network
print (args.policy_net_hidden_dims)
print (feat_ext.state_rep_size)
print (env.action_space)
pdb.set_trace()
agent = Policy(feat_ext.state_rep_size, env.action_space.n, hidden_dims=args.policy_net_hidden_dims)
if args.policy_path:
agent.load(args.policy_path)
else:
print('Provide a policy path')
if args.agent_type == 'Potential_field':
#initialize the PF agent
max_speed = env.max_speed
orient_quant = env.orient_quantization
orient_div = len(env.orientation_array)
speed_quant = env.speed_quantization
speed_div = len(env.speed_array)
attr_mag = 3
rep_mag = 2
agent = PFController(speed_div, orient_div, orient_quant)
if args.agent_type == 'Social_forces':
orient_quant = env.orient_quantization
orient_div = len(env.orientation_array)
speed_quant = env.speed_quantization
speed_div = len(env.speed_array)
agent = SocialForcesController(speed_div, orient_div, orient_quant)
if args.agent_type == 'Default':
env.external_control = False
agent = None
#the person from the video
pass
#*************************************************
#load reward network if present
if args.reward_path is not None:
from irlmethods.deep_maxent import RewardNet
state_size = feat_ext.extract_features(env.reset()).shape[0]
reward_net = RewardNet(state_size, args.reward_net_hidden_dims)
reward_net.load(args.reward_path)
#*************************************************
#play
def reward_analysis():
'''
    A function to analyze the rewards against actions for a given policy.
    A helpful visualization/debugging tool.
'''
for i in range(args.num_trajs):
#reset the world
state=env.reset()
if args.feat_extractor is not None:
feat_ext.reset()
state_feat = feat_ext.extract_features(state)
#pass
#reset the information collector
info_collector.reset_info(state)
done=False
t = 0
while t < args.max_ep_length and not done:
reward_arr = np.zeros(9)
reward_arr_true = np.zeros(9)
if args.feat_extractor is not None:
#************reward analysis block*************
if args.reward_analysis:
for i in range(9): #as there are 9 actions
action = i
state, reward_true, _ , _ = env.step(action)
print('Taking a step', action)
if args.feat_extractor is not None:
state_feat_temp = feat_ext.extract_features(state)
reward_arr[i] = reward_net(state_feat_temp)
reward_arr_true[i] = reward_true
state = env.rollback(1)
state_feat = feat_ext.rollback(2, state)
#print(reward_arr)
#**********************************************
#making sure the graphics are consistent
#if t>0: #skip if this is the first frame
# state_feat = feat_ext.extract_features(state)
#**********************************************
#selecting the action
#action selection for network
if args.agent_type=='Policy_network':
#pdb.set_trace()
action = agent.eval_action(state_feat)
else:
#action selection for alternate controller namely potential field
action = agent.eval_action(state)
#pdb.set_trace()
#print('The action finally taken :', action)
#action = int(np.argmax(reward_arr_true))
#**********************************************
if args.reward_analysis:
#comparing the reward network
true_reward_norm = (reward_arr_true - reward_arr_true.mean())/(reward_arr_true.std()+np.finfo(float).eps)
network_reward_norm = (reward_arr - reward_arr.mean())/(reward_arr.std()+np.finfo(float).eps)
#print('The true reward normalized:\n', true_reward_norm)
#print('The network reward normalized: \n', network_reward_norm)
plt.plot(true_reward_norm, c='r')
plt.plot(network_reward_norm, c='b')
plt.plot(probs.cpu().detach().numpy(), c='g')
#action = np.argmax(true_reward_norm)
#print('Action taken from here:', action)
#comparing the policy network
if args.render:
feat_ext.overlay_bins(state)
else:
action = agent.eval_action(state)
#pdb.set_trace()
state, reward, done, _ = env.step(action)
if args.feat_extractor is not None:
state_feat = feat_ext.extract_features(state)
if args.reward_path is not None:
reward = reward_net(state_feat)
#if args.reward_analysis:
print('Reward : {} for action {}:'.format(reward, action))
#pdb.set_trace()
plt.show()
info_collector.collect_information_per_frame(state)
t+=1
info_collector.collab_end_traj_results()
info_collector.collab_end_results()
info_collector.plot_information()
def crash_analysis():
'''
    A visualization/debugging tool to easily analyze the states and conditions
    right before an agent crashes.
'''
for i in range(args.num_trajs):
#reset the world
crash_analysis = False
state = env.reset()
print('Current subject :', env.cur_ped)
if args.feat_extractor is not None:
feat_ext.reset()
state_feat = feat_ext.extract_features(state)
#pass
#reset the information collector
done = False
t = 0
while t < args.max_ep_length and not done:
if args.feat_extractor is not None:
if args.agent_type == 'Policy_network':
action = agent.eval_action(state_feat)
else:
#action selection for alternate controller namely potential field
action = agent.eval_action(state)
if args.render:
feat_ext.overlay_bins(state)
else:
action = agent.eval_action(state)
#pdb.set_trace()
state, reward_true, done, _ = env.step(action)
if args.feat_extractor is not None:
state_feat = feat_ext.extract_features(state)
if crash_analysis:
pdb.set_trace()
if args.reward_path is not None:
reward = reward_net(state_feat)
else:
reward = reward_true
#if args.reward_analysis:
print('Reward : {} for action {}:'.format(reward, action))
#pdb.set_trace()
if done:
print('Crash frame : ', env.current_frame)
print('Agent position history :')
for i in range(len(feat_ext.agent_state_history)):
print(feat_ext.agent_state_history[i]['position'], env.heading_dir_history[i])
if args.crash_analysis:
if reward_true < -0.5:
if not crash_analysis:
if t > 10:
state = env.rollback(10)
state_feat = feat_ext.rollback(11, state)
else:
state = env.rollback(t-1)
state_feat = feat_ext.rollback(t, state)
print('Current frame after rollback :', env.current_frame)
for i in range(len(feat_ext.agent_state_history)):
print(feat_ext.agent_state_history[i]['position'], env.heading_dir_history[i])
done = False
crash_analysis = True
else:
break
else:
break
t += 1
def agent_drift_analysis(agent=agent,
agent_type=args.agent_type,
ped_list=None,
pos_reset=20,
):
'''
    input : agent, agent_type, an optional ped_list and pos_reset (the step
    interval after which the position is reset).
    Plays the agent on the provided environment with the assigned reset value
    for the assigned number of trajectories. Can be played with or without render.
    returns :
        An array containing the drift analysis for each of the
        pedestrians in the list for the given pos_reset.
'''
drift_value = 0
segment_counter = 0
env.cur_ped = None
print('Starting drift analysis of agent :{}. Reset\
interval :{}'.format(agent_type, pos_reset))
if ped_list is not None:
num_trajs = len(ped_list)
else:
num_trajs = args.num_trajs
#an array containing the drift value for each pedestrian
drift_info_detailed = np.zeros(num_trajs)
for i in tqdm(range(num_trajs)):
#reset the world
crash_analysis = False
if ped_list is None:
state = env.reset()
else:
state = env.reset_and_replace(ped=ped_list[i])
env.goal_state = copy.deepcopy(env.return_position(env.cur_ped, env.current_frame + pos_reset)['position'])
env.state['goal_state'] = copy.deepcopy(env.goal_state)
state = copy.deepcopy(env.state)
#print('Current subject :', env.cur_ped)
final_frame = env.final_frame
if args.feat_extractor is not None:
feat_ext.reset()
state_feat = feat_ext.extract_features(state)
state_feat = torch.from_numpy(state_feat).type(torch.FloatTensor).to(DEVICE)
#pass
#reset the information collector
info_collector.reset_info(state)
done = False
t = 0
drift_per_ped = 0
segment_counter_per_ped = 0
abs_counter = env.current_frame
while abs_counter < final_frame:
stop_points = []
if args.feat_extractor is not None:
if agent_type == 'Policy_network':
action = agent.eval_action(state_feat)
else:
#action selection for alternate controller namely potential field
action = agent.eval_action(state)
'''
if args.render:
feat_ext.overlay_bins(state)
'''
else:
action = agent.eval_action(state)
state, reward_true, done, _ = env.step(action)
drift_value += | np.linalg.norm(env.ghost_state['position'] - env.agent_state['position'], 2) | numpy.linalg.norm |
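# --- Illustrative sketch (made-up positions): the drift term accumulated above is
# the Euclidean distance between the ghost (ground-truth pedestrian) position and
# the agent position at every step.
import numpy as np

ghost_pos_demo = np.array([120.0, 85.0])
agent_pos_demo = np.array([118.5, 90.0])
drift_step_demo = np.linalg.norm(ghost_pos_demo - agent_pos_demo, 2)   # per-step drift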
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downstream classification task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from disentanglement_lib.evaluation.metrics import utils
import numpy as np
from six.moves import range
import gin.tf
@gin.configurable(
"downstream_task",
blacklist=["ground_truth_data", "representation_function", "random_state"])
def compute_downstream_task(ground_truth_data,
representation_function,
random_state,
num_train=gin.REQUIRED,
num_test=gin.REQUIRED,
batch_size=16):
"""Computes loss of downstream task.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
num_train: Number of points used for training.
num_test: Number of points used for testing.
batch_size: Batch size for sampling.
Returns:
Dictionary with scores.
"""
scores = {}
for train_size in num_train:
mus_train, ys_train = utils.generate_batch_factor_code(
ground_truth_data, representation_function, train_size, random_state,
batch_size)
mus_test, ys_test = utils.generate_batch_factor_code(
ground_truth_data, representation_function, num_test, random_state,
batch_size)
predictor_model = utils.make_predictor_fn()
train_err, test_err = _compute_loss(
np.transpose(mus_train), ys_train, np.transpose(mus_test),
ys_test, predictor_model)
size_string = str(train_size)
scores[size_string +
":mean_train_accuracy"] = | np.mean(train_err) | numpy.mean |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch import Tensor
import numpy as np
from collections import OrderedDict
# https://discuss.pytorch.org/t/torch-round-gradient/28628/6
class Round_fn(torch.autograd.function.InplaceFunction):
@staticmethod
def forward(ctx, input):
ctx.input = input
return torch.round(input)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
def softmax_init(bits):
degree = 4
    theta = (bits ** degree) / (bits ** degree).sum()
return theta
"""
@inproceedings{
esser2020learned,
title={LEARNED STEP SIZE QUANTIZATION},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=rkgO66VKDS}
}
"""
def grad_scale(x, scale):
yOut = x
yGrad = x * scale
return (yOut-yGrad).detach() + yGrad
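# Minimal sketch of what grad_scale does (defined but never called): the forward
# value is x unchanged, while the backward pass sees d(out)/dx == scale, which is
# the LSQ trick for rescaling the step-size gradient. Relies on the torch import
# at the top of this module.
def _demo_grad_scale():
    x = torch.ones(3, requires_grad=True)
    y = grad_scale(x, 0.1).sum()   # forward value equals x.sum() == 3.0
    y.backward()
    return x.grad                  # tensor([0.1000, 0.1000, 0.1000])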
class Q_ReLU(nn.Module):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU, self).__init__()
self.bits = Parameter(Tensor([32]))
self.act_func = act_func
self.inplace = inplace
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = F.relu(x, self.inplace)
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
nlvs = torch.pow(2, self.bits) # soft forward
#nlvs = torch.round(bits ** 2) # hard forward
x = F.hardtanh(x / a, 0, 1)
x_bar = Round_fn.apply(x.mul(nlvs-1)).div_(nlvs-1) * c
#x_bar = RoundQuant.apply(x, nlvs) * c
return x_bar
class Q_ReLU6(Q_ReLU):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU6, self).__init__(act_func, inplace)
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
self.c.data.fill_(np.log(np.exp(6)-1))
else:
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
#print("Q_ReLU6")
#print("self.bits", self.bits)
#print("self.a", self.a)
#print("self.c", self.c)
def initialize_qonly(self, offset, diff):
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
self.c.data.fill_(np.log(np.exp(6)-1))
else:
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
class Q_Sym(nn.Module):
def __init__(self):
super(Q_Sym, self).__init__()
self.bits = Parameter(Tensor([32]))
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
nlvs = torch.pow(2, self.bits)
x = F.hardtanh(x / a, -1, 1)
x_bar = Round_fn.apply(x.mul(nlvs/2-1)).div_(nlvs/2-1) * c
#x_bar = RoundQuant.apply(x, torch.round(nlvs / 2)) * c
return x_bar
################## didn't modify Q_HSwish #################
class Q_HSwish(nn.Module):
def __init__(self, act_func=True):
super(Q_HSwish, self).__init__()
self.n_lvs = [1]
self.bits = [32]
self.act_func = act_func
self.a = Parameter(Tensor(1))
self.b = 3/8
self.c = Parameter(Tensor(1))
self.d = -3/8
def initialize(self, n_lvs, offset, diff):
self.n_lvs = n_lvs
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = x * (F.hardtanh(x + 3, 0, 6) / 6)
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
x = x + self.b
x = F.hardtanh(x / a, 0, 1)
x = Round_fn.apply(x.mul(self.n_lvs-1)).div_(self.n_lvs) * c
#x = RoundQuant.apply(x, self.n_lvs) * c
x = x + self.d
return x
##########################################################
class Q_Conv2d(nn.Conv2d):
def __init__(self, *args, **kargs):
super(Q_Conv2d, self).__init__(*args, **kargs)
self.bits = Parameter(Tensor([32]))
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
self.weight_old = None
self.computation = 0
def initialize(self, bits):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
self.c.data.fill_(np.log(np.exp(max_val * 0.9)-1))
#print(self.bits)
def initialize_qonly(self):
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
self.c.data.fill_(np.log(np.exp(max_val * 0.9)-1))
def _weight_quant(self):
#print(self.bits)
a = F.softplus(self.a)
c = F.softplus(self.c)
nlvs = torch.pow(2, self.bits)
w_bar = F.hardtanh(self.weight / a, -1, 1)
w_bar = Round_fn.apply(w_bar.mul(nlvs/2-1)).div_(nlvs/2-1) * c
#w_bar = RoundQuant.apply(w_bar, torch.round(nlvs / 2)) * c
return w_bar
def forward(self, x):
if len(self.bits)==1 and self.bits[0]==32:
return F.conv2d(x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups)
else:
weight = self._weight_quant()
return F.conv2d(x, weight, self.bias,
self.stride, self.padding, self.dilation, self.groups)
class Q_Linear(nn.Linear):
def __init__(self, *args, **kargs):
super(Q_Linear, self).__init__(*args, **kargs)
self.bits = Parameter(Tensor(1), requires_grad=False)
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
self.weight_old = None
self.computation = 0
def initialize(self, bits):
#self.bits = Parameter(Tensor(bits), requires_grad=True)
self.bits = Parameter(Tensor([8]), requires_grad=False)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
max_val = self.weight.data.abs().max().item()
        self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 4 18:13:18 2018
@author: matteo
"""
"""References
PGPE: Sehnke, Frank, et al. "Policy gradients with parameter-based exploration for
control." International Conference on Artificial Neural Networks. Springer,
Berlin, Heidelberg, 2008.
"""
import numpy as np
from baselines import logger
import warnings
from contextlib import contextmanager
import time
from baselines.common import colorize
@contextmanager
def timed(msg):
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
def eval_trajectory(env, pol, gamma, horizon, feature_fun):
ret = disc_ret = 0
t = 0
ob = env.reset()
done = False
while not done and t<horizon:
s = feature_fun(ob) if feature_fun else ob
a = pol.act(s)
ob, r, done, _ = env.step(a)
ret += r
disc_ret += gamma**t * r
t+=1
return ret, disc_ret, t
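# Hedged usage sketch: `env` is assumed to be a gym-style environment and `pol`
# a policy object exposing `act`; both are placeholders, not defined in this file.
#
#     ret, disc_ret, steps = eval_trajectory(env, pol, gamma=0.99,
#                                            horizon=500, feature_fun=None)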
#BINARY line search
def line_search_binary(pol, newpol, actor_params, rets, alpha, natgrad,
normalize=True,
use_rmax=True,
use_renyi=True,
max_search_ite=30, rmax=None, delta=0.2, reassign=None):
rho_init = newpol.eval_params()
bound_init = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
n_bounds = len(bound_init)
low = np.zeros(n_bounds)
high = np.nan * np.ones(n_bounds)
#old_delta_bound = 0.
rho_opt = rho_init
i_opt = 0.
delta_bound_opt = np.zeros(n_bounds)
epsilon_opt = np.zeros(n_bounds)
epsilon = np.ones(n_bounds)
if max_search_ite<=0:
rho = rho_init + alpha*natgrad
newpol.set_params(rho)
delta_bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta) - bound_init
return rho, np.ones(len(epsilon)), delta_bound, 0
for i in range(max_search_ite):
rho = rho_init + reassign(epsilon) * natgrad * alpha
newpol.set_params(rho)
bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
delta_bound = bound - bound_init
cond = np.logical_or(delta_bound<=delta_bound_opt, np.isnan(bound))
cond = np.logical_not(cond)
if np.any(np.isnan(bound)):
warnings.warn('Got NaN bound value')
delta_bound = np.where(np.isnan(delta_bound), -np.inf*np.ones(n_bounds), delta_bound)
high = np.where(cond, high, epsilon)
low = np.where(cond, epsilon, low)
rho_opt = np.where(reassign(cond), rho, rho_opt)
if np.any(delta_bound>delta_bound_opt):
i_opt = i
delta_bound_opt = np.where(cond, delta_bound, delta_bound_opt)
epsilon_opt = np.where(cond, epsilon, epsilon_opt)
old_epsilon = epsilon
epsilon = np.where(np.isnan(high), 2*epsilon, (low + high)/2)
if np.linalg.norm(old_epsilon - epsilon) < 1e-6:
break
return rho_opt, epsilon_opt, delta_bound_opt, i_opt+1
def line_search_parabola(pol, newpol, actor_params, rets, alpha, natgrad,
normalize=True,
use_rmax=True,
use_renyi=True,
max_search_ite=30, rmax=None, delta=0.2, reassign=None):
rho_init = newpol.eval_params()
bound_init = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
n_bounds = len(bound_init)
epsilon = np.ones(n_bounds)
epsilon_old = np.zeros(n_bounds)
max_increase=2.
delta_bound_tol=1e-4
delta_bound_old = -np.inf * np.ones(n_bounds)
rho_old = rho_init
if max_search_ite<=0:
rho = rho_init + alpha*natgrad
newpol.set_params(rho)
delta_bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta) - bound_init
return rho, np.ones(len(epsilon)), delta_bound, 0
for i in range(max_search_ite):
stepsize = alpha*reassign(epsilon)
stepsize = np.where(np.isnan(stepsize), np.zeros(len(stepsize)), stepsize)
rho = rho_init + stepsize * natgrad
newpol.set_params(rho)
bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
if np.any(np.isnan(bound)):
warnings.warn('Got NaN bound value!')
if np.all(np.isnan(bound)):
return rho_old, epsilon_old, delta_bound_old, i + 1
epsilon_old = epsilon
delta_bound = bound - bound_init
epsilon = np.where(delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old,
epsilon_old*max_increase,
epsilon_old ** 2 / (2 * (epsilon_old - delta_bound)))
if np.all(delta_bound <= delta_bound_old + delta_bound_tol):
if np.all(delta_bound_old < 0.):
return rho_init, np.zeros(n_bounds), np.zeros(n_bounds), i + 1
else:
return rho_old, epsilon_old, delta_bound_old, i+1
epsilon = np.where(np.logical_and(delta_bound <= delta_bound_old + delta_bound_tol,
delta_bound_old < 0.),
np.zeros(n_bounds),
epsilon)
epsilon = np.where(np.logical_and(delta_bound <= delta_bound_old + delta_bound_tol,
delta_bound_old >= 0.),
epsilon_old,
epsilon)
epsilon = np.where(np.isnan(epsilon), np.zeros(len(epsilon)), epsilon)
delta_bound = np.where(np.isnan(delta_bound), np.zeros(len(delta_bound)), delta_bound)
delta_bound_old = delta_bound
rho_old = rho
delta_bound_old = np.where(np.isnan(epsilon_old), np.zeros(len(delta_bound_old)), delta_bound_old)
epsilon_old = np.where(np.isnan(epsilon_old), np.zeros(len(epsilon_old)), epsilon_old)
        epsilon_old = np.where(np.isinf(epsilon_old), np.zeros(len(epsilon_old)), epsilon_old)
""" General functions that can be used by multiple modules
"""
import numpy as np
import scipy.optimize as spo
import logging
logger = logging.getLogger(__name__)
def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}):
"""
    Find the root of ``func`` by dispatching to the chosen scipy solver.
    Parameters
    ----------
    func : function
        Objective function whose root is sought; its first argument is the variable being solved for.
    args : tuple, Optional, default=()
        Additional arguments passed to ``func``
    method : str, Optional, default="bisect"
        Solver to use. Can be any of the following scipy methods: "brentq", "least_squares", "TNC", "L-BFGS-B", "SLSQP", 'hybr', 'lm', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov', 'df-sane', 'anderson', 'hybr_broyden1', 'hybr_broyden2', 'broyden1', 'broyden2', 'bisect'.
    x0 : float, Optional, default=None
        Initial guess in parameter to be optimized
    bounds : tuple, Optional, default=None
        Parameter boundaries
    options : dict, Optional, default={}
        These options are passed to the scipy method
    Returns
    -------
    solution : float or numpy.ndarray
        Root (or optimizer output) returned by the chosen method
"""
if method not in [
"brentq",
"least_squares",
"TNC",
"L-BFGS-B",
"SLSQP",
"hybr",
"lm",
"linearmixing",
"diagbroyden",
"excitingmixing",
"krylov",
"df-sane",
"anderson",
"hybr_broyden1",
"hybr_broyden2",
"broyden1",
"broyden2",
"bisect",
]:
raise ValueError("Optimization method, {}, not supported.".format(method))
if x0 is None:
logger.debug("Initial guess in optimization not provided")
if np.any(bounds is None):
logger.debug("Optimization bounds not provided")
if x0 is None and method in [
"broyden1",
"broyden2",
"anderson",
"hybr",
"lm",
"linearmixing",
"diagbroyden",
"excitingmixing",
"krylov",
"df-sane",
]:
if np.any(bounds is None):
raise ValueError(
"Optimization method, {}, requires x0. Because bounds were not provided, so problem cannot be solved.".format(
method
)
)
else:
logger.error(
"Optimization method, {}, requires x0, using bisect instead".format(
method
)
)
method = "bisect"
if np.size(x0) > 1 and method in ["brentq", "bisect"]:
logger.error(
"Optimization method, {}, is for scalar functions, using {}".format(
method, "least_squares"
)
)
method = "least_squares"
if (
np.size(x0) == 1
and np.any(bounds is not None)
and np.shape(x0) != np.shape(bounds)[0]
):
bounds = tuple([bounds])
if np.any(bounds is None) and method in ["brentq", "bisect"]:
if x0 is None:
raise ValueError(
"Optimization method, {}, requires bounds. Because x0 was not provided, so problem cannot be solved.".format(
method
)
)
else:
logger.error(
"Optimization method, {}, requires bounds, using hybr".format(method)
)
method = "hybr"
if np.any(bounds is not None):
for bnd in bounds:
if len(bnd) != 2:
raise ValueError("bounds are not of length two")
#################### Root Finding without Boundaries ###################
if method in ["broyden1", "broyden2"]:
outer_dict = {
"fatol": 1e-5,
"maxiter": 25,
"jac_options": {"reduction_method": "simple"},
}
for key, value in options.items():
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
sol = spo.root(func, x0, args=args, method=method, options=outer_dict)
elif method == "anderson":
outer_dict = {"fatol": 1e-5, "maxiter": 25}
for key, value in options.items():
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
sol = spo.root(func, x0, args=args, method=method, options=outer_dict)
elif method in [
"hybr",
"lm",
"linearmixing",
"diagbroyden",
"excitingmixing",
"krylov",
"df-sane",
]:
outer_dict = {}
for key, value in options.items():
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
sol = spo.root(func, x0, args=args, method=method, options=outer_dict)
#################### Minimization Methods with Boundaries ###################
elif method in ["TNC", "L-BFGS-B"]:
outer_dict = {
"gtol": 1e-2 * np.sqrt(np.finfo("float").eps),
"ftol": np.sqrt(np.finfo("float").eps),
}
for key, value in options.items():
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
if len(bounds) == 2:
sol = spo.minimize(
func,
x0,
args=args,
method=method,
bounds=tuple(bounds),
options=outer_dict,
)
else:
sol = spo.minimize(func, x0, args=args, method=method, options=outer_dict)
elif method == "SLSQP":
outer_dict = {}
for key, value in options.items():
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
if len(bounds) == 2:
sol = spo.minimize(
func,
x0,
args=args,
method=method,
bounds=tuple(bounds),
options=outer_dict,
)
else:
sol = spo.minimize(func, x0, args=args, method=method, options=outer_dict)
#################### Root Finding with Boundaries ###################
elif method == "brentq":
outer_dict = {"rtol": 1e-7}
for key, value in options.items():
if key in ["xtol", "rtol", "maxiter", "full_output", "disp"]:
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
sol = spo.brentq(func, bounds[0][0], bounds[0][1], args=args, **outer_dict)
elif method == "least_squares":
outer_dict = {}
for key, value in options.items():
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
bnd_tmp = [[], []]
for bnd in bounds:
bnd_tmp[0].append(bnd[0])
bnd_tmp[1].append(bnd[1])
sol = spo.least_squares(
func, x0, bounds=tuple(bnd_tmp), args=args, **outer_dict
)
elif method == "bisect":
outer_dict = {"maxiter": 100}
for key, value in options.items():
if key in ["xtol", "rtol", "maxiter", "full_output", "disp"]:
outer_dict[key] = value
logger.debug(
"Using the method, {}, with the following options:\n{}".format(
method, outer_dict
)
)
sol = spo.bisect(func, bounds[0][0], bounds[0][1], args=args, **outer_dict)
# Given final P estimate
if method not in ["brentq", "bisect"]:
solution = sol.x
logger.info(
"Optimization terminated successfully: {} {}".format(
sol.success, sol.message
)
)
else:
logger.info("Optimization terminated successfully: {}".format(sol))
solution = sol
return solution
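# Hedged usage sketch: bracketing the root of x**2 - 4 with the default
# bisection method (the objective function here is illustrative only).
#
#     root = solve_root(lambda x: x**2 - 4.0, method="bisect", bounds=(0.0, 10.0))
#     # root ~= 2.0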
def central_difference(x, func, step_size=1e-5, relative=False, args=()):
"""
Take the derivative of a dependent variable calculated with a given function using the central difference method.
Parameters
----------
x : numpy.ndarray
Independent variable to take derivative with respect too, using the central difference method. Must be first input of the function.
func : function
Function used in job to calculate dependent factor. This function should have a single output.
step_size : float, Optional, default=1E-5
Either the step size used in the central difference method, or if ``relative=True``, this variable is a scaling factor so that the step size for each value of x is x * step_size.
args : tuple, Optional, default=()
Each entry of this list contains the input arguments for each job
relative : bool, Optional, default=False
If False, the step_size is directly used to calculate the derivative. If true, step_size becomes a scaling factor, where the step size for each value of x becomes step_size*x.
Returns
-------
dydx : numpy.ndarray
Array of derivative of y with respect to x, given an array of independent variables.
"""
if not isiterable(x):
        x = np.array([x])
elif not isinstance(x, np.ndarray):
x = np.array(x)
if relative:
step = x * step_size
if not isiterable(step):
step = np.array([step])
step = np.array(
[2 * np.finfo(float).eps if xx < np.finfo(float).eps else xx for xx in step]
)
else:
step = step_size
y = func(np.append(x + step, x - step), *args)
lx = int(len(y)/2)
dydx = (y[:lx] - y[lx:]) / (2.0 * step)
return dydx
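# Hedged usage sketch: numerical derivative of f(x) = x**2, which should give
# dydx ~= 2*x (the test function is illustrative only).
#
#     dydx = central_difference(np.array([1.0, 2.0, 3.0]), lambda x: x**2)
#     # dydx ~= array([2., 4., 6.])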
def isiterable(array):
"""
Check if variable is an iterable type with a length (e.g. np.array or list).
Note that this could be tested with ``isinstance(array, Iterable)``, however ``array=np.array(1.0)`` would pass that test and then fail in ``len(array)``.
Parameters
----------
array
Variable of some type, that should be iterable
Returns
-------
isiterable : bool
Will be True if indexing is possible and False if not.
"""
array_tmp = np.array(array, dtype=object)
tmp = np.shape(array_tmp)
if tmp:
isiterable = True
else:
isiterable = False
return isiterable
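# Hedged examples of the distinction this helper draws:
#
#     isiterable([1.0, 2.0])       # True
#     isiterable(np.array(1.0))    # False (0-d array has no length)
#     isiterable(3.0)              # False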
def check_length_dict(dictionary, keys, lx=None):
"""
This function compared the entries, keys, in the provided dictionary to ensure they're the same length.
All entries in the list ``keys``, will be made into numpy arrays (if present). If a float or array of length one is provided, it will be expanded to the length of other arrays.
Parameters
----------
dictionary : dict
Dictionary containing all or some of the keywords, ``keys``, of what should be arrays of identical size.
keys : list
Possible keywords representing array entries
lx : int, Optional, default=None
The size that arrays should conform to
Returns
-------
new_dictionary : dict
Dictionary of arrays of identical size.
"""
if lx == None:
lx_array = []
for key in keys:
if key in dictionary:
tmp = dictionary[key]
if np.shape(tmp):
lx_array.append(len(tmp))
else:
lx_array.append(1)
if not len(lx_array):
raise ValueError(
"None of the provided keys are found in the given dictionary"
)
lx = max(lx_array)
new_dictionary = {}
for key in keys:
if key in dictionary:
tmp = dictionary[key]
if isiterable(tmp):
l_tmp = len(tmp)
if l_tmp == 1:
new_dictionary[key] = np.array([tmp[0] for x in range(lx)], float)
elif l_tmp == lx:
new_dictionary[key] = np.array(tmp, float)
else:
raise ValueError(
"Entry, {}, should be length {}, not {}".format(key, lx, l_tmp)
)
else:
new_dictionary[key] = np.array([tmp for x in range(lx)], float)
return new_dictionary
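# Hedged usage sketch: scalars are broadcast to the length of the longest entry
# among the requested keys (the key names and values below are illustrative only).
#
#     check_length_dict({"T": [300.0, 310.0, 320.0], "P": 101325.0}, ["T", "P"])
#     # -> {"T": array([300., 310., 320.]), "P": array([101325., 101325., 101325.])}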
def set_defaults(dictionary, keys, values, lx=None):
"""
This function checks a dictionary for the given keys, and if it's not there, the appropriate value is added to the dictionary.
Parameters
----------
dictionary : dict
Dictionary of data
keys : list
Keys that should be present (of the same length as ``lx``)
values : list
Default values for the keys that aren't in dictionary
lx : int, Optional, default=None
If not None, and values[i] is a float, the key will be set to an array of length, ``lx``, populated by ``values[i]``
Returns
-------
new_dictionary : dict
Dictionary of arrays of identical size.
"""
new_dictionary = dictionary.copy()
key_iterable = isiterable(keys)
if not isiterable(values):
if key_iterable:
values = np.ones(len(keys)) * values
else:
            values = np.array([values])
from os import read
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import datetime
import sys
from tqdm import tqdm
import cppsolver as cs
from ..solver import Solver, Solver_jac
from ..preprocess import Reading_Data, LM_data, LM_data_2mag
from ..filter import lowpass_filter, mean_filter, median_filter, Magnet_KF, Magnet_UKF, Magnet_KF_cpp
from ..preprocess import read_data
def ang_convert(x):
a = x//(2*np.pi)
result = x-a*(2*np.pi)
if result > np.pi:
result -= np.pi * 2
return result
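# ang_convert wraps an angle (in radians) into the interval (-pi, pi]. Hedged examples:
#
#     ang_convert(7.0)     # ~ 0.717  (7 - 2*pi)
#     ang_convert(-7.0)    # ~ -0.717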
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def show_track_1mag_csv_cpp(reading_path, cali_path, gt_path, pSensor, My_M, use_kalman=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlim([-10, 15])
ax.set_ylim([-10, 15])
ax.set_zlim([0, 25])
# ax.set_title("Reconstructed Magnet Position")
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
# M_choice = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# M_choice = [0.8, 1, 1.2, 1.4]
M_choice = [2]
reading_data = Reading_Data(data_path=reading_path, cali_path=cali_path)
data = reading_data.readings
lm_data = LM_data(gt_path)
# set the origin of the gt
lm_data.offset = np.array([-1.5614192, -0.31039926, 0.90800506])
result_parameter = []
color = ['r', 'b', 'g', 'y', 'm']
for index, M in enumerate(M_choice):
# model = Solver(1)
# model = Finexus_Solver(-5e-2, -5e-2, 8e-2)
pred_position = []
changingM = []
changingG = []
changingTheta = []
changingPhy = []
directions = []
SNR = []
cut = 5
starting_point = lm_data.get_gt(reading_data.tstamps[cut])[0]
if use_kalman:
kf_params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * starting_point[0], 1e-2 * starting_point[1], 1e-2 * starting_point[2], 0, 0])
model = Magnet_KF_cpp(
1, pSensor, [0.8, 0.8, 1.5]*pSensor.shape[0], kf_params, dt=1/17, ord=3)
else:
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * starting_point[0], 1e-2 * starting_point[1], 1e-2 * starting_point[2], 0, 0])
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * (-2), 1e-2 * (2), 1e-2 * (20), 0, 0])
for i in tqdm(range(cut, data.shape[0] - cut)):
# fix m value and gx gy gz
datai = data[i].reshape(-1, 3)
if use_kalman:
model.predict()
result = model.update(datai)
else:
result = cs.solve_1mag(
datai.reshape(-1), pSensor.reshape(-1), params)
params = result.copy()
[x0, y0, z0, Gx, Gy, Gz] = [
result[4] * 1e2, result[5] * 1e2,
result[6] * 1e2, result[0],
result[1], result[2]
]
# [m, theta, phy] = [np.exp(result['m0'].value), np.pi * sigmoid(
# result['theta0'].value), np.pi * np.tanh(result['phy0'].value)]
[m, theta, phy, direction] = [
np.exp(result[3]),
ang_convert(result[7]),
ang_convert(result[8]),
np.array([np.sin(ang_convert(result[7]))*np.cos(ang_convert(result[8])),
np.sin(ang_convert(result[7]))*np.sin(ang_convert(result[8])), np.cos(ang_convert(result[7]))]),
]
# [x, y, z, m] = [result['X'].value*1e2, result['Y'].value*1e2,
# result['Z'].value*1e2, result['m'].value]
G = np.array([Gx, Gy, Gz])
noise = np.linalg.norm(G, 2)
signal = np.linalg.norm(datai - G, 2)
pred_position.append(x0)
pred_position.append(y0)
pred_position.append(z0)
changingM.append(m)
changingTheta.append(theta)
changingPhy.append(phy)
changingG.append([Gx, Gy, Gz])
directions.append(direction)
changingG = np.array(changingG)
changingM = np.array(changingM)
changingTheta = np.array(changingTheta)
changingPhy = np.array(changingPhy)
changingAng = np.stack([changingTheta, changingPhy], axis=0).T
directions = np.stack(directions, axis=0)
pred_position = np.array(pred_position).reshape(-1, 3)
compare_label = [' ', '(fixing G)']
ax.plot(pred_position[:, 0],
pred_position[:, 1],
pred_position[:, 2],
c=color[index % len(color)],
label='Magnet')
print(np.mean(pred_position, axis=0))
# sensor position
ax.scatter(1e2 * pSensor[:, 0],
1e2 * pSensor[:, 1],
1e2 * pSensor[:, 2],
c='r',
s=1,
alpha=0.5)
# calculate loss
gt_route = []
losses = {}
losses_count = {}
gt_directions = []
losses_angle = {}
losses_count_angle = {}
for i in range(pred_position.shape[0]):
# Get gt
gt = lm_data.get_gt(reading_data.tstamps[i + cut])
gt_pos = gt[0]
gt_route.append(gt_pos)
gt_direction = gt[1]
gt_directions.append(gt_direction)
# calculate loss
dis = np.linalg.norm(gt_pos - np.mean(pSensor, axis=0), 2)
loss1 = np.linalg.norm(gt_pos - pred_position[i], 2)
loss2 = np.arccos(np.dot(gt_direction, directions[i]))
# store route loss
if not dis in losses.keys():
losses[dis] = loss1
losses_count[dis] = 1
else:
losses[dis] += loss1
losses_count[dis] += 1
# store ang loss
if not dis in losses_angle.keys():
losses_angle[dis] = loss2
losses_count_angle[dis] = 1
else:
losses_angle[dis] += loss2
losses_count_angle[dis] += 1
gt_route = np.stack(gt_route, axis=0)
gt_directions = np.stack(gt_directions, axis=0)
ax.plot(gt_route[:, 0],
gt_route[:, 1],
gt_route[:, 2],
c='b',
alpha=0.5,
linewidth=2,
label='Ground Truth')
plt.legend()
# store the gt route and the reconstructed route
tmp = reading_path.split('/')
file_name = tmp[-1].split('.')[0] + '.npz'
tmp.pop(0)
tmp.pop(-1)
result_path = os.path.join('result', 'reconstruction_result', *tmp)
if not os.path.exists(result_path):
os.makedirs(result_path)
np.savez(os.path.join(result_path, file_name),
gt=gt_route,
result=pred_position, gt_ang=gt_directions, result_ang=directions, G=changingG)
fig5 = plt.figure()
plt.title("Reconstuct Loss")
plot_loss_data = []
for dis in sorted(losses.keys()):
plot_loss_data.append(dis)
plot_loss_data.append(losses[dis] / losses_count[dis])
plot_loss_data = np.array(plot_loss_data).reshape(-1, 2)
plt.plot(plot_loss_data[:, 0],
plot_loss_data[:, 1], label='Position loss')
plt.legend()
fig6 = plt.figure()
plt.title("Reconstuct angle Loss")
plot_loss_data = []
for dis in sorted(losses_angle.keys()):
plot_loss_data.append(dis)
plot_loss_data.append(losses_angle[dis] / losses_count_angle[dis])
plot_loss_data = np.array(plot_loss_data).reshape(-1, 2)
plt.plot(plot_loss_data[:, 0], plot_loss_data[:, 1], label='Ang loss')
plt.legend()
fig2 = plt.figure()
plt.title("Magnet Moment")
# plt.ylim(0, 10)
plt.plot(changingM, label='M')
plt.legend()
fig3 = plt.figure()
plt.title("G")
plt.plot(changingG[:, 0], label='Gx')
plt.plot(changingG[:, 1], label='Gy')
plt.plot(changingG[:, 2], label='Gz')
plt.legend()
fig4 = plt.figure()
plt.title("orientation")
plt.ylim(-5, 5)
plt.plot(changingTheta, label='theta')
plt.plot(changingPhy, label='phy')
plt.legend()
plt.show()
# plt.savefig("result/result.jpg", dpi=900)
def show_track_2mag_csv_cpp(reading_path, cali_path, gt_path, pSensor, My_M, use_kalman=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
# ax.set_zlim([-2, 30])
ax.set_title("Reconstructed Magnet Position")
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
# M_choice = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# M_choice = [0.8, 1, 1.2, 1.4]
M_choice = [2]
reading_data = Reading_Data(data_path=reading_path, cali_path=cali_path)
data = reading_data.readings
lm_data = LM_data_2mag(gt_path)
# set the origin of the gt
lm_data.offset = np.array([-1.5614192, -0.31039926, 0.90800506])
result_parameter = []
color = ['r', 'b', 'g', 'y', 'm']
for index, M in enumerate(M_choice):
pred_position = []
changingM = []
changingG = []
changingTheta = []
changingPhy = []
changingTheta2 = []
changingPhy2 = []
changingDir = []
changingDir2 = []
SNR = []
cut = 0
starting_point = lm_data.get_gt(reading_data.tstamps[cut])
params = {
'X0': 1e-2 * starting_point[0][0],
'Y0': 1e-2 * starting_point[0][1],
'Z0': 1e-2 * starting_point[0][2],
'm0': np.log(My_M),
'theta0': 0.1,
'phy0': 0.1,
'X1': 1e-2 * starting_point[2][0],
'Y1': 1e-2 * starting_point[2][1],
'Z1': 1e-2 * starting_point[2][2],
'm1': np.log(My_M),
'theta1': 0.1,
'phy1': 0.1,
'gx': 0,
'gy': 0,
'gz': 0,
}
params = np.array([
40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(My_M),
1e-2 * starting_point[0][0], 1e-2 *
starting_point[0][1], 1e-2 * starting_point[0][2], 0, 0,
1e-2 * starting_point[2][0], 1e-2 *
starting_point[2][1], 1e-2 * starting_point[2][2], 0, 0,
])
params = np.array([
            40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(3),
import os
import time
import math
import json
import codecs
import argparse  ## Python's built-in command-line argument parsing module
import numpy as np
from tqdm import tqdm
from numpy import finfo
from sklearn.metrics import accuracy_score
from transformers import BertTokenizer
from pytorch_pretrained_bert import BertModel
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.autograd import Variable
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from distributed import apply_gradient_allreduce
import parse_nk
from models import G2PTransformerMask, poly_tonesandhi, Cascaded_Tacotron2
from data_utils import TextMelLoader, TextMelCollate, G2PDatasetMask, get_dataloader, polyTTS_get_dataloader
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
def reduce_tensor(tensor, n_gpus):  ## average a tensor across all distributed workers
    rt = tensor.clone()  ## copy of the tensor with the same size and dtype as the original
    dist.all_reduce(rt, op=dist.reduce_op.SUM)  ## sum the tensor across all processes; afterwards every process holds the same value
    rt /= n_gpus  ## divide by the number of GPUs to obtain the average
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
    ## Check that CUDA is available; CUDA (Compute Unified Device Architecture) is NVIDIA's GPU computing platform.
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
    ## torch.cuda.set_device(device) sets the current device. Setting CUDA_VISIBLE_DEVICES is usually preferred; a negative device index makes this call a no-op.
    ## torch.cuda.device_count() returns the number of available GPUs.
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
    ## PyTorch distributed training
    ## backend str/Backend: communication backend, e.g. 'nccl', 'gloo', or a torch.distributed.Backend value (Backend.GLOO)
    ## init_method str: URL specifying how the processes discover each other during initialization
    ## world_size int: total number of processes taking part in training
    ## rank int: index (and priority) of this process
    ## group_name str: name of the process group
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
    ## "if not" runs the block when the flag is False
if not hparams.load_mel_from_disk:
trainset = TextMelLoader(hparams.training_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
else:
trainset = TextMelLoader(hparams.mel_training_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
valset = TextMelLoader(hparams.mel_validation_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step, hparams.num_classes)
if hparams.distributed_run: ##False
train_sampler = DistributedSampler(trainset)
        ## For multi-node / multi-GPU training each card should read different data; the sampler ensures the dataloader only loads a specific subset of the full dataset.
        ## It assigns a partition of the dataset to each subprocess to avoid duplicated data between processes.
shuffle = False
else:
train_sampler = None
shuffle = True
    ## Build an iterable data loader
train_loader = DataLoader(trainset, num_workers=0, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
    ## dataset (Dataset class deciding where the data comes from and how it is read), batch_size (samples per batch), shuffle (whether to reshuffle every epoch)
    ## num_workers (number of subprocesses used for loading), drop_last (drop the final batch when the sample count is not divisible by batch_size)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
    model = Cascaded_Tacotron2(hparams).cuda()  ## takes hparams; .cuda() is available because the model subclasses nn.Module
if hparams.fp16_run: ## False
model.decoder.attention_layer.score_mask_value = finfo('float16').min ##?
if hparams.distributed_run: ## False
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=0,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
val_mel_loss = 0.0
val_gate_loss = 0.0
val_select_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
# original_words = x[3]
# # print('CHECK original_words IN validate:', original_words)
# _, _, select_target = y
# select_target = np.array(select_target.cpu())
# # print('CHECK select_target IN validate:', select_target)
# np.savetxt('select_target.txt',select_target)
# _, _, _, _, select_pred = y_pred
# select_pred = np.array(select_pred.cpu())
# select_pred = np.argmax(select_pred, axis=2)
# # print('CHECK select_pred IN validate:', select_pred)
# np.savetxt('select_pred.txt',select_pred)
# mask_padded_to_show = np.array(mask_padded.cpu())
# mask_padded_to_show = np.sum(mask_padded_to_show, axis=2)
# # print('CHECK mask_padded_to_show IN validate:', mask_padded_to_show)
# np.savetxt('select_mask.txt',mask_padded_to_show)
mask_padded = x[3]
loss, mel_loss, gate_loss, select_loss = criterion(y_pred, y, mask_padded)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
reduced_val_mel_loss = reduce_tensor(mel_loss.data, n_gpus).item()
reduced_val_gate_loss = reduce_tensor(gate_loss.data, n_gpus).item()
reduced_val_select_loss = reduce_tensor(select_loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
reduced_val_mel_loss = mel_loss.item()
reduced_val_gate_loss = gate_loss.item()
reduced_val_select_loss = select_loss.item()
val_loss += reduced_val_loss
val_mel_loss += reduced_val_mel_loss
val_gate_loss += reduced_val_gate_loss
val_select_loss += reduced_val_select_loss
val_loss = val_loss / (i + 1)
val_mel_loss = val_mel_loss / (i + 1)
val_gate_loss = val_gate_loss / (i + 1)
val_select_loss = val_select_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, val_mel_loss, val_gate_loss, val_select_loss, model, y, y_pred, iteration)
def train_tts(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string) directory to save tensorboard logs
checkpoint_path(string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
    torch.manual_seed(hparams.seed)  ## seed the CPU RNG so repeated runs generate the same random numbers
    torch.cuda.manual_seed(hparams.seed)  ## seed the current GPU RNG (torch.cuda.manual_seed_all(seed) would seed all GPUs)
    ## Fixing the seeds pins the randomly initialised weights, so retraining from scratch starts from the same initial values even though they are randomly generated.
model = load_model(hparams)
learning_rate = hparams.learning_rate
# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
# weight_decay=hparams.weight_decay)
for name, param in model.named_parameters():
# frozen except tts
# if name.split('.')[0] == 'poly_phoneme_classifier':
# param.requires_grad = False
# frozen poly module except tone sandhi & tts
# if name.split('.')[0] == 'poly_phoneme_classifier':
# if name.split('.')[1] != 'linear_pre' and name.split('.')[1] != 'conv_layers' and name.split('.')[1] != 'linear_aft':
# param.requires_grad = False
# frozen except structure CNN & tonesandhi & tts
if name.split('.')[0] == 'poly_phoneme_classifier':
if name.split('.')[1] == 'g2ptransformermask':
if name.split('.')[2] != 'structure_cnn_tts':
param.requires_grad = False
elif name.split('.')[1] != 'linear_pre' and name.split('.')[1] != 'conv_layers' and name.split('.')[1] != 'linear_aft':
param.requires_grad = False
# else:
# param.requires_grad = False
training_parameters_list = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(training_parameters_list, lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
    ## apex is NVIDIA's mixed-precision training extension for PyTorch; a few lines of code enable different levels of mixed-precision speed-up and can roughly halve training time.
    ## fp16: half-precision floating point, a binary float type stored in 2 bytes (16 bits).
    ## fp16 advantages: lower memory use, faster training and inference, tensor-core support. Drawback: quantization error.
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
# ================ MAIN TRAINNIG LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
            start = time.perf_counter()  ## high-resolution timer used to measure the iteration duration
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
# print('CHECK batch:', batch)
model.zero_grad()
x, y = model.parse_batch(batch)
y_pred = model(x)
mask_padded = x[3]
loss, mel_loss, gate_loss, select_loss = criterion(y_pred, y, mask_padded) ## Tacotron2Loss(model_output,targets,mask_padded)
            ## keep the individual loss terms separate for logging
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
reduced_val_mel_loss = reduce_tensor(mel_loss.data, n_gpus).item()
reduced_val_gate_loss = reduce_tensor(gate_loss.data, n_gpus).item()
reduced_val_select_loss = reduce_tensor(select_loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
reduced_val_mel_loss = mel_loss.item()
reduced_val_gate_loss = gate_loss.item()
reduced_val_select_loss = select_loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# print('CHECK structure_cnn.convs.0.weight IS CHANGE:', model.structure_cnn.convolutions[0][0].conv.weight)
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
            ## A typical PyTorch training step calls optimizer.zero_grad(), loss.backward() and optimizer.step() in order:
            ## zero the gradients, backpropagate to compute per-parameter gradients, then take one gradient-descent update.
if not is_overflow and rank == 0:
                duration = time.perf_counter() - start  ## the difference of two perf_counter() calls gives the elapsed wall-clock time
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, reduced_val_mel_loss, reduced_val_gate_loss, reduced_val_select_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
class Mask_Softmax(nn.Module):
def __init__(self, plus=1.0):
super(Mask_Softmax, self).__init__()
self.plus = plus
def forward(self, logits, output_mask):
logits = logits + (output_mask + 1e-45).log()
return torch.nn.functional.log_softmax(logits, dim=-1)
class Gumbel_Softmax(nn.Module):
def __init__(self, temperature=1):
super(Gumbel_Softmax, self).__init__()
        self.softmax = nn.Softmax(dim=-1)  ## softmax over the last dimension
# initial temperature for gumbel softmax (default: 1)
self.temperature = temperature
self.mask_softmax = Mask_Softmax()
# self.mask_softmax = nn.LogSoftmax()
def forward(self, logits, output_mask, hard=False):
y = self._gumbel_softmax_sample(logits, output_mask, hard)
return y
def _sample_gumbel(self, shape, eps=1e-20):
U = torch.rand(shape)
return -torch.log(-torch.log(U + eps) + eps)
def _gumbel_softmax_sample(self, logits, output_mask, hard=False):
sample = Variable(self._sample_gumbel(logits.size()[-1]), requires_grad=True)
if logits.is_cuda:
sample = sample.cuda()
y = logits + sample
# return self.softmax(y / self.temperature)
y_soft = self.mask_softmax(y / self.temperature, output_mask)
# y_soft = self.mask_softmax(y / self.temperature)
if hard:
# Straight through.
index = y_soft.max(-1, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(-1, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
# Reparametrization trick.
ret = y_soft
return ret
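# Hedged usage sketch for Gumbel_Softmax: returns masked log-probabilities over
# the last dimension (the shapes below are illustrative only).
#
#     gs = Gumbel_Softmax(temperature=1)
#     logits = torch.randn(2, 5)
#     output_mask = torch.ones(2, 5)          # 1 = allowed class, 0 = masked out
#     log_probs = gs(logits, output_mask)     # shape (2, 5)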
def masked_augmax(logits, mask, dim, min_val=-1e7):
logits = logits.exp()
logits = logits.mul(mask)
# one_minus_mask = (1.0 - mask).byte()
# replaced_vector = vector.masked_fill(one_minus_mask, min_val)
# max_value, _ = replaced_vector.max(dim=dim)
max_value = torch.argmax(logits, dim=1)
return max_value
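# Hedged sketch: masked_augmax exponentiates the log-probabilities, zeroes the
# masked-out classes, and returns the argmax index per row (dim is fixed to 1
# internally; `log_probs` and `output_mask` follow the sketch above).
#
#     idx = masked_augmax(log_probs, output_mask, dim=1)   # shape (2,)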
def train_poly(args, hparams):
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
print('CHECK HERE train poly ONLY')
train_dataloader = get_dataloader(hparams.use_output_mask, hparams.train_file, hparams.train_label,
hparams, hparams.poly_batch_size,
hparams.poly_max_length, shuffle=True)
val_dataloader = get_dataloader(hparams.use_output_mask, hparams.val_file, hparams.val_label,
hparams, hparams.poly_batch_size,
hparams.poly_max_length, shuffle=True)
# test_dataloader = get_dataloader(args.use_output_mask, args.test_file, args.test_label,
# args.class2idx, args.merge_cedict, args.poly_batch_size,
# args.max_length, shuffle=True)
with codecs.open(hparams.class2idx, 'r', 'utf-8') as usernames:
class2idx = json.load(usernames)
print("num classes: {}".format(len(class2idx)))
num_classes = len(class2idx)
model = G2PTransformerMask(num_classes, hparams)
    device = torch.cuda.current_device()  ## index of the GPU currently in use
    model = model.to(device)  ## move the model onto that device
for name, param in model.named_parameters():
# frozen syntax module
if name.split('.')[0] != 'tree_shared_linear' and name.split('.')[0] != 'structure_cnn_poly' \
and name.split('.')[0] != 'linear_pre' and name.split('.')[0] != 'poly_phoneme_classifier' \
and name.split('.')[0] != 'linear_aft':
param.requires_grad = False
training_parameters_list = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(training_parameters_list, lr=hparams.poly_lr)
criterion = nn.NLLLoss()
# mask_criterion = Mask_Softmax()
mask_criterion = Gumbel_Softmax()
model_dir = "./save/poly_only_syntax_frozen"
if not os.path.exists(model_dir):
os.makedirs(model_dir)
best_acc = 0
for epoch in range(hparams.poly_epochs):
model.train()
for idx, batch in enumerate(train_dataloader, start=1):
# print('CEHCK batch:', batch)
# if idx > 200:
# break
batch = tuple(t.to(device) for t in batch)
if hparams.use_output_mask:
input_ids, poly_ids, labels, output_mask = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
else:
input_ids, poly_ids, labels = batch
                mask = torch.sign(input_ids)  ## torch.sign returns the elementwise sign: 1 for positive, -1 for negative, 0 for zero (so padding ids stay 0)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
# inputs = {"input_ids": input_ids,
# "poly_ids": poly_ids,
# "attention_mask": mask}
logits, _ = model(**inputs)
batch_size = logits.size(0)
logits = logits[torch.arange(batch_size), poly_ids]
# logits = mask_criterion(logits, output_mask, True)
logits = mask_criterion(logits, output_mask)
loss = criterion(logits, labels)
loss.backward()
# nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
model.zero_grad()
            if idx % 100 == 0:  ## every 100 steps (% is the modulo operator)
print("loss : {:.4f}".format(loss.item()))
all_preds = []
all_mask_preds = []
all_labels = []
model.eval()
for batch in tqdm(val_dataloader, total=len(val_dataloader)):
batch = tuple(t.to(device) for t in batch)
# input_ids, poly_ids, labels = batch
# mask = torch.sign(input_ids)
# inputs = {"input_ids": input_ids,
# "poly_ids": poly_ids,
# "attention_mask": mask}
if hparams.use_output_mask:
input_ids, poly_ids, labels, output_mask = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
else:
input_ids, poly_ids, labels = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
with torch.no_grad():
logits, _ = model(**inputs)
batch_size = logits.size(0)
logits = logits[torch.arange(batch_size), poly_ids]
# logits = logits.exp()
# output_mask_false = 1.0 - output_mask
# logits = logits - output_mask_false
# logits = mask_criterion(logits, output_mask, True)
logits = mask_criterion(logits, output_mask)
preds = torch.argmax(logits, dim=1).cpu().numpy()
mask_preds = masked_augmax(logits, output_mask, dim=1).cpu().numpy()
if not (preds == mask_preds).all():
print('CHECK preds:', preds)
print('CHECK mask_preds:', mask_preds)
print('CHECK labels:', labels)
print('CHECK output_mask:', np.where(output_mask.cpu().numpy()==1.0))
all_preds.append(preds)
all_mask_preds.append(mask_preds)
all_labels.append(labels.cpu().numpy())
preds = np.concatenate(all_preds, axis=0)
mask_preds = np.concatenate(all_mask_preds, axis=0)
labels = np.concatenate(all_labels, axis=0)
# print('CHECK preds:', preds)
# print('CHECK mask_preds:', mask_preds)
# print('CHECK labels:', labels)
val_acc = accuracy_score(labels, preds)
mask_val_acc = accuracy_score(labels, mask_preds)
pred_diff_acc = accuracy_score(preds, mask_preds)
print("epoch :{}, acc: {:.2f}, mask acc: {:.2f}, pred_diff_acc: {:.2f}".format(epoch, val_acc*100, mask_val_acc*100, pred_diff_acc*100))
if val_acc > best_acc:
best_acc = val_acc
state_dict = model.state_dict()
save_file = os.path.join(
model_dir, "{:.2f}_model.pt".format(val_acc*100))
torch.save(state_dict, save_file)
def train_poly_tts(args, hparams):
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
print('CHECK HERE train TTS poly')
train_dataloader = polyTTS_get_dataloader(hparams.use_output_mask, hparams.training_files,
hparams, hparams.poly_batch_size,
hparams.poly_max_length, shuffle=True)
val_dataloader = polyTTS_get_dataloader(hparams.use_output_mask, hparams.validation_files,
hparams, hparams.poly_batch_size,
hparams.poly_max_length, shuffle=True)
with codecs.open(hparams.class2idx, 'r', 'utf-8') as usernames:
class2idx = json.load(usernames)
print("num classes: {}".format(len(class2idx)))
num_classes = len(class2idx)
model = poly_tonesandhi(num_classes, hparams)
device = torch.cuda.current_device()
model = model.to(device)
for name, param in model.named_parameters():
# frozen syntax module
if name.split('.')[0] == 'g2ptransformermask':
if name.split('.')[1] != 'tree_shared_linear' and name.split('.')[1] != 'structure_cnn_poly' \
and name.split('.')[1] != 'linear_pre' and name.split('.')[1] != 'poly_phoneme_classifier' \
and name.split('.')[1] != 'linear_aft':
param.requires_grad = False
training_parameters_list = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(training_parameters_list, lr=hparams.poly_lr)
# criterion = nn.CrossEntropyLoss()
criterion = nn.NLLLoss()
# mask_criterion = Mask_Softmax()
mask_criterion = Gumbel_Softmax()
model_dir = "./save/poly_tts_CNN_syntax_frozen"
if not os.path.exists(model_dir):
os.makedirs(model_dir)
best_acc = 0
for epoch in range(hparams.poly_epochs):
model.train()
for idx, batch in enumerate(train_dataloader, start=1):
# print('CEHCK batch:', batch)
# if idx > 200:
# break
batch = tuple(t.to(device) for t in batch)
if hparams.use_output_mask:
input_ids, poly_ids, labels, output_mask = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
else:
input_ids, poly_ids, labels = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
logits, _ = model(**inputs)
# logits = mask_criterion(logits, output_mask, True)
labels, logits, output_mask = model.select_poly(labels, logits, output_mask, poly_ids)
logits = mask_criterion(logits, output_mask)
loss = criterion(logits, labels)
loss.backward()
# nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
model.zero_grad()
if idx % 100 == 0:
print("loss : {:.4f}".format(loss.item()))
all_preds = []
all_labels = []
model.eval()
for batch in tqdm(train_dataloader, total=len(train_dataloader)):
batch = tuple(t.to(device) for t in batch)
if hparams.use_output_mask:
input_ids, poly_ids, labels, output_mask = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
else:
input_ids, poly_ids, labels = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
with torch.no_grad():
logits, _ = model(**inputs)
labels, logits, output_mask = model.select_poly(labels, logits, output_mask, poly_ids)
logits = mask_criterion(logits, output_mask)
# labels, logits = model.select_poly(labels, logits, poly_ids)
preds = torch.argmax(logits, dim=1).cpu().numpy()
all_preds.append(preds)
all_labels.append(labels.cpu().numpy())
        preds = np.concatenate(all_preds, axis=0)
import os
import sys
import unittest
from typing import Dict
from nptyping import NDArray
import numpy as np
# TODO: Handle module imports in another way if it is possible
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from dea.utils.symbols import *
from dea.technical.dea_radial import DEARadial
from dea.core.abstract_dea_technical import AbstractDEATechnical
class TestDEARadial(unittest.TestCase):
"""
Tests for RadialDEA model.
"""
# test data
X = np.array(
[[5, 13], [16, 12], [16, 26], [17, 15], [18, 14], [23, 6], [25, 10], [27, 22], [37, 14], [42, 25], [5, 17]])
Y = np.array([[12], [14], [25], [26], [8], [9], [27], [30], [31], [26], [12]])
def test_inputs(self):
self.assertEqual(self.X.shape, (11, 2))
self.assertEqual(self.Y.shape, (11, 1))
all_models: Dict[str, AbstractDEATechnical] = {}
all_models_type: Dict[str, type] = {}
all_models_ndmu: Dict[str, int] = {}
all_models_ninp: Dict[str, int] = {}
all_models_nout: Dict[str, int] = {}
all_models_efficiency: Dict[str, NDArray] = {}
all_models_slacks_X: Dict[str, NDArray] = {}
all_models_slacks_Y: Dict[str, NDArray] = {}
all_models_peersmatrix: Dict[str, NDArray] = {}
# ------------------------------------------------------------
# TEST - Input Oriented CRS
# ------------------------------------------------------------
# input oriented crs
input_crs_radial_dea = DEARadial(orient=Orient.Input, rts=RTS.CSR, disposX=Dispos.Strong, disposY=Dispos.Strong)
input_crs_radial_dea.fit(X, Y)
all_models["input_csr"] = input_crs_radial_dea
all_models_type["input_csr"] = DEARadial
all_models_ndmu["input_csr"] = 11
all_models_ninp["input_csr"] = 2
all_models_nout["input_csr"] = 1
all_models_efficiency["input_csr"] = np.array([
[1.0000000000],
[0.6222896791],
[0.8198562444],
[1.0000000000],
[0.3103709311],
[0.5555555556],
[1.0000000000],
[0.7576690896],
[0.8201058201],
[0.4905660377],
[1.0000000000]
], dtype=np.float64)
all_models_slacks_X["input_csr"] = np.array([
[0.000000000, 0],
[0.000000000, 0],
[0.000000000, 0],
[0.000000000, 0],
[0.000000000, 0],
[4.444444444, 0],
[0.000000000, 0],
[0.000000000, 0],
[1.640211640, 0],
[0.000000000, 0],
[0.000000000, 4]
], dtype=np.float64)
all_models_slacks_Y["input_csr"] = | np.zeros((11, 1), dtype=np.float64) | numpy.zeros |
"""
Convert the rating matrix into conversion rate matrix;
Generate the simulated prediction conversion rate matrix.
"""
import pickle
import numpy as np
file = open("data/predicted_matrix", "rb")
prediction = np.array(pickle.load(file), dtype=float)
user_num = pickle.load(file)
item_num = pickle.load(file)
file.close()
# CVR = [0.1, 0.3, 0.5, 0.7, 0.9]
# ratio = [0.53, 0.24, 0.14, 0.06, 0.03] (the same distribution as in Yahoo R3! MAR test set)
total_num = prediction.shape[0]
index = np.argsort(prediction)
index_inverse = np.argsort(index)
prediction = prediction[index]
prediction[:int(total_num*0.53)] = 0.1
prediction[int(total_num*0.53):int(total_num*0.77)] = 0.3
prediction[int(total_num*0.77):int(total_num*0.91)] = 0.5
prediction[int(total_num*0.91):int(total_num*0.98)] = 0.7
prediction[int(total_num*0.98):] = 0.9
ground_truth = prediction[index_inverse]
print(ground_truth[:20])
# Simulated prediction 1 - ONE
# Randomly select n_0.9 0.1, and set 0.1 to 0.9, where n_0.9 denotes the number of the 0.9 in ground_truth
n_0_1 = np.count_nonzero(np.where(ground_truth == 0.1))
n_0_9 = np.count_nonzero(np.where(ground_truth == 0.9))
select = np.random.choice(n_0_1, n_0_9, replace=False)
prediction = ground_truth[index]
prediction[select] = 0.9
one = prediction[index_inverse]
# Simulated prediction 2 - THREE
# Randomly select n_0.9 0.3, and set 0.3 to 0.9, where n_0.9 denotes the number of the 0.9 in ground_truth
n_0_3 = np.count_nonzero(np.where(ground_truth == 0.3))
select = np.random.choice(n_0_3, n_0_9, replace=False)+int(total_num*0.53)
prediction = ground_truth[index]
prediction[select] = 0.9
three = prediction[index_inverse]
# Simulated prediction 3 - FIVE
# Randomly select n_0.9 0.5, and set 0.5 to 0.9, where n_0.9 denotes the number of the 0.9 in ground_truth
n_0_5 = np.count_nonzero(np.where(ground_truth == 0.5))
select = np.random.choice(n_0_5, n_0_9, replace=False)+int(total_num*0.77)
prediction = ground_truth[index]
prediction[select] = 0.9
five = prediction[index_inverse]
# Simulated prediction 4 - SKEW
# r ~ N(\mu=r, \sigma=(1-r)/2), and then r is clipped to [0.1~0.9]
prediction = np.copy(ground_truth)
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(threshold=np.inf)
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import distance
fig = plt.figure(figsize=(6,8))
ax1 = fig.add_subplot(211,projection='3d')
ax2 = fig.add_subplot(212)
ax2.set_title("learing curve")
ax2.set_xlim(0,30)
ax2.set_ylim(0,1)
x_min = -5
x_max = 5
y_min = -5
y_max = 5
z_min = 0
z_max = 15
X = np.arange(x_min, x_max, 0.25)
Y = np.arange(y_min, y_max, 0.25)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
id
import itertools
import datetime
import matplotlib.colors as colors
import matplotlib.cm as cm
import os
import statistics
import pysolar
#-----------------------------------------------------------------------------
# Code motivation -----------------------------------------------------------
'Code for relating and analysing the clear-sky index (Kt*) and the clearness index (Kt).'
'It also includes an analysis of their rate of change to evaluate their variability, together with the'
'cloud cover fraction. It is run on the historical data because the goal is to analyse variability.'
Theoric_Model = 'GIS' ##---> 'GIS' to use the GIS model, or 'Piranometro' to use the pyranometer model
##############################################################################
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
##############################################################################
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################################
##-----------------------------------READING THE PYRANOMETER DATA-------------------------------##
##########################################################################################################
df_pira_TS = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018_2019.txt', parse_dates=[2])
df_pira_TS = df_pira_TS.set_index(["fecha_hora"])
df_pira_TS.index = df_pira_TS.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_TS.index = df_pira_TS.index.tz_localize(None)
df_pira_TS = df_pira_TS[df_pira_TS['radiacion'] >=0]
df_pira_CI = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018_2019.txt', parse_dates=[2])
df_pira_CI = df_pira_CI.set_index(["fecha_hora"])
df_pira_CI.index = df_pira_CI.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_CI.index = df_pira_CI.index.tz_localize(None)
df_pira_CI = df_pira_CI[df_pira_CI['radiacion'] >=0]
df_pira_JV = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018_2019.txt', parse_dates=[2])
df_pira_JV = df_pira_JV.set_index(["fecha_hora"])
df_pira_JV.index = df_pira_JV.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_JV.index = df_pira_JV.index.tz_localize(None)
df_pira_JV = df_pira_JV[df_pira_JV['radiacion'] >=0]
## ------------------------------------HOURLY RADIATION DATA----------------------------- ##
df_pira_JV_h = df_pira_JV.groupby(pd.Grouper(freq="H")).mean()
df_pira_CI_h = df_pira_CI.groupby(pd.Grouper(freq="H")).mean()
df_pira_TS_h = df_pira_TS.groupby(pd.Grouper(freq="H")).mean()
df_pira_JV_h = df_pira_JV_h.between_time('06:00', '17:59')
df_pira_CI_h = df_pira_CI_h.between_time('06:00', '17:59')
df_pira_TS_h = df_pira_TS_h.between_time('06:00', '17:59')
##############################################################################
## ----------------READING THE THEORETICAL RADIATION DATA--------------- ##
##############################################################################
import datetime
def daterange(start_date, end_date):
'For adjusting the dates of the Kumar model every 10 min. The start and end dates are str: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if Theoric_Model == 'Piranometro':
df_Theoric = pd.read_csv("/home/nacorreasa/Maestria/Datos_Tesis/RadiacionTeorica_DataFrames/df_PIR.csv", sep=',', index_col =0)
df_Theoric.index = pd.to_datetime(df_Theoric.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
elif Theoric_Model == 'GIS':
df_Theoric = pd.read_csv("/home/nacorreasa/Maestria/Datos_Tesis/RadiacionTeorica_DataFrames/df_GIS.csv", sep=',', index_col =0)
df_Theoric.index = pd.to_datetime(df_Theoric.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
fechas_new = [df_Theoric.index[i].replace(year = 2018) for i in range(len(df_Theoric.index))]
df_Theoric.index = fechas_new
df_Theoric.index = pd.to_datetime(df_Theoric.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_Fechas = pd.DataFrame(fechas, index = fechas, columns = ['fechas'])
df_Theoric = pd.concat([df_Fechas,df_Theoric ], axis=1)
df_Theoric = df_Theoric.drop(['fechas' ], axis=1)
##-------------------------------------DUPLICATING SO THAT IT SPANS TWO YEARS-----------------------------##
df_Theoric_2 = df_Theoric.copy()
df_Theoric_2.index = [df_Theoric_2.index[i].replace(year=2019) for i in range(len(df_Theoric_2.index))]
df_result = pd.concat([df_Theoric, df_Theoric_2])
del df_Theoric_2, df_Theoric
df_Theoric = df_result
##############################################################################
## ---------------------------CLEARNESS INDEX--------------------------- ##
##############################################################################
# def Radiacion_Tope_Atmosfera(Latitud):
# 'Función para la estimación de la radiación al tope de la atmosfera a partir del lo explicado en el libro de Iqbal(1983), a ventana diaria'
# '(Qd) y horaria (Io), en el que el parametro de entrada es la Latitud de la superficie horizontal, en coordenadas feográdicas, como float.'
# 'El tiempo solar está acotado apra el meridiano 75.'
# ##---DECLINACION SOLAR---##
# J = np.arange(1, 366, 1)
# g = 2*m.pi*(J-1)/365
# d = (0.006918 - 0.399912*np.cos(g) + 0.070257*np.sin(g) - 0.006758*np.cos(2*g) + 0.000907*np.sin(2*g) - 0.002697*np.cos(3*g) + 0.00148*np.sin(3*g)+ 0.000907*np.sin(2*g) - 0.002697*np.cos(3*g) + 0.00148*np.sin(3*g))
#
# ##---LOCAL STANDART TIME---##
# LT = np.arange(0, 24, 1)
#
# ##---ECUACION DEL TIEMPO---##
# B = 2*m.pi*(J-81)/365
# ET = 9.87*np.sin(2*B)-7.53*np.cos(B)-1.5*np.cos(B)
#
# ##---TIEMPO SOLAR---##
# Ls = -75. #Meridiano estándar en grados decimales
# # Ls = -75*m.pi/180. #Meridiano estándar en radianes
# Ll = -75.56359 #Meridiano local en grados decimales
# # Ll = -75.56359 *m.pi/180. #Meridiano local en radianes
# L = (Ls-Ll)/15
# TS = []
# for j in range(len(ET)):
# for i in range(len(LT)):
# TS.append(LT[i]+(ET[j]/60)+L)
#
# ##----ANGULO HORARIO-----##
# w = []
# for i in range(len(TS)):
# w.append(15*(12-TS[i])) #En grados decimales
#
# w = np.array(w)*m.pi/180 #En radianes
#
# ##---EXCENTRICIDAD---##
# Eo = 1+(0.0033*np.cos((2*np.pi*J)/365))
#
# ##---CAMBIO A RESOLUCIÓN HORARIA---#
# d_h = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in list(d)))
# Eo_h = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in Eo))
#
# ##---LATITUD DE UBICACIÓN---#
# Lat = Latitud*m.pi/180. #En radianes
#
# ##---DISTANCIA ENTRE LA TIERRA Y EL SOL---##
# dist = 1 - 0.01672*(np.cos(0.985*(J-4)))
# dist = dist*10E10
# distM = 1.5*10E11 # Verificar unidades
# So = 1367 # W/m2
#
# ##---ANGULOS HORARIOS DE AMANECIDA Y ATARDECIDA---##
# ##---Ángulo horario por día (amanecer)---##
# ho = np.arccos(-1*(np.tan(Lat)*np.tan(d))) # En grados
# ho = ho*180/np.pi
# ##---Ángulo horario por día (atardecer)---##
# hf = -1*ho # En grados
#
# ##---TIEMPOR DE AMANECIDA Y ATARDECIDA---##
# ##---Tiempo en horas de amanecida a partir del ángulo horario---##
# to = 12 - ho/15 # En horas decimales
# to_m = np.mean(to)
#
# time_o = []
# for i in range(len(to)):
# a = (str(datetime.timedelta(seconds = to[i]*3600))[0:7])
# time_o.append(datetime.datetime.strptime(a, '%H:%M:%S').time())
#
# ##---Tiempo en horas de atardecer a partir del ángulo horario---##
# tf = 12 - hf/15 # En horas decimales
# tf_m = np.mean(tf)
#
# time_f = []
# for i in range(len(tf)):
# a = (str(datetime.timedelta(seconds = tf[i]*3600))[0:7])
# time_f.append(datetime.datetime.strptime(a, '%H:%M:%S').time())
#
# ##---INSOLACIÓN PROMEDIO DIARIA AL TOPE DE LA ATMOSFERA---##
# Qd = So/np.pi*((distM/dist)**2)*(ho*np.sin(Lat)*np.sin(d) + np.cos(Lat)*np.cos(d)*np.sin(ho))
#
# ##---RADIACIÓN HORARIA AL TOPE DE LA ATMOSFERA---##
# Io = []
# for i in range(len(d_h)):
# Io.append(So*Eo_h[i]*(np.sin(d_h[i])*np.sin(Lat) + np.cos(d_h[i])*np.cos(Lat)*np.cos(w[i])))
# return Io, Qd
#
#Ioh, Qd = Radiacion_Tope_Atmosfera(6.217)
import pvlib
Io_h = [pvlib.irradiance.get_extra_radiation(fechas[i], solar_constant=1366.1, method='spencer', epoch_year=2019) for i in range(len(fechas))] ## Wm2
df_Kt = pd.DataFrame(Io_h, index=fechas, columns = ['TAR'] )
df_Kt = pd.concat([df_Kt, df_pira_TS_h['radiacion']], axis=1).reindex(df_Kt.index)
df_Kt = pd.concat([df_Kt, df_pira_CI_h['radiacion']], axis=1).reindex(df_Kt.index)
df_Kt = pd.concat([df_Kt, df_pira_JV_h['radiacion']], axis=1).reindex(df_Kt.index)
df_Kt.columns = ['TAR', 'radiacion_975', 'radiacion_350', 'radiacion_348']
df_Kt['Kt_TS'] = df_Kt['radiacion_975']/df_Kt['TAR']
df_Kt['Kt_CI'] = df_Kt['radiacion_350']/df_Kt['TAR']
df_Kt['Kt_JV'] = df_Kt['radiacion_348']/df_Kt['TAR']
#####################################################################################
## ---------------------------CLEAR-SKY INDEX--------------------------- ##
#####################################################################################
if Theoric_Model == 'GIS':
df_Kt['Kt*_TS'] = df_Kt['radiacion_975']/ df_Theoric['Rad_teo_975']
df_Kt['Kt*_CI'] = df_Kt['radiacion_350']/ df_Theoric['Rad_teo_350']
df_Kt['Kt*_JV'] = df_Kt['radiacion_348']/ df_Theoric['Rad_teo_348']
elif Theoric_Model == 'Piranometro':
df_Kt['Kt*_TS'] = df_Kt['radiacion_975']/ df_Theoric['Io']
df_Kt['Kt*_CI'] = df_Kt['radiacion_350']/ df_Theoric['Io']
df_Kt['Kt*_JV'] = df_Kt['radiacion_348']/ df_Theoric['Io']
# df_Kt = df_Kt[(df_Kt['Kt*_TS']<=1)]
# df_Kt = df_Kt[(df_Kt['Kt*_CI']<=1)]
# df_Kt = df_Kt[(df_Kt['Kt*_JV']<=1)]
"El dataframe creado esta a resolución de una hora"
# ########################################################################################################
# ## ---------------------------APPLYING ADDITIONAL CLEANING TO THE DATA--------------------------- ##
# ########################################################################################################
#
# # df_Kt = df_Kt[(df_Kt['Kt*_TS']<=1)&(df_Kt['Kt*_CI']<=1)&(df_Kt['Kt*_JV']<=1)&(df_Kt['Kt_TS']<=1)&(df_Kt['Kt_CI']<=1)&(df_Kt['Kt_JV']<=1)]
# # df_Kt = df_Kt[(df_Kt['Kt*_TS']>=0)&(df_Kt['Kt*_CI']>=0)&(df_Kt['Kt*_JV']>=0)&(df_Kt['Kt_TS']>=0)&(df_Kt['Kt_CI']>=0)&(df_Kt['Kt_JV']>=0)]
#
# def normalize_df_column(dfcol):
# x, y = np.nanmin(dfcol.values), np.nanmax(dfcol.values)
# list = (dfcol-[x]).div(y-x)
# dfcol = list
# return dfcol
#
# normalize_df_column(df_Kt['Kt*_TS'])
# normalize_df_column(df_Kt['Kt*_CI'])
# normalize_df_column(df_Kt['Kt*_JV'])
# normalize_df_column(df_Kt['Kt_TS'])
# normalize_df_column(df_Kt['Kt_CI'])
# normalize_df_column(df_Kt['Kt_JV'])
#
######################################################################################
## ---------------------------DIURNAL AND MONTHLY CYCLES--------------------------- ##
######################################################################################
new_idx_CD = np.arange(6, 18, 1)
df_Kt_CD = df_Kt.groupby(by=[df_Kt.index.hour]).mean()
df_Kt_CD = df_Kt_CD.reindex(new_idx_CD)
new_idx_CA = np.arange(1, 13, 1)
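# The file is truncated here; by analogy with the diurnal cycle above, the
# monthly (annual) cycle would presumably be built as follows (an assumption,
# not the original code):
df_Kt_CA = df_Kt.groupby(by=[df_Kt.index.month]).mean()
df_Kt_CA = df_Kt_CA.reindex(new_idx_CA)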
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
for key in blobs:
if key != 'roidb':
blobs[key] = blobs[key].squeeze(axis=0)
if self._roidb[index]['need_crop']:
self.crop_data(blobs, ratio)
# Check bounding box
entry = blobs['roidb'][0]
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
valid_inds = np.nonzero(~ invalid)[0]
valid_keys = ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
'box_to_gt_ind_map', 'gt_keypoints']
if cfg.MODEL.IDENTITY_TRAINING is True:
valid_keys = valid_keys + ['instance_id', 'global_instance_id', 'gt_overlaps_id']
if len(valid_inds) < len(boxes):
for key in valid_keys:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
blobs['roidb'] = blob_utils.serialize(blobs['roidb']) # CHECK: maybe we can serialize in collate_fn
return blobs
def crop_data(self, blobs, ratio):
data_height, data_width = map(int, blobs['im_info'][:2])
boxes = blobs['roidb'][0]['boxes']
if ratio < 1: # width << height, crop height
size_crop = math.ceil(data_width / ratio) # size after crop
min_y = math.floor(np.min(boxes[:, 1]))
max_y = math.floor(np.max(boxes[:, 3]))
box_region = max_y - min_y + 1
if min_y == 0:
y_s = 0
else:
if (box_region - size_crop) < 0:
y_s_min = max(max_y - size_crop, 0)
y_s_max = min(min_y, data_height - size_crop)
y_s = y_s_min if y_s_min == y_s_max else \
npr.choice(range(y_s_min, y_s_max + 1))
else:
# CHECK: rethinking the mechanism for the case box_region > size_crop
# Now, the crop is biased on the lower part of box_region caused by
# // 2 for y_s_add
y_s_add = (box_region - size_crop) // 2
y_s = min_y if y_s_add == 0 else \
npr.choice(range(min_y, min_y + y_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, y_s:(y_s + size_crop), :,]
# Update im_info
blobs['im_info'][0] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 1] -= y_s
boxes[:, 3] -= y_s
np.clip(boxes[:, 1], 0, size_crop - 1, out=boxes[:, 1])
np.clip(boxes[:, 3], 0, size_crop - 1, out=boxes[:, 3])
blobs['roidb'][0]['boxes'] = boxes
else: # width >> height, crop width
size_crop = math.ceil(data_height * ratio)
min_x = math.floor(np.min(boxes[:, 0]))
max_x = math.floor(np.max(boxes[:, 2]))
box_region = max_x - min_x + 1
if min_x == 0:
x_s = 0
else:
if (box_region - size_crop) < 0:
x_s_min = max(max_x - size_crop, 0)
x_s_max = min(min_x, data_width - size_crop)
x_s = x_s_min if x_s_min == x_s_max else \
npr.choice(range(x_s_min, x_s_max + 1))
else:
x_s_add = (box_region - size_crop) // 2
x_s = min_x if x_s_add == 0 else \
npr.choice(range(min_x, min_x + x_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, :, x_s:(x_s + size_crop)]
# Update im_info
blobs['im_info'][1] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 0] -= x_s
boxes[:, 2] -= x_s
np.clip(boxes[:, 0], 0, size_crop - 1, out=boxes[:, 0])
np.clip(boxes[:, 2], 0, size_crop - 1, out=boxes[:, 2])
blobs['roidb'][0]['boxes'] = boxes
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
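# Worked example (a sketch, not part of the original file): with
# cfg.TRAIN.IMS_PER_BATCH == 2 and a sorted ratio_list of [0.5, 0.8, 1.2, 2.0],
# cal_minibatch_ratio returns [0.5, 0.5, 2.0, 2.0] -- the first pair keeps the
# leftmost (tallest) ratio and the second keeps the rightmost (widest) one, so
# every image in a minibatch can be padded to a common shape.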
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
if cfg.TRAIN.ASPECT_GROUPING:
# Given the ratio_list, we want to make the ratio same
# for each minibatch on each GPU.
self.ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
def __iter__(self):
if cfg.TRAIN.ASPECT_GROUPING:
# indices for aspect-grouping-aware permutation
n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
indices = np.arange(round_num_data)
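# The file is truncated here; a sketch of how the aspect-grouping-aware
# permutation typically continues (an assumption, not the original code):
# shuffle whole minibatches so grouped images stay together, append any
# leftover indices, then yield (index, ratio) pairs for __getitem__.
indices = indices.reshape((n, cfg.TRAIN.IMS_PER_BATCH))
npr.shuffle(indices)
indices = np.append(indices.reshape(-1), np.arange(round_num_data, self.num_data))
return iter(zip(self.ratio_index[indices].tolist(), self.ratio_list_minibatch[indices].tolist()))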
import numpy as np
from scipy import constants
import matplotlib.pyplot as plt
import matplotlib as mpl
import meep
import meep_ext
import pinboard
job = pinboard.pinboard()
nm = 1e-9
um = 1e-6
### geometry
radius = 75*nm
gold = meep_ext.material.Au()
# gold = meep.Medium(index=3.5)
### source
wavelength = 550*nm
fcen = 1/wavelength
src_time = meep.GaussianSource(frequency=1.3/um, fwidth=4.0/um)
source = lambda sim: meep_ext.rhc_polarized_plane_wave(sim, src_time)
### monitor info
pml_monitor_gap = 50*nm
particle_monitor_gap = 50*nm
norm_file_ext = 'norm_{}'
### grid
resolution = 1/(10*nm)
pml = meep.PML(100*nm)
@job.cache
def norm_sim(monitor_size, unique_id):
"""perform normalization simulation with a given box size"""
monitor_size = np.asarray(monitor_size)
cell_size = monitor_size + 2*pml_monitor_gap + 2*pml.thickness
cell = meep.Vector3(*cell_size)
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
resolution=resolution)
norm.init_fields()
source(norm)
flux_inc = meep_ext.add_flux_plane(norm, fcen, 0, 1, [0,0,0], [2*radius, 2*radius, 0])
flux_box_inc = meep_ext.add_flux_box(norm, fcen, 0, 1, [0,0,0], monitor_size)
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, meep.Ex,
pt=meep.Vector3(0,0,monitor_size[2]/2), decay_by=1e-3))
norm.save_flux(norm_file_ext.format(unique_id), flux_box_inc)
return {'area': (2*radius)**2, 'norm': np.asarray(meep.get_fluxes(flux_inc))}
@job.cache
def sim(separation, monitor_size, unique_id):
"""perform scattering simulation"""
monitor_size = np.asarray(monitor_size)
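# The function is cut off here; by analogy with norm_sim above it would
# presumably continue along these lines (an assumption, not the original code):
cell_size = monitor_size + 2*pml_monitor_gap + 2*pml.thickness
cell = meep.Vector3(*cell_size)
# ... then build the particle geometry (presumably gold spheres separated by
# `separation`), attach the source and flux monitors, load the saved
# normalization flux from norm_file_ext.format(unique_id), and run until the
# fields decay.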
import scipy
import numpy as np
def GSPCA( data, labels, nComp, param ):
#GSPCA calculates generalised advanced supervised PCA with respect to [1].
# [ V, D ] = GSPCA( data, labels, nComp, kind ) return n-by-nComp
# matrix V with PCs as columns and diagonal nComp-by-nComp
# matrix D with eigenvalues corresponding to PCs.
# data is n-by-m matrix of data (covariance matrix is unacceptable). Data
# MUST be centred before.
# labels is a numeric vector with n elements. The same label corresponds
# to points of the same class. The number of unique values in labels is
# L. Classes are enumerated in the order of increasing value of labels.
# nComp is the number of required components.
# param is parameter of method:
# scalar numeric value is parameter of intraclass attraction: the
# functional to maximise is mean squared distances between points
# of different classes minus param multiplied to sum of mean
# squared distances between points of each class
# numeric vector with L elements is vector of attractions in each
# class: the functional to maximise is mean squared distances
# between points of different classes minus sum of sum of mean
# squared distances between points of each class multiplied by
# corresponding element of vector param.
# numeric matrix L-by-L is the matrix of repulsion coefficients. The
# elements above the main diagonal are coefficients of repulsion
# between the corresponding classes. The diagonal elements are
# attraction coefficients for the corresponding classes.
#
#References
#1. Mirkes, <NAME>., <NAME>., Zinovyev, <NAME>.,
# Supervised PCA, Available online in https://github.com/Mirkes/SupervisedPCA/wiki
#2. Gorban, <NAME>., Zinovyev, <NAME>. “Principal Graphs and Manifolds”,
# Chapter 2 in: Handbook of Research on Machine Learning Applications and Trends:
# Algorithms, Methods, and Techniques, <NAME> et al. (eds),
# IGI Global, Hershey, PA, USA, 2009, pp. 28-59.
#3. Zinovyev, <NAME>. "Visualisation of multidimensional data" Krasnoyarsk: KGTU,
# p. 180 (2000) (In Russian).
#4. Koren, Yehuda, and <NAME>. "Robust linear dimensionality
# reduction." Visualization and Computer Graphics, IEEE Transactions on
# 10.4 (2004): 459-470.
#
#Licensed from CC0 1.0 Universal - Author <NAME> https://github.com/Mirkes/SupervisedPCA/blob/master/
#Get sizes of data
n, m = data.shape
data = data.astype(float)
labels = labels.astype(float)
# List of classes
labs = np.unique(labels)
# Number of classes
L = len(labs)
# Check the type of nComp
if nComp > m or nComp < 1:
raise ValueError('Incorrect value of nComp: it must be positive integer equal to or less than m')
# Form matrix of coefficients
if type(param) in [int,float]:
coef = np.ones((L,L))
coef = coef + np.diag((param - 1) * np.diag(coef))
elif len(param.shape) == 1:
if len(param) != L:
raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\n where L is number of classes (unique values in labels)'])
coef = np.ones((L,L))
coef = coef + np.diag(np.diag(param - 1))
elif len(param.shape) == 2:
[a, b] = param.shape
if a != L or b != L:
raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\n where L is number of classes (unique values in labels)'])
else:
raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\n where L is number of classes (unique values in labels)'])
# Symmetrize coef matrix
coef = coef - np.tril(coef, -1) + np.triu(coef, 1).T
# Calculate diagonal terms of Laplacian matrix without devision by
# number of elements in class
diagV = np.diag(coef)
diagC = np.sum(coef,axis=0) - diagV
# Calculate transformed covariance matrix
M = np.zeros((m,m))
means = np.zeros((L, m))
# Loop to form the diagonal terms and calculate means
for c in range(L):
# Get index of class
ind = labels == labs[c]
# Calculate mean
means[c, :] = np.mean(data[ind, :],axis=0)
# Calculate coefficient for Identity term
nc = np.sum(ind,axis=0)
coefD = diagC[c] / nc - 2 * diagV[c] / (nc - 1)
# Add the diagonal term
M = (M + 2 * diagV[c] * nc / (nc - 1) * (means[[c], :].T @ means[[c], :])
+ coefD * data[ind, :].T @ data[ind, :])
# Loop for off diagonal parts
for c in range(L - 1):
for cc in range(c + 1, L):
tmp = means[[c], :].T @ means[[cc], :]
M = M - coef[c, cc] * (tmp + tmp.T)
#Request calculations from eigs
if nComp<m-1:
D, V = scipy.sparse.linalg.eigs(M, nComp)
else:
D, V = scipy.linalg.eig(M)
ind = np.argsort(D)
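# The function is truncated here; a minimal sketch of the usual closing steps
# (an assumption, not the original code): order the eigenpairs by decreasing
# eigenvalue and return the leading nComp of them.
ind = ind[::-1][:nComp]
D = np.real(D[ind])
V = np.real(V[:, ind])
return V, D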
import os
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from model.network import AdaptiveInstanceNormalization
from keras.layers import Input, Dense, Flatten
from keras.layers import Lambda, Conv2D
from keras.models import Model
from keras.optimizers import Adam
from keras.constraints import UnitNorm
from keras.regularizers import l2
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from random_eraser import apply_random_eraser_and_mask
from random_eraser import get_random_eraser_and_mask
import hdf5storage
from tqdm import tqdm
from sklearn.metrics import auc
def euclidean_distance(vects):
x, y = vects
sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
return K.sqrt(K.maximum(sum_square, K.epsilon()))
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 10
square_pred = K.square(y_pred)
margin_square = K.square(K.maximum(margin - y_pred, 0))
return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "0";
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
session = tf.Session(config=config)
K.tensorflow_backend.set_session(session)
K.set_learning_phase(False)
tf.set_random_seed(1234)
# Directories of pretrained models/data
model_dir = 'trained_models/lord/model/'
data_loc = 'trained_models/lord/data/celeba_test.npz'
train_data_loc = 'trained_models/lord/data/celeba_vgg.npz'
cbk_loc = 'trained_codebooks/one_sample_fixed.mat'
train_cbk_loc = 'trained_codebooks/train_one_sample_fixed.mat'
# Load all data
all_data = np.load(train_data_loc)
x_d_all = np.copy(all_data['imgs'] / 255.)
y_d_all = np.copy(all_data['classes'])
# Load test data
data = np.load(data_loc)
x_d_test = np.copy(data['imgs'] / 255.)
y_d_test = np.copy(data['classes'])
# Rearrange y_test as ordinal classes (since absolute value of class doesn't matter)
_, y_d_test_ordinal = np.unique(y_d_test, return_inverse=True)
# Filter test data from training data
is_train = np.logical_not(np.isin(y_d_all, y_d_test))
x_d_train = np.copy(x_d_all[is_train])
y_d_train = np.copy(y_d_all[is_train])
# Free up memory
del all_data, x_d_all, y_d_all
# Rearrange y_train as ordinal classes (since absolute value of class doesn't matter)
_, y_d_train_ordinal = np.unique(y_d_train, return_inverse=True)
# Load model by parts
content_encoder = load_model(os.path.join(model_dir, 'content_encoder.h5py'))
class_encoder = load_model(os.path.join(model_dir, 'class_encoder.h5py'))
class_modulation = load_model(os.path.join(model_dir, 'class_modulation.h5py'))
generator = load_model(os.path.join(model_dir, 'generator.h5py'), custom_objects={
'AdaptiveInstanceNormalization': AdaptiveInstanceNormalization})
# Predict content
# Train
train_content = content_encoder.predict(x_d_train)
# Test
test_content = content_encoder.predict(x_d_test)
# Load modulation codebooks
contents = hdf5storage.loadmat(train_cbk_loc)
train_person_mod_codebook = contents['frozen_class_mod']
train_person_codebook = contents['frozen_class']
contents = hdf5storage.loadmat(cbk_loc)
person_mod_codebook = contents['frozen_class_mod']
person_codebook = contents['frozen_class']
# Construct training and validation sets
np.random.seed(2020) # Current year
num_train_persons = 2000
num_val_persons = 100 # Drawn from test persons
train_persons = np.random.choice(np.max(y_d_train_ordinal)+1, size=num_train_persons, replace=False)
val_persons = np.random.choice(np.max(y_d_test_ordinal)+1, size=num_val_persons, replace=False)
x_train = np.copy(x_d_train[np.isin(y_d_train_ordinal, train_persons)])
x_val = np.copy(x_d_test[np.isin(y_d_test_ordinal, val_persons)])
y_train = np.copy(y_d_train_ordinal[np.isin(y_d_train_ordinal, train_persons)])
y_val = np.copy(y_d_test_ordinal[np.isin(y_d_test_ordinal, val_persons)])
c_train = np.copy(train_content[np.isin(y_d_train_ordinal, train_persons)])
c_val = np.copy(test_content[np.isin(y_d_test_ordinal, val_persons)])
# Once we pick validation persons, construct their clean reconstructions
x_match_val = generator.predict([c_val, person_mod_codebook[y_val]])
# Free up memory
del x_d_train, x_d_test, train_content, test_content
# Training parameters
batch_size = 256
mining_steps = 2
num_steps = 20000
alpha = 1e-3 # Weight decay coefficient
best_area_val = 0.
best_val_loss = 1e9
# Learning algorithm
trainer = 'adam'
adv_steps = 10
adv_lr = 16. / 255 # Pixels at once
symmetrical_adv = True # Train symmetrically
# Architecture
latent_dim = 128
# Universal labels (for a single batch)
train_pair_labels = np.concatenate((np.ones(batch_size//2), np.zeros(batch_size//2)))[:, None]
val_pairs = len(x_val)
val_pair_labels = np.concatenate((np.ones(val_pairs), np.zeros(val_pairs)))[:, None]
# Input tensors
input_img = Input(shape=(64, 64, 3))
# Dynamic architecture
# Load a VGG16
core_model = VGG16(input_shape=(64, 64, 3), include_top=False)
encoded = core_model(input_img)
# Feature layer
encoded = Flatten()(encoded)
encoded = Dense(latent_dim, activation='linear', kernel_constraint=UnitNorm())(encoded)
# Create shared model
shared_model = Model(input_img, encoded)
# Two input tensors
img_real = Input(shape=(64, 64, 3))
img_gen = Input(shape=(64, 64, 3))
# Get features
features_real = shared_model(img_real)
features_gen = shared_model(img_gen)
# Compute distance
sim_score = Lambda(euclidean_distance)([features_real, features_gen])
# Siamese model
model = Model([img_real, img_gen], sim_score)
# Optimizer
optimizer = Adam(lr=0.001, amsgrad=True)
# Compile
model.compile(optimizer, loss=contrastive_loss, metrics=['accuracy'])
# Apply L2 weight regularization post-factum
for layer in core_model.layers:
if isinstance(layer, Conv2D) or isinstance(layer, Dense):
layer.add_loss(lambda: l2(alpha)(layer.kernel))
if hasattr(layer, 'bias_regularizer') and layer.use_bias:
layer.add_loss(lambda: l2(alpha)(layer.bias))
# Instantiate cutout
eraser = get_random_eraser_and_mask(p=0.5, s_l=0.02, s_h=0.2, r_1=0.5, r_2=2.,
v_l=0., v_h=1., pixel_level=True)
# Instantiate augmentation generator
image_generator = ImageDataGenerator(width_shift_range=5,
height_shift_range=5,
horizontal_flip=True)
# Setup a graph for patch adversarial attacks
x_adv = tf.placeholder(dtype=tf.float32, shape=(None, 64, 64, 3))
x_adv_pair = tf.placeholder(dtype=tf.float32, shape=(None, 64, 64, 3))
# Get features of both
adv_real_features = shared_model(x_adv)
adv_pair_features = shared_model(x_adv_pair)
# Loss function and its gradient
adv_loss = tf.norm(adv_real_features - adv_pair_features, axis=-1)
grad, = tf.gradients(adv_loss, x_adv)
# Where to save weights
result_dir = 'trained_models/proposed'
if not os.path.exists(result_dir):
os.mkdir(result_dir)
weight_name = result_dir + '/steps%d_lr%.1f' % (adv_steps, adv_lr*255.)
# Granularity of AUC
num_points = 100
tpr_val = np.zeros((num_steps, num_points))
fpr_val = np.zeros((num_steps, num_points))
area_val = np.zeros((num_steps,))
# Training/validation logs
train_loss_log = np.zeros((num_steps,))
val_loss_log = np.zeros((num_steps,))
# Train for each batch
for step_idx in tqdm(range(num_steps)):
# Draw a half batch of samples
random_idx = np.random.choice(len(x_train), size=batch_size//2, replace=False)
# Augment and generate them with their correct class codebook
x_match_real_half_batch = image_generator.flow(x_train[random_idx],
shuffle=False, batch_size=batch_size//2)[0]
# Random erasure
x_match_real_half_batch, x_real_mask_half_batch = apply_random_eraser_and_mask(eraser, x_match_real_half_batch)
# Get content code and generate images with correct class codes
real_content = content_encoder.predict(x_match_real_half_batch)
x_match_gen_half_batch = generator.predict([real_content, train_person_mod_codebook[y_train[random_idx]]])
# Adversarial attack on positive pair
if symmetrical_adv:
if adv_steps > 0:
# Find indices where patch augmentation is applied
patch_attack_idx = np.where(np.sum(x_real_mask_half_batch, axis=(1, 2, 3)))[0]
# Check if at least one such sample exists
if len(patch_attack_idx) > 0:
# Compute feature differences before adversarial attack - enable if manual verification is desired, but will slow down processing
# diff_before = model.predict([x_match_real_half_batch[patch_attack_idx],
# x_match_gen_half_batch[patch_attack_idx]])
# Further minimize distance by adversarial attacks
x_orig = np.copy(x_match_real_half_batch[patch_attack_idx])
x_mask_aug = x_real_mask_half_batch[patch_attack_idx]
for internal_step_idx in range(adv_steps):
# Get gradients and outputs
grad_np = session.run(grad, feed_dict={x_adv: x_orig,
x_adv_pair: x_match_gen_half_batch[patch_attack_idx]})
# Normalize, apply and clip
x_orig = np.clip(x_orig + adv_lr * np.sign(grad_np) * x_mask_aug, 0., 1.)
# Compute feature differences after adversarial attack
# diff_after = model.predict([x_orig, x_match_gen_half_batch[patch_attack_idx]])
# Replace samples with adversarials
x_match_real_half_batch[patch_attack_idx] = x_orig
# Mine for hard candidates that use the same class vectors
fake_person_idx = np.asarray([np.random.choice(np.where(np.logical_not(np.isin(y_train, y_train[random_idx[idx]])))[0],
size=mining_steps, replace=False) for idx in range(batch_size//2)]).flatten()
# Generate fake images with the target's class codes
fake_input_candidates = x_train[fake_person_idx]
mod_input = train_person_mod_codebook[np.mod(np.repeat(y_train[random_idx], mining_steps, axis=0),
len(train_person_mod_codebook)).astype(np.int)]
# Augment all negative pairs and generate them
fake_input_candidates_aug = image_generator.flow(fake_input_candidates,
shuffle=False, batch_size=batch_size//2*mining_steps)[0]
# Random erasure - save the mask for potential attacks
fake_input_candidates_aug, fake_erasure_mask = apply_random_eraser_and_mask(eraser, fake_input_candidates_aug)
# Get content code and generate images with swapped class codes
fake_content = content_encoder.predict(fake_input_candidates_aug)
fake_output_candidates = generator.predict([fake_content, mod_input])
# Get their similarity on input-output pairs
fake_sim_candidates = model.predict([fake_input_candidates_aug, fake_output_candidates])
# Reshape
fake_sim_candidates = np.reshape(fake_sim_candidates, (-1, mining_steps))
fake_output_candidates = np.reshape(fake_output_candidates, (batch_size//2, mining_steps, 64, 64, 3))
fake_input_candidates_aug = np.reshape(fake_input_candidates_aug, (batch_size//2, mining_steps, 64, 64, 3))
fake_masks = np.reshape(fake_erasure_mask, (batch_size//2, mining_steps, 64, 64, 3))
# Pick closest pairs
fake_idx = np.argmin(fake_sim_candidates, axis=-1)
# Assign the other half of batches
x_fake_real_half_batch = fake_input_candidates_aug[np.arange(batch_size//2), fake_idx]
x_fake_mask_half_batch = fake_masks[np.arange(batch_size//2), fake_idx]
x_fake_gen_half_batch = fake_output_candidates[np.arange(batch_size//2), fake_idx]
if adv_steps > 0:
# Find indices where patch augmentation is applied
patch_attack_idx = np.where(np.sum(x_fake_mask_half_batch, axis=(1, 2, 3)))[0]
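# The file is truncated here; presumably this block mirrors the positive-pair
# attack above, but steps the hard-negative inputs in the opposite direction so
# that negative pairs become harder to tell apart (an assumption, not the
# original code):
if len(patch_attack_idx) > 0:
x_orig = np.copy(x_fake_real_half_batch[patch_attack_idx])
x_mask_aug = x_fake_mask_half_batch[patch_attack_idx]
for internal_step_idx in range(adv_steps):
grad_np = session.run(grad, feed_dict={x_adv: x_orig,
x_adv_pair: x_fake_gen_half_batch[patch_attack_idx]})
x_orig = np.clip(x_orig - adv_lr * np.sign(grad_np) * x_mask_aug, 0., 1.)
x_fake_real_half_batch[patch_attack_idx] = x_orig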
"""Fama-French and Momentum Research Factors
- CRSP, Compustat, Wharton Research Data Services
<NAME>
License: MIT
"""
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
import time
from settings import settings
import os
from finds.database import SQL, Redis
from finds.busday import BusDay
from finds.structured import PSTAT, CRSP, Benchmarks, Signals
from finds.backtesting import BackTest
from finds.solve import fractiles
LAST_DATE = settings['crsp_date']
sql = SQL(**settings['sql'])
user = SQL(**settings['user'])
rdb = Redis(**settings['redis'])
bd = BusDay(sql)
pstat = PSTAT(sql, bd)
crsp = CRSP(sql, bd, rdb)
bench = Benchmarks(sql, bd)
signals = Signals(user)
backtest = BackTest(user, bench, 'RF', LAST_DATE)
logdir = os.path.join(settings['images'], 'ff') # None
# Load items from Compustat Annual
# Construct HML as shareholders' equity plus investment tax credits, less
# preferred stock, divided by December market cap.
# Require a 6-month reporting lag and at least two years of history in Compustat
label = 'hml'
lag = 6 # number of months to lag fundamental data
df = pstat.get_linked( # retrieve required fields from compustat
dataset = 'annual', date_field = 'datadate',
fields = ['seq', 'pstk', 'pstkrv', 'pstkl', 'txditc'],
where = (f"indfmt = 'INDL' AND datafmt = 'STD' AND curcd = 'USD' "
f" AND popsrc = 'D' AND consol = 'C' AND seq > 0 "))
# subtract preferred stock, add back deferred investment tax credit
df[label] = np.where(df['pstkrv'].isna(), df['pstkl'], df['pstkrv'])
df[label] = np.where(df[label].isna(), df['pstk'], df[label])
df[label] = np.where(df[label].isna(), 0, df[label])
df[label] = df['seq'] + df['txditc'].fillna(0) - df[label]
df.dropna(subset = [label], inplace=True)
df = df[df[label] > 0][['permno', 'gvkey','datadate',label]]
# years in Compustat
df = df.sort_values(by=['gvkey','datadate'])
df['count'] = df.groupby(['gvkey']).cumcount()
# construct b/m ratio
df['rebaldate'] = 0
for datadate in sorted(df['datadate'].unique()):
f = df['datadate'].eq(datadate)
df.loc[f, 'rebaldate'] = crsp.bd.endmo(datadate, abs(lag)) # 6 month lag
df.loc[f, 'cap'] = crsp.get_cap(crsp.bd.endyr(datadate))\
.reindex(df.loc[f, 'permno']).values # Dec mktcap
print(datadate, sum(f))
df[label] /= df['cap']
df = df[df[label].gt(0) & df['count'].gt(1)] # 2+ years in Compustat
signals.write(df, label)
## Fama French portfolio bi-sort
def famafrench_sorts(stocks, label, signals, rebalbeg, rebalend,
window=0, pctiles=[30, 70], leverage=1, months=[],
minobs=100, minprc=0, mincap=0, maxdecile=10):
"""Generate monthly time series of holdings by two-way sort procedure
Parameters
----------
stocks : Structured object
Stock returns and price data
label : string
Signal name to retrieve either from Signal sql table or {data} dataframe
signals : Signals, or chunk_signal object
Call to extract cross section of values for the signal
rebalbeg : int
First rebalance date (YYYYMMDD)
rebalend : int
Last holding date (YYYYMMDD)
pctiles : tuple of int, default is [30, 70]
Percentile breakpoints to sort into high, medium and low buckets
window: int, optional (default 0)
Number of months to look back for signal values (non-inclusive day),
0 (default) is exact date
months: list of int, optional
Month/s (e.g. 6 = June) to retrieve universe and market cap,
[] (default) means every month
maxdecile: int, default is 10
Include largest stocks from decile 1 through decile (10 is smallest)
minobs: int, optional
Minimum required universe size with signal values
leverage: numerical, default is 1.0
Leverage multiplier, if any
Notes
-----
Independent sort by median (NYSE) mkt cap and 30/70 (NYSE) HML percentiles
Subportfolios of the intersections are value-weighted;
spread portfolios are equal-weighted subportfolios
Portfolios are resorted every June, and other months' holdings are adjusted
by monthly realized retx
"""
rebaldates = stocks.bd.date_range(rebalbeg, rebalend, 'endmo')
holdings = {label: dict(), 'smb': dict()} # to return two sets of holdings
sizes = {h : dict() for h in ['HB','HS','MB','MS','LB','LS']}
for rebaldate in rebaldates: #[:-1]
# check if this is a rebalance month
if not months or (rebaldate//100)%100 in months or not holdings[label]:
# rebalance: get this month's universe of stocks with valid data
df = stocks.get_universe(rebaldate)
# get signal values within lagged window
start = (stocks.bd.endmo(rebaldate, months=-abs(window)) if window
else stocks.bd.offset(rebaldate, offsets=-1))
signal = signals(label=label, date=rebaldate, start=start)
df[label] = signal[label].reindex(df.index)
df = df[df['prc'].abs().gt(minprc) &
df['cap'].gt(mincap) &
df['decile'].le(maxdecile)].dropna()
if (len(df) < minobs): # skip if insufficient observations
continue
# split signal into desired fractiles, and assign to subportfolios
df['fractile'] = fractiles(df[label],
pctiles=pctiles,
keys=df[label][df['nyse']],
ascending=False)
subs = {'HB' : (df['fractile'] == 1) & (df['decile'] <= 5),
'MB' : (df['fractile'] == 2) & (df['decile'] <= 5),
'LB' : (df['fractile'] == 3) & (df['decile'] <= 5),
'HS' : (df['fractile'] == 1) & (df['decile'] > 5),
'MS' : (df['fractile'] == 2) & (df['decile'] > 5),
'LS' : (df['fractile'] == 3) & (df['decile'] > 5)}
weights = {label: dict(), 'smb': dict()}
for subname, weight in zip(['HB','HS','LB','LS'],
[0.5, 0.5, -0.5, -0.5]):
cap = df.loc[subs[subname], 'cap']
weights[label][subname] = leverage * weight * cap / cap.sum()
sizes[subname][rebaldate] = sum(subs[subname])
for subname, weight in zip(['HB','HS','MB','MS','LB','LS'],
[-0.5, 0.5, -0.5, 0.5, -0.5, 0.5]):
cap = df.loc[subs[subname], 'cap']
weights['smb'][subname] = leverage * weight * cap / cap.sum()
sizes[subname][rebaldate] = sum(subs[subname])
#print("(famafrench_sorts)", rebaldate, len(df))
else: # else not a rebalance month, so simply adjust holdings by retx
retx = 1 + stocks.get_ret(stocks.bd.begmo(rebaldate),
rebaldate, field='retx')['retx']
for port, subports in weights.items():
for subport, old in subports.items():
new = old * retx.reindex(old.index, fill_value=1)
weights[port][subport] = new / (abs(np.sum(new))
* len(subports) / 2)
# combine this month's subportfolios
for h in holdings:
holdings[h][rebaldate] = pd.concat(list(weights[h].values()))
return {'holdings': holdings, 'sizes': sizes}
label = 'hml'
rebalend = LAST_DATE
rebalbeg = 19640601
portfolios = famafrench_sorts(crsp, label, signals, rebalbeg, rebalend,
window=12, months=[6])
# Helper for plotting to compare portfolio returns
from finds.display import plot_date, plot_scatter
def plot_ff(y, label, corr=0, num=1, logdir=None):
"""Helper to plot similarity of portfolio and benchmark returns"""
fig, (ax1, ax2) = plt.subplots(2, 1, num=num, clear=True, figsize=(10,12))
plot_date(y, ax=ax1, title=" vs ".join(y.columns))
plot_scatter(y.iloc[:,0], y.iloc[:,1], ax=ax2)
ax2.set_title(f"corr={corr:.4f}", fontsize=8)
plt.tight_layout(pad=3)
if logdir is not None:
plt.savefig(os.path.join(logdir, label + '.jpg'))
with open(os.path.join(logdir, 'index.html'),
'at' if num > 1 else 'wt') as f:
f.write(f"<h2>Correlation of {label} vs {benchname}"
f" ({y.index[0]} - {y.index[-1]}):"
f" corr={corr:.4f}</h2>\n")
f.write(f"<img src='{label}.jpg'><hr><p>\n")
## Construct HML portfolio returns and compare to benchmark
label, benchname = 'hml', 'HML(mo)'
holdings = portfolios['holdings'][label]
result = backtest(crsp, holdings, label)
y = backtest.fit([benchname], 19700101, LAST_DATE)
plot_ff(y, label, corr=np.corrcoef(backtest.excess, rowvar=False)[0,1],
num=1, logdir=logdir)
## Compare SMB
label, benchname = 'smb', 'SMB(mo)'
holdings = portfolios['holdings'][label]
result = backtest(crsp, holdings, label)
y = backtest.fit([benchname], 19700101, LAST_DATE)
plot_ff(y, label, corr=np.corrcoef(backtest.excess, rowvar=False)[0,1],
num=2, logdir=logdir)
## Construct Mom
# Load monthly universe and stock returns from CRSP.
# Signal is stocks' total return from 12 months ago, skipping most recent month
# Construct 2-way portfolio sorts, and backtest returns
label, benchname, past, leverage = 'mom', 'Mom(mo)', (2,12), 1
rebalbeg, rebalend = 19260101, LAST_DATE
df = [] # collect each month's momentum signal values
for rebaldate in bd.date_range(rebalbeg, rebalend, 'endmo'):
beg = bd.endmo(rebaldate, -past[1]) # require price at this date
start = bd.offset(beg, 1) # start date, inclusive, of signal
end = bd.endmo(rebaldate, 1-past[0]) # end date of signal
p = [crsp.get_universe(rebaldate), # retrieve prices and construct signal
crsp.get_ret(start, end)['ret'].rename(label),
crsp.get_section('monthly', ['prc'], 'date', beg)['prc'].rename('beg'),
crsp.get_section('monthly', ['prc'], 'date', end)['prc'].rename('end')]
q = pd.concat(p, axis=1, join='inner').reset_index().dropna()
q['rebaldate'] = rebaldate
df.append(q[['permno', 'rebaldate', label]])
print(rebaldate, len(df), len(q))
df = pd.concat(df)
signals.write(df, label, overwrite=True)
portfolios = famafrench_sorts(crsp, label, signals, rebalbeg, rebalend,
window=0, months=[], leverage=leverage)
holdings = portfolios['holdings'][label]
result = backtest(crsp, holdings, label)
y = backtest.fit([benchname])
plot_ff(y, label, corr=np.corrcoef(backtest.excess, rowvar=False)[0,1], num=3, logdir=logdir)
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
import warnings
from math import ceil
from math import cos
from math import sin
from math import tan
from math import pi
from warnings import warn
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
Created on March 25, 2013
@author: geoffroy
"""
class HighSymmKpath:
"""
This class looks for path along high symmetry lines in
the Brillouin Zone.
It is based on <NAME>., & <NAME>. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
It should be used with primitive structures that
comply with the definition from the paper.
The symmetry is determined by spglib through the
SpacegroupAnalyzer class. The analyzer can be used to
produce the correct primitive structure (method
get_primitive_standard_structure(international_monoclinic=False)).
A warning will signal possible compatibility problems
with the given structure.
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to compare the input
structure with the one expected as primitive standard.
A warning will be issued if the lattices don't match.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5, atol=1e-8):
self._structure = structure
self._sym = SpacegroupAnalyzer(structure, symprec=symprec,
angle_tolerance=angle_tolerance)
self._prim = self._sym \
.get_primitive_standard_structure(international_monoclinic=False)
self._conv = self._sym.get_conventional_standard_structure(international_monoclinic=False)
self._prim_rec = self._prim.lattice.reciprocal_lattice
self._kpath = None
# Note: this warning will be issued for space groups 38-41, since the primitive cell must be
# reformatted to match Setyawan/Curtarolo convention in order to work with the current k-path
# generation scheme.
if not np.allclose(self._structure.lattice.matrix, self._prim.lattice.matrix, atol=atol):
warnings.warn("The input structure does not match the expected standard primitive! "
"The path can be incorrect. Use at your own risk.")
lattice_type = self._sym.get_lattice_type()
spg_symbol = self._sym.get_space_group_symbol()
if lattice_type == "cubic":
if "P" in spg_symbol:
self._kpath = self.cubic()
elif "F" in spg_symbol:
self._kpath = self.fcc()
elif "I" in spg_symbol:
self._kpath = self.bcc()
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "tetragonal":
if "P" in spg_symbol:
self._kpath = self.tet()
elif "I" in spg_symbol:
a = self._conv.lattice.abc[0]
c = self._conv.lattice.abc[2]
if c < a:
self._kpath = self.bctet1(c, a)
else:
self._kpath = self.bctet2(c, a)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "orthorhombic":
a = self._conv.lattice.abc[0]
b = self._conv.lattice.abc[1]
c = self._conv.lattice.abc[2]
if "P" in spg_symbol:
self._kpath = self.orc()
elif "F" in spg_symbol:
if 1 / a ** 2 > 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf1(a, b, c)
elif 1 / a ** 2 < 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf2(a, b, c)
else:
self._kpath = self.orcf3(a, b, c)
elif "I" in spg_symbol:
self._kpath = self.orci(a, b, c)
elif "C" in spg_symbol or "A" in spg_symbol:
self._kpath = self.orcc(a, b, c)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "hexagonal":
self._kpath = self.hex()
elif lattice_type == "rhombohedral":
alpha = self._prim.lattice.lengths_and_angles[1][0]
if alpha < 90:
self._kpath = self.rhl1(alpha * pi / 180)
else:
self._kpath = self.rhl2(alpha * pi / 180)
elif lattice_type == "monoclinic":
a, b, c = self._conv.lattice.abc
alpha = self._conv.lattice.lengths_and_angles[1][0]
# beta = self._conv.lattice.lengths_and_angles[1][1]
if "P" in spg_symbol:
self._kpath = self.mcl(b, c, alpha * pi / 180)
elif "C" in spg_symbol:
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kgamma > 90:
self._kpath = self.mclc1(a, b, c, alpha * pi / 180)
if kgamma == 90:
self._kpath = self.mclc2(a, b, c, alpha * pi / 180)
if kgamma < 90:
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha * pi / 180) ** 2 / a ** 2 < 1:
self._kpath = self.mclc3(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha * pi / 180) ** 2 / a ** 2 == 1:
self._kpath = self.mclc4(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha * pi / 180) ** 2 / a ** 2 > 1:
self._kpath = self.mclc5(a, b, c, alpha * pi / 180)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "triclinic":
kalpha = self._prim_rec.lengths_and_angles[1][0]
kbeta = self._prim_rec.lengths_and_angles[1][1]
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kalpha > 90 and kbeta > 90 and kgamma > 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma < 90:
self._kpath = self.trib()
if kalpha > 90 and kbeta > 90 and kgamma == 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma == 90:
self._kpath = self.trib()
else:
warn("Unknown lattice type %s" % lattice_type)
@property
def structure(self):
"""
Returns:
The standardized primitive structure
"""
return self._prim
@property
def conventional(self):
"""
Returns:
The conventional cell structure
"""
return self._conv
@property
def prim(self):
"""
Returns:
The primitive cell structure
"""
return self._prim
@property
def prim_rec(self):
"""
Returns:
The primitive reciprocal cell structure
"""
return self._prim_rec
@property
def kpath(self):
"""
Returns:
The symmetry line path in reciprocal space
"""
return self._kpath
def get_kpoints(self, line_density=20, coords_are_cartesian=True):
"""
Returns:
the kpoints along the paths in cartesian coordinates
together with the labels for symmetry points -Wei
"""
list_k_points = []
sym_point_labels = []
for b in self.kpath['path']:
for i in range(1, len(b)):
start = np.array(self.kpath['kpoints'][b[i - 1]])
end = np.array(self.kpath['kpoints'][b[i]])
distance = np.linalg.norm(
self._prim_rec.get_cartesian_coords(start) -
self._prim_rec.get_cartesian_coords(end))
nb = int(ceil(distance * line_density))
sym_point_labels.extend([b[i - 1]] + [''] * (nb - 1) + [b[i]])
list_k_points.extend(
[self._prim_rec.get_cartesian_coords(start)
+ float(i) / float(nb) *
(self._prim_rec.get_cartesian_coords(end)
- self._prim_rec.get_cartesian_coords(start))
for i in range(0, nb + 1)])
if coords_are_cartesian:
return list_k_points, sym_point_labels
else:
frac_k_points = [self._prim_rec.get_fractional_coords(k)
for k in list_k_points]
return frac_k_points, sym_point_labels
def cubic(self):
self.name = "CUB"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'X': np.array([0.0, 0.5, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0])}
path = [["\\Gamma", "X", "M", "\\Gamma", "R", "X"], ["M", "R"]]
return {'kpoints': kpoints, 'path': path}
def fcc(self):
self.name = "FCC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'K': np.array([3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0]),
'L': np.array([0.5, 0.5, 0.5]),
'U': np.array([5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0]),
'W': np.array([0.5, 1.0 / 4.0, 3.0 / 4.0]),
'X': np.array([0.5, 0.0, 0.5])}
path = [["\\Gamma", "X", "W", "K",
"\\Gamma", "L", "U", "W", "L", "K"], ["U", "X"]]
return {'kpoints': kpoints, 'path': path}
def bcc(self):
self.name = "BCC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'H': np.array([0.5, -0.5, 0.5]),
'P': np.array([0.25, 0.25, 0.25]),
'N': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "H", "N", "\\Gamma", "P", "H"], ["P", "N"]]
return {'kpoints': kpoints, 'path': path}
def tet(self):
self.name = "TET"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0]),
'R': np.array([0.0, 0.5, 0.5]),
'X': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "R", "A", "Z"], ["X", "R"],
["M", "A"]]
return {'kpoints': kpoints, 'path': path}
def bctet1(self, c, a):
self.name = "BCT1"
eta = (1 + c ** 2 / a ** 2) / 4.0
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'M': np.array([-0.5, 0.5, 0.5]),
'N': np.array([0.0, 0.5, 0.0]),
'P': np.array([0.25, 0.25, 0.25]),
'X': np.array([0.0, 0.0, 0.5]),
'Z': np.array([eta, eta, -eta]),
'Z_1': np.array([-eta, 1 - eta, eta])}
path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "P", "N", "Z_1", "M"],
["X", "P"]]
return {'kpoints': kpoints, 'path': path}
def bctet2(self, c, a):
self.name = "BCT2"
eta = (1 + a ** 2 / c ** 2) / 4.0
zeta = a ** 2 / (2 * c ** 2)
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.0, 0.5, 0.0]),
'P': np.array([0.25, 0.25, 0.25]),
'\\Sigma': np.array([-eta, eta, eta]),
'\\Sigma_1': np.array([eta, 1 - eta, -eta]),
'X': np.array([0.0, 0.0, 0.5]),
'Y': np.array([-zeta, zeta, 0.5]),
'Y_1': np.array([0.5, 0.5, -zeta]),
'Z': np.array([0.5, 0.5, -0.5])}
path = [["\\Gamma", "X", "Y", "\\Sigma", "\\Gamma", "Z",
"\\Sigma_1", "N", "P", "Y_1", "Z"], ["X", "P"]]
return {'kpoints': kpoints, 'path': path}
def orc(self):
self.name = "ORC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'S': np.array([0.5, 0.5, 0.0]),
'T': np.array([0.0, 0.5, 0.5]),
'U': np.array([0.5, 0.0, 0.5]),
'X': np.array([0.5, 0.0, 0.0]),
'Y': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "X", "S", "Y", "\\Gamma",
"Z", "U", "R", "T", "Z"], ["Y", "T"], ["U", "X"], ["S", "R"]]
return {'kpoints': kpoints, 'path': path}
def orcf1(self, a, b, c):
self.name = "ORCF1"
zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5 + zeta, zeta]),
'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
'L': np.array([0.5, 0.5, 0.5]),
'T': np.array([1, 0.5, 0.5]),
'X': np.array([0.0, eta, eta]),
'X_1': np.array([1, 1 - eta, 1 - eta]),
'Y': np.array([0.5, 0.0, 0.5]),
'Z': np.array([0.5, 0.5, 0.0])}
path = [["\\Gamma", "Y", "T", "Z", "\\Gamma", "X", "A_1", "Y"],
["T", "X_1"], ["X", "A", "Z"], ["L", "\\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def orcf2(self, a, b, c):
self.name = "ORCF2"
phi = (1 + c ** 2 / b ** 2 - c ** 2 / a ** 2) / 4
eta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
delta = (1 + b ** 2 / a ** 2 - b ** 2 / c ** 2) / 4
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'C': np.array([0.5, 0.5 - eta, 1 - eta]),
'C_1': np.array([0.5, 0.5 + eta, eta]),
'D': np.array([0.5 - delta, 0.5, 1 - delta]),
'D_1': np.array([0.5 + delta, 0.5, delta]),
'L': np.array([0.5, 0.5, 0.5]),
'H': np.array([1 - phi, 0.5 - phi, 0.5]),
'H_1': np.array([phi, 0.5 + phi, 0.5]),
'X': np.array([0.0, 0.5, 0.5]),
'Y': np.array([0.5, 0.0, 0.5]),
'Z': np.array([0.5, 0.5, 0.0])}
path = [["\\Gamma", "Y", "C", "D", "X", "\\Gamma",
"Z", "D_1", "H", "C"], ["C_1", "Z"], ["X", "H_1"], ["H", "Y"],
["L", "\\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def orcf3(self, a, b, c):
self.name = "ORCF3"
zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5 + zeta, zeta]),
'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
'L': np.array([0.5, 0.5, 0.5]),
'T': np.array([1, 0.5, 0.5]),
'X': np.array([0.0, eta, eta]),
'X_1': np.array([1, 1 - eta, 1 - eta]),
'Y': np.array([0.5, 0.0, 0.5]),
'Z': np.array([0.5, 0.5, 0.0])}
path = [["\\Gamma", "Y", "T", "Z", "\\Gamma", "X", "A_1", "Y"],
["X", "A", "Z"], ["L", "\\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def orci(self, a, b, c):
self.name = "ORCI"
zeta = (1 + a ** 2 / c ** 2) / 4
eta = (1 + b ** 2 / c ** 2) / 4
delta = (b ** 2 - a ** 2) / (4 * c ** 2)
mu = (a ** 2 + b ** 2) / (4 * c ** 2)
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'L': np.array([-mu, mu, 0.5 - delta]),
'L_1': np.array([mu, -mu, 0.5 + delta]),
'L_2': np.array([0.5 - delta, 0.5 + delta, -mu]),
'R': np.array([0.0, 0.5, 0.0]),
'S': np.array([0.5, 0.0, 0.0]),
'T': np.array([0.0, 0.0, 0.5]),
'W': np.array([0.25, 0.25, 0.25]),
'X': np.array([-zeta, zeta, zeta]),
'X_1': np.array([zeta, 1 - zeta, -zeta]),
'Y': np.array([eta, -eta, eta]),
'Y_1': np.array([1 - eta, eta, -eta]),
'Z': np.array([0.5, 0.5, -0.5])}
path = [["\\Gamma", "X", "L", "T", "W", "R", "X_1", "Z",
"\\Gamma", "Y", "S", "W"], ["L_1", "Y"], ["Y_1", "Z"]]
return {'kpoints': kpoints, 'path': path}
def orcc(self, a, b, c):
self.name = "ORCC"
zeta = (1 + a ** 2 / b ** 2) / 4
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([zeta, zeta, 0.5]),
'A_1': np.array([-zeta, 1 - zeta, 0.5]),
'R': np.array([0.0, 0.5, 0.5]),
'S': np.array([0.0, 0.5, 0.0]),
'T': np.array([-0.5, 0.5, 0.5]),
'X': np.array([zeta, zeta, 0.0]),
'X_1': np.array([-zeta, 1 - zeta, 0.0]),
'Y': np.array([-0.5, 0.5, 0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "X", "S", "R", "A", "Z",
"\\Gamma", "Y", "X_1", "A_1", "T", "Y"], ["Z", "T"]]
return {'kpoints': kpoints, 'path': path}
def hex(self):
self.name = "HEX"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.0, 0.0, 0.5]),
'H': np.array([1.0 / 3.0, 1.0 / 3.0, 0.5]),
'K': np.array([1.0 / 3.0, 1.0 / 3.0, 0.0]),
'L': np.array([0.5, 0.0, 0.5]),
'M': np.array([0.5, 0.0, 0.0])}
path = [["\\Gamma", "M", "K", "\\Gamma", "A", "L", "H", "A"], ["L", "M"],
["K", "H"]]
return {'kpoints': kpoints, 'path': path}
def rhl1(self, alpha):
self.name = "RHL1"
eta = (1 + 4 * cos(alpha)) / (2 + 4 * cos(alpha))
nu = 3.0 / 4.0 - eta / 2.0
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'B': np.array([eta, 0.5, 1.0 - eta]),
'B_1': np.array([1.0 / 2.0, 1.0 - eta, eta - 1.0]),
'F': np.array([0.5, 0.5, 0.0]),
'L': np.array([0.5, 0.0, 0.0]),
'L_1': np.array([0.0, 0.0, -0.5]),
'P': np.array([eta, nu, nu]),
'P_1': np.array([1.0 - nu, 1.0 - nu, 1.0 - eta]),
'P_2': np.array([nu, nu, eta - 1.0]),
'Q': np.array([1.0 - nu, nu, 0.0]),
'X': np.array([nu, 0.0, -nu]),
'Z': np.array([0.5, 0.5, 0.5])}
path = [["\\Gamma", "L", "B_1"], ["B", "Z", "\\Gamma", "X"],
["Q", "F", "P_1", "Z"], ["L", "P"]]
return {'kpoints': kpoints, 'path': path}
def rhl2(self, alpha):
self.name = "RHL2"
eta = 1 / (2 * tan(alpha / 2.0) ** 2)
nu = 3.0 / 4.0 - eta / 2.0
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'F': np.array([0.5, -0.5, 0.0]),
'L': np.array([0.5, 0.0, 0.0]),
'P': np.array([1 - nu, -nu, 1 - nu]),
'P_1': np.array([nu, nu - 1.0, nu - 1.0]),
'Q': np.array([eta, eta, eta]),
'Q_1': np.array([1.0 - eta, -eta, -eta]),
'Z': np.array([0.5, -0.5, 0.5])}
path = [["\\Gamma", "P", "Z", "Q", "\\Gamma",
"F", "P_1", "Q_1", "L", "Z"]]
return {'kpoints': kpoints, 'path': path}
def mcl(self, b, c, beta):
self.name = "MCL"
eta = (1 - b * cos(beta) / c) / (2 * sin(beta) ** 2)
nu = 0.5 - eta * c * cos(beta) / b
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5, 0.0]),
'C': np.array([0.0, 0.5, 0.5]),
'D': np.array([0.5, 0.0, 0.5]),
'D_1': np.array([0.5, 0.5, -0.5]),
'E': np.array([0.5, 0.5, 0.5]),
'H': np.array([0.0, eta, 1.0 - nu]),
'H_1': np.array([0.0, 1.0 - eta, nu]),
'H_2': np.array([0.0, eta, -nu]),
'M': np.array([0.5, eta, 1.0 - nu]),
'M_1': np.array([0.5, 1 - eta, nu]),
'M_2': np.array([0.5, eta, -nu]),
'X': np.array([0.0, 0.5, 0.0]),
'Y': np.array([0.0, 0.0, 0.5]),
'Y_1': np.array([0.0, 0.0, -0.5]),
'Z': np.array([0.5, 0.0, 0.0])}
path = [["\\Gamma", "Y", "H", "C", "E", "M_1", "A", "X", "H_1"],
["M", "D", "Z"], ["Y", "D"]]
return {'kpoints': kpoints, 'path': path}
def mclc1(self, a, b, c, alpha):
self.name = "MCLC1"
zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
psi = 0.75 - a ** 2 / (4 * b ** 2 * sin(alpha) ** 2)
phi = psi + (0.75 - psi) * b * cos(alpha) / c
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'F': np.array([1 - zeta, 1 - zeta, 1 - eta]),
'F_1': np.array([zeta, zeta, eta]),
'F_2': np.array([-zeta, -zeta, 1 - eta]),
# 'F_3': np.array([1 - zeta, -zeta, 1 - eta]),
'I': np.array([phi, 1 - phi, 0.5]),
'I_1': np.array([1 - phi, phi - 1, 0.5]),
'L': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'X': np.array([1 - psi, psi - 1, 0.0]),
'X_1': np.array([psi, 1 - psi, 0.0]),
'X_2': np.array([psi - 1, -psi, 0.0]),
'Y': np.array([0.5, 0.5, 0.0]),
'Y_1': np.array([-0.5, -0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "F_1"],
["Y", "X_1"], ["X", "\\Gamma", "N"], ["M", "\\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def mclc2(self, a, b, c, alpha):
self.name = "MCLC2"
zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
psi = 0.75 - a ** 2 / (4 * b ** 2 * sin(alpha) ** 2)
phi = psi + (0.75 - psi) * b * cos(alpha) / c
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'F': np.array([1 - zeta, 1 - zeta, 1 - eta]),
'F_1': np.array([zeta, zeta, eta]),
'F_2': np.array([-zeta, -zeta, 1 - eta]),
'F_3': np.array([1 - zeta, -zeta, 1 - eta]),
'I': np.array([phi, 1 - phi, 0.5]),
'I_1': np.array([1 - phi, phi - 1, 0.5]),
'L': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'X': np.array([1 - psi, psi - 1, 0.0]),
'X_1': np.array([psi, 1 - psi, 0.0]),
'X_2': np.array([psi - 1, -psi, 0.0]),
'Y': np.array([0.5, 0.5, 0.0]),
'Y_1': np.array([-0.5, -0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "F_1"],
["N", "\\Gamma", "M"]]
return {'kpoints': kpoints, 'path': path}
def mclc3(self, a, b, c, alpha):
self.name = "MCLC3"
mu = (1 + b ** 2 / a ** 2) / 4.0
delta = b * c * cos(alpha) / (2 * a ** 2)
zeta = mu - 0.25 + (1 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
phi = 1 + zeta - 2 * mu
psi = eta - 2 * delta
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'F': np.array([1 - phi, 1 - phi, 1 - psi]),
'F_1': np.array([phi, phi - 1, psi]),
'F_2': np.array([1 - phi, -phi, 1 - psi]),
'H': np.array([zeta, zeta, eta]),
'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
'H_2': np.array([-zeta, -zeta, 1 - eta]),
'I': np.array([0.5, -0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
import numpy as np
import pandas as pd
from sklearn.utils.estimator_checks import check_estimator
from autofeat import AutoFeatRegressor, AutoFeatClassifier
def get_random_data(seed=15):
# generate some toy data
np.random.seed(seed)
x1 = np.random.rand(1000)
x2 = np.random.randn(1000)
x3 = np.random.rand(1000)
target = 2 + 15*x1 + 3/(x2 - 1/x3) + 5*(x2 + np.log(x1))**3
X = np.vstack([x1, x2, x3]).T
return X, target
def test_do_almost_nothing():
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=0, featsel_runs=0)
df = afreg.fit_transform(pd.DataFrame(X, columns=["x1", "x2", "x3"]), target)
assert list(df.columns) == ["x1", "x2", "x3"], "Only original columns"
df = afreg.transform(pd.DataFrame(X, columns=["x1", "x2", "x3"]))
assert list(df.columns) == ["x1", "x2", "x3"], "Only original columns"
def test_regular_X_y():
# autofeat with numpy arrays
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
df = afreg.fit_transform(X, target)
assert afreg.score(X, target) >= 0.999, "R^2 should be 1."
assert afreg.score(df, target) >= 0.999, "R^2 should be 1."
assert list(df.columns)[:3] == ["x000", "x001", "x002"], "Wrong column names"
def test_regular_df_X_y():
# autofeat with df without column names
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
df = afreg.fit_transform(pd.DataFrame(X), pd.DataFrame(target))
# score once with original, once with transformed data
assert afreg.score(pd.DataFrame(X), target) >= 0.999, "R^2 should be 1."
assert afreg.score(df, target) >= 0.999, "R^2 should be 1."
assert list(df.columns)[:3] == ["0", "1", "2"], "Wrong column names"
def test_weird_colnames():
# autofeat with df with weird column names
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
df = afreg.fit_transform(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]), pd.DataFrame(target))
assert afreg.score(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]), target) >= 0.999, "R^2 should be 1."
assert list(df.columns)[:3] == ["x 1.1", "2", "x/3"], "Wrong column names"
# error if the column names aren't the same as before
try:
afreg.score(pd.DataFrame(X, columns=["x 11", 2, "x/3"]), target)
except ValueError:
pass
else:
raise AssertionError("Should throw error on mismatch column names")
def test_nans():
# nans are ok in transform but not fit or predict (due to sklearn model)
X, target = get_random_data()
X[998, 0] = np.nan
X[999, 1] = np.nan
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
try:
_ = afreg.fit_transform(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]), pd.DataFrame(target))
except ValueError:
pass
else:
raise AssertionError("fit with NaNs should throw an error")
_ = afreg.fit_transform(pd.DataFrame(X[:900], columns=["x 1.1", 2, "x/3"]), pd.DataFrame(target[:900]))
try:
_ = afreg.predict(pd.DataFrame(X[900:], columns=["x 1.1", 2, "x/3"]))
except ValueError:
pass
else:
raise AssertionError("predict with NaNs should throw an error")
df = afreg.transform(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]))
assert all([pd.isna(df.iloc[998, 0]), pd.isna(df.iloc[999, 1])]), "Original features should be NaNs"
assert np.sum(np.array(pd.isna(df.iloc[998]), dtype=int)) >= 2, "There should be at least 2 NaNs in row 998"
assert np.sum(np.array(pd.isna(df.iloc[999]), dtype=int)) >= 2, "There should be at least 2 NaNs in row 999"
def test_feateng_cols():
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_cols=["x1", "x3", "x4"], feateng_steps=3)
try:
df = afreg.fit_transform(pd.DataFrame(X, columns=["x1", "x2", "x3"]), target)
except ValueError:
pass
else:
raise AssertionError("feateng_cols not in df should throw an error")
afreg = AutoFeatRegressor(verbose=1, feateng_cols=["x1", "x3"], feateng_steps=3)
df = afreg.fit_transform(pd.DataFrame(X, columns=["x1", "x2", "x3"]), target)
for c in df.columns[3:]:
assert "x2" not in c, "only feateng_cols should occur in engineered features"
def test_categorical_cols():
np.random.seed(15)
x1 = np.random.rand(1000)
x2 = np.random.randn(1000)
x3 = np.random.rand(1000)
x4 = np.array(200*[4] + 300*[5] + 500*[2], dtype=int)
target = 2 + 15*x1 + 3/(x2 - 1/x3) + 5*(x2 + np.log(x1))**3 + x4
X = pd.DataFrame(np.vstack([x1, x2, x3, x4]).T)
# <NAME> (github: @elaguerta)
# LBNL GIG
# File created: 19 February 2021
# Create NR3 Solution class, a namespace for calculations used by nr3
from . solution import Solution
from . circuit import Circuit
import numpy as np
from . nr3_lib.compute_NR3FT import compute_NR3FT
from . nr3_lib.compute_NR3JT import compute_NR3JT
from . nr3_lib.map_output import map_output
class SolutionNR3(Solution):
CONVERGENCE_TOLERANCE = 10**-6
@classmethod
def set_zip_values(cls, zip_v):
"""
sets zip values for the Solution class
param zip_v: List or nd.array with 7 values
[a_z_p, a_i_p, a_pq_p, a_z_q, a_i_q, a_pq_q, min voltage pu]
Note that zip values are set both on the Solution class and Circuit
class
"""
Solution.set_zip_values(zip_v)
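# Usage sketch (the ZIP coefficients and the .dss path below are made-up
# placeholders, not values from any real feeder model):
# >>> SolutionNR3.set_zip_values([0.10, 0.05, 0.85, 0.10, 0.05, 0.85, 0.80])
# >>> solution = SolutionNR3('path/to/feeder.dss')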
def __init__(self, dss_fp: str, **kwargs):
super().__init__(dss_fp, **kwargs) # sets self.circuit
self._set_orient('cols')
self._init_XNR()
self._init_slack_bus_matrices()
self._init_KVL_matrices()
self._init_KCL_matrices()
self._init_KVL_matrices_vregs()
def _init_XNR(self):
"""
adapted from
https://github.com/msankur/LinDist3Flow/blob/vectorized/20180601/PYTHON/lib/basematrices.py
written by @kathleenchang
"""
V0, I0 = None, None
Vslack = self.__class__.VSLACK
nline = self.circuit.lines.num_elements
nnode = self.circuit.buses.num_elements
tf_lines = self.circuit.transformers.get_num_lines_x_ph()
vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
# XNR order is bus voltages, line currents, transformer line currents,
# voltage regulator line currents * 2
XNR = np.zeros((2*3*(nnode + nline) + 2*tf_lines + 2*2*vr_lines, 1),
dtype=float)
# initialize node voltage portion of XNR
if V0 is None or len(V0) == 0:
for ph in range(3):
for k1 in range(nnode):
XNR[2*ph*nnode + 2*k1] = Vslack[ph].real
XNR[2*ph*nnode + 2*k1+1] = Vslack[ph].imag
# If initial V is given (usually from CVX)
elif len(V0) != 0:
for ph in range(3):
for k1 in range(nnode):
XNR[2*ph*nnode + 2*k1] = V0[ph, k1].real
XNR[2*ph*nnode + 2*k1+1] = V0[ph, k1].imag
# initialize line current portion of XNR
if I0 is None or len(I0) == 0:
XNR[(2*3*nnode):] = 0.0*np.ones((6*nline + 2*tf_lines + 2*2*vr_lines, 1), dtype=float)
import sys
import cv2
import numpy as np
from src.common.image import CoordinateStore
from src.settings.local import LOCAL_X_AXIS_ITERATIONS, LOCAL_Y_AXIS_ITERATIONS
def roi_selection(numpy_frame: np.ndarray):
"""
Generates the Region of Interest points selection for posterior processing.
:param numpy_frame: Input image decided in the project settings read as a Numpy frame.
:return: Points list.
"""
image = numpy_frame
coordinate_handler = CoordinateStore(numpy_frame=image)
cv2.namedWindow("Test Image")
cv2.setMouseCallback("Test Image", coordinate_handler.select_point)
while True:
cv2.imshow("Test Image", image)
if cv2.waitKey(0) == 27:
cv2.destroyAllWindows()
break
print("Selected Coordinates: ")
print(coordinate_handler.points)
return coordinate_handler.points
def roi_processing(numpy_frame: np.ndarray, roi_points: list):
"""
Processes the Region of Interest points on the input image and overlays the ROI on the background image by
rotating it across the X-axis and Y-axis.
:param numpy_frame: Input image decided in the project settings read as a Numpy frame.
:param roi_points: Points list.
:return: Generated masks with the Region of Interest rotated.
"""
final_images = list()
if len(roi_points) == 0:
print("Incorrect number of Region of Interest points selected... Exiting")
sys.exit(1)
base_masked_out = roi_extraction(roi_points=roi_points, numpy_frame=numpy_frame)
x_intervals = round(numpy_frame.shape[1] / 20)
y_intervals = round(numpy_frame.shape[0] / 30)
x_start_interval = 0
y_start_interval = 0
for x_index in range(0, LOCAL_X_AXIS_ITERATIONS):
x_start_interval, generated_images = roi_rotation(
masked_image=base_masked_out,
x_start_interval=x_start_interval,
y_start_interval=y_start_interval,
x_intervals=x_intervals,
y_intervals=y_intervals
)
final_images.extend(generated_images)
return final_images
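# Example flow (illustrative only; "sample.jpg" is a placeholder path, and
# roi_selection needs a display for the OpenCV click window):
# >>> frame = cv2.imread("sample.jpg")
# >>> points = roi_selection(frame)
# >>> masks = roi_processing(frame, points)
# >>> len(masks)  # one mask per generated shift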
def roi_extraction(roi_points: list, numpy_frame: np.ndarray):
points = np.array(roi_points, dtype=np.int32)
image = numpy_frame
# Masked image generation
mask = np.zeros((image.shape[0], image.shape[1]))
cv2.fillConvexPoly(mask, points, 1)
mask = mask.astype(bool)
masked_out = np.zeros_like(image)
masked_out[mask] = image[mask]
base_masked_out = masked_out
return base_masked_out
def roi_rotation(masked_image, x_start_interval, y_start_interval, x_intervals, y_intervals):
images = list()
masked_out = np.roll(masked_image, (x_start_interval, 0), axis=(1, 0)) # Add shifting capabilities to the image
images.append(masked_out)
frame_mask_scan(masked_out)
for y_index in range(0, LOCAL_Y_AXIS_ITERATIONS):
# Add shifting capabilities to the image
masked_out = np.roll(
masked_image, (x_start_interval, y_start_interval), axis=(0, 1)
)
images.append(masked_out)
y_start_interval = y_start_interval + y_intervals
x_start_interval = x_start_interval + x_intervals
images.append(masked_out)
return x_start_interval, images
def frame_mask_scan(numpy_frame: np.ndarray):
threshold_value = 0
# Columns 0 -> Last
column_indexes = list()
limit = numpy_frame.shape[1]
for i in range(0, limit):
current_value = np.count_nonzero(numpy_frame[:, i])
from __future__ import division
from future.utils import viewitems
from builtins import int, zip
import concurrent.futures
import os
import itertools
from ._adaptive_threshold import threshold as athreshold
from .pool import pooler
from ._moving_window import moving_window
# from mpglue.raster_tools import create_raster
# from mpglue import moving_window
import numpy as np
import cv2
# SciPy
from scipy.ndimage.measurements import label as nd_label
from scipy.ndimage.measurements import mean as nd_mean
import scipy.stats as sci_stats
from scipy.stats import mode as sci_mode
from sklearn.preprocessing import StandardScaler
# Scikit-image
from skimage.exposure import rescale_intensity
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects, skeletonize
from skimage.morphology import thin as sk_thin
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from skimage.measure import label as sk_label
import pymorph
from mahotas import thin as mthin
from mahotas.morph import hitmiss as mhitmiss
# from tqdm import tqdm
# from joblib import Parallel, delayed
def local_straightness(arr, kernel_filter, w, sigma_color, sigma_space):
"""
https://ieeexplore-ieee-org.ezproxy.library.uq.edu.au/document/1334256
https://docs.opencv.org/master/d4/d70/tutorial_anisotropic_image_segmentation_by_a_gst.html
Example:
>>> conv_kernels = set_kernel_pairs(methods=['compass'])
>>> kernel_filter = conv_kernels['compass']['kernels']
>>> local_straightness(array, kernel_filter, 3, 1, 1)
"""
diff_x = cv2.filter2D(np.float32(arr),
cv2.CV_32F,
kernel_filter[1],
borderType=cv2.BORDER_CONSTANT)
diff_y = cv2.filter2D(np.float32(arr),
cv2.CV_32F,
kernel_filter[0],
borderType=cv2.BORDER_CONSTANT)
diff_xy = diff_x * diff_y
diff_xx = diff_x * diff_x
diff_yy = diff_y * diff_y
c11 = cv2.boxFilter(np.float32(diff_xx), cv2.CV_32F, (w, w))
c22 = cv2.boxFilter(np.float32(diff_yy), cv2.CV_32F, (w, w))
c12 = cv2.boxFilter(np.float32(diff_xy), cv2.CV_32F, (w, w))
# c11 = cv2.bilateralFilter(np.float32(diff_xx), w, sigma_color, sigma_space)
# c22 = cv2.bilateralFilter(np.float32(diff_yy), w, sigma_color, sigma_space)
# c12 = cv2.bilateralFilter(np.float32(diff_xy), w, sigma_color, sigma_space)
gamma_max = (c11 + c22 + np.sqrt((c11 - c22)**2 + 4*c12**2)) / 2.0
gamma_min = (c11 + c22 - np.sqrt((c11 - c22)**2 + 4*c12**2)) / 2.0
s = 1.0 - (gamma_min / gamma_max)
return s
def logistic(x, **params):
return sci_stats.logistic.cdf(x, **params)
def sigmoid(x, a, b):
return 1.0 / (1.0 + np.exp(-b * (x - a)))
def log_transform(egm, scale=1e-6, logistic_alpha=1.6, logistic_beta=0.5):
"""
Transforms an EGM to probabilities
Args:
egm (2d array)
scale (Optional[float]): The scaling factor
logistic_alpha (Optional[float])
logistic_beta (Optional[float])
Returns:
Probabilities (2d array)
"""
# Mask
egm[egm == 0] = np.nan
log_min = np.nanpercentile(np.log(egm * scale), 2)
egm[np.isnan(egm)] = 0
# Log transform
egm_proba = np.where(egm > 0, np.log(egm * scale), log_min)
# Scale and clip
r, c = egm_proba.shape
zegm = np.where(egm_proba.ravel() > log_min)[0]
scaler = StandardScaler().fit(egm_proba.ravel()[zegm][:, np.newaxis])
egm_proba = scaler.transform(egm_proba.ravel()[:, np.newaxis]).reshape(r, c)
egm_proba = rescale_intensity(egm_proba, in_range=(-3, 3), out_range=(-3, 3))
# CDF
return logistic(egm_proba,
loc=logistic_alpha,
scale=logistic_beta)
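# Example (synthetic EGM array; the parameter values are simply the defaults
# from the signature above, not tuned for real data):
# >>> egm = np.random.gamma(2.0, 1e5, (64, 64))
# >>> proba = log_transform(egm, scale=1e-6, logistic_alpha=1.6, logistic_beta=0.5)
# >>> float(proba.min()) >= 0.0 and float(proba.max()) <= 1.0
# True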
def bayes(prior_a, prior_b, likelihood):
"""
Bayes rule
Args:
prior_a (float): The class prior probability.
prior_b (float): The class prior probability.
likelihood (float)
"""
posterior = (likelihood * prior_a) / (likelihood * prior_a + prior_b * (1.0 - prior_a))
posterior[np.isnan(posterior)] = 0
return posterior
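# Worked example (illustrative numbers): with prior_a=0.3, prior_b=0.7 and
# likelihoods [0.2, 0.8], the posterior is L*0.3 / (L*0.3 + 0.7*(1-0.3)):
# >>> bayes(0.3, 0.7, np.array([0.2, 0.8]))
# array([0.10909091, 0.32876712])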
class Params(object):
def __init__(self, **kwargs):
for k, v in viewitems(kwargs):
setattr(self, k, v)
def mopen(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_OPEN,
se,
iterations=iters)
def mclose(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_CLOSE,
se,
iterations=iters)
def merode(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_ERODE,
se,
iterations=iters)
def mdilate(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_DILATE,
se,
iterations=iters)
def closerec(array2morph, se, r=3, iters=5):
"""
Close by reconstruction
Args:
array2morph (2d array)
se (str)
r (Optional[int])
iters (Optional[int])
"""
if se == 'disk':
se = np.uint8(pymorph.sedisk(r=r))
elif se == 'cross':
se = np.uint8(pymorph.secross(r=r))
evi2_dist = np.float32(cv2.distanceTransform(np.uint8(np.where(array2morph >= 20, 1, 0)), cv2.DIST_L2, 3))
seed = np.uint8(np.where(evi2_dist >= 2,
cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_OPEN,
se,
iterations=1),
0))
im_result = seed.copy()
for iter in range(0, iters):
im_dilated = cv2.morphologyEx(np.uint8(im_result),
cv2.MORPH_DILATE,
se,
iterations=1)
im_rec = np.minimum(im_dilated, array2morph)
im_result = im_rec.copy()
if np.allclose(seed, im_rec):
break
return im_result
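# Example (illustrative; assumes an intensity-like uint8 array where values
# >= 20 count as foreground for the distance-transform seed above):
# >>> arr = (np.random.rand(128, 128) > 0.7).astype('uint8') * 100
# >>> rec = closerec(arr, 'disk', r=3, iters=5)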
def openrec(array2morph, se, iters=5):
"""
Open by reconstruction
Args:
array2morph (2d array)
se (2d array)
iters (Optional[int])
"""
evi2_dist = np.float32(cv2.distanceTransform(np.uint8(np.where(array2morph >= 20, 1, 0)), cv2.DIST_L2, 3))
seed = np.uint8(np.where(evi2_dist >= 2,
cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_OPEN,
se,
iterations=1),
0))
im_result = seed.copy()
for iter in range(0, iters):
im_dilated = merode(im_result, se, iters=1)
im_rec = np.minimum(im_dilated, array2morph)
im_result = im_rec.copy()
if np.allclose(seed, im_rec):
break
return im_result
def set_kernel_pairs(methods=None):
"""
Creates 2d convolution kernels
Args:
methods (Optional[str list]): Choices are ['compass', 'kirsch', 'prewitt', 'roberts', 'scharr', 'sobel'].
Returns:
List of kernel filters
"""
returned_filters = dict()
if methods:
returned_filters['custom'] = dict(kernels=methods,
compass=True)
methods = ['compass', 'kirsch', 'prewitt', 'roberts', 'sobel']
# Prewitt compass
compass_filters = np.array([[[-1, -1, -1],
[1, -2, 1],
[1, 1, 1]],
[[-1, -1, 1],
[-1, -2, 1],
[1, 1, 1]],
[[-1, 1, 1],
[-1, -2, 1],
[-1, 1, 1]],
[[1, 1, 1],
[-1, -2, 1],
[-1, -1, 1]],
[[1, 1, 1],
[1, -2, 1],
[-1, -1, -1]],
[[1, 1, 1],
[1, -2, -1],
[1, -1, -1]],
[[1, 1, -1],
[1, -2, -1],
[1, 1, -1]]], dtype='float32')
# Sobel
sobel_filters = np.array([[[1, 2, 0],
[2, 0, -2],
[0, -2, -1]],
[[-1, -2, 0],
[-2, 0, 2],
[0, 2, 1]],
[[0, 2, 1],
[-2, 0, 2],
[-1, -2, 0]],
[[0, -2, -1],
[2, 0, -2],
[1, 2, 0]],
[[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]],
[[1, 0, -1],
[2, 0, -2],
[1, 0, -1]],
[[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]],
[[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]]], dtype='float32')
# Scharr
scharr_filters = np.array([[[10, 3, 0],
[3, 0, -3],
[0, -3, -10]],
[[-10, -3, 0],
[-3, 0, 3],
[0, 3, 10]],
[[0, 3, 10],
[-3, 0, 3],
[-10, -3, 0]],
[[0, -3, -10],
[3, 0, -3],
[10, 3, 0]],
[[-10, 0, 10],
[-3, 0, 3],
[-10, 0, 10]],
[[10, 0, -10],
[3, 0, -3],
[10, 0, -10]],
[[-10, -3, -10],
[0, 0, 0],
[10, 3, 10]],
[[10, 3, 10],
[0, 0, 0],
[-10, -3, -10]]], dtype='float32')
# Roberts cross
roberts_filters = np.array([[[0, -1],
[1, 0]],
[[0, 1],
[-1, 0]],
[[-1, 0],
[0, 1]],
[[1, 0],
[0, -1]]], dtype='float32')
# Prewitt
prewitt_filters = np.array([[[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]],
[[-1, -1, -1],
[0, 0, 0],
[1, 1, 1]],
[[1, 1, 0],
[1, 0, -1],
[0, -1, -1]],
[[-1, -1, 0],
[-1, 0, 1],
[0, 1, 1]],
[[1, 0, -1],
[1, 0, -1],
[1, 0, -1]],
[[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]],
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]],
[[0, -1, -1],
[1, 0, -1],
[1, 1, 0]]], dtype='float32')
# Kirsch compass
kirsch_filters = np.array([[[5, 5, 5],
[-3, 0, -3],
[-3, -3, -3]],
[[5, 5, -3],
[5, 0, -3],
[-3, -3, -3]],
[[5, -3, -3],
[5, 0, -3],
[5, -3, -3]],
[[-3, -3, -3],
[5, 0, -3],
[5, 5, -3]],
[[-3, -3, -3],
[-3, 0, -3],
[5, 5, 5]],
[[-3, -3, -3],
[-3, 0, 5],
[-3, 5, 5]],
[[-3, -3, 5],
[-3, 0, 5],
[-3, -3, 5]]], dtype='float32')
if 'compass' in methods:
returned_filters['compass'] = dict(kernels=compass_filters,
compass=True)
if 'kirsch' in methods:
returned_filters['kirsch'] = dict(kernels=kirsch_filters,
compass=True)
if 'prewitt' in methods:
returned_filters['prewitt'] = dict(kernels=prewitt_filters,
compass=False)
if 'roberts' in methods:
returned_filters['roberts'] = dict(kernels=roberts_filters,
compass=False)
if 'scharr' in methods:
returned_filters['scharr'] = dict(kernels=scharr_filters,
compass=False)
if 'sobel' in methods:
returned_filters['sobel'] = dict(kernels=sobel_filters,
compass=False)
return returned_filters
def find_circles(intensity_array, kernel_size):
"""
Finds circles
Args:
intensity_array (2d array)
kernel_size (int)
"""
kernel_radius = int(kernel_size / 2.0)
kernel_circle = np.uint8(pymorph.sedisk(r=kernel_radius,
dim=2,
metric='euclidean',
flat=True,
h=0) * 1)
kernel_square = np.uint8(pymorph.sebox(r=kernel_radius) * 1)
circles = cv2.filter2D(np.float32(intensity_array),
cv2.CV_32F,
kernel_circle,
borderType=cv2.BORDER_CONSTANT)
squares = cv2.filter2D(np.float32(intensity_array),
cv2.CV_32F,
kernel_square,
borderType=cv2.BORDER_CONSTANT)
diff = circles - squares
local_max_coords = peak_local_max(diff,
min_distance=kernel_size,
indices=True)
local_max = np.zeros(intensity_array.shape, dtype='uint8')
__author__ = 'mricha56'
__version__ = '4.0'
# Interface for accessing the PASCAL in Detail dataset. detail is a Python API
# that assists in loading, parsing, and visualizing the annotations of PASCAL
# in Detail. Please visit https://sites.google.com/view/pasd/home for more
# information about the PASCAL in Detail challenge. For example usage of the
# detail API, see detailDemo.ipynb.
# Throughout the API "ann"=annotation, "cat"=category, "img"=image,
# "bbox"= bounding box, "kpts"=keypoints, "occl"=occlusion,
# "bounds"=boundaries.
# To import:
# from detail import Detail
# For help:
# help(Detail)
# PASCAL in Detail Toolbox version 4.0
# Modifications of COCO toolbox made by <NAME> and <NAME>
# Forked from:
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by <NAME> and <NAME>, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon,Rectangle,Circle,Arrow,FancyArrow
import matplotlib.colors
import numpy as np
import skimage.io as io
import copy
import itertools
from scipy.ndimage.morphology import binary_dilation
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
# When displaying boundaries, dilate the mask before displaying it, to
# improve visibility
NUM_BOUNDARY_DILATION_ITERATIONS = 1
# When displaying occlusion, draw an arrow every OCCLUSION_ARROW_DISTANCE
# pixels
OCCLUSION_ARROW_DISTANCE = 7
class Detail:
def __init__(self, annotation_file='json/trainval_withkeypoints.json',
image_folder='VOCdevkit/VOC2010/JPEGImages',
phase='trainval'):
"""
Constructor of Detail helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that has pascal JPEG images.
:param phase (str): image set to look at: train, val, test, or any combination
of the three (trainval, trainvaltest)
:return:
"""
# load dataset
self.phase = phase
self.img_folder = image_folder
print('loading annotations into memory...')
tic = time.time()
self.data = json.load(open(annotation_file, 'r'))
assert type(self.data)==dict, 'annotation file format {} not supported'.format(type(self.data))
print('JSON root keys:' + str(self.data.keys()))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.waiting = False
self.__createIndex()
def __createIndex(self):
# create index
tic = time.time()
print('creating index...')
# create class members
self.cats,self.imgs,self.segmentations,self.occlusion,self.parts,\
self.kpts, self.bounds= {},{},{},{},{},{},{}
# Organize data into instance variables
for img in self.data['images']:
self.imgs[img['image_id']] = img
for segm in self.data['annos_segmentation']: # many per image
self.segmentations[segm['id']] = segm
for occl in self.data['annos_occlusion']: # one per image
self.occlusion[occl['image_id']] = occl
for bound in self.data['annos_boundary']: # one per image
self.bounds[bound['image_id']] = bound
#for skeleton in self.data['annos_joints']: # many per image
# # skeletons are 1-indexed in JSON file and
# # 0-indexed in self.kpts
# self.kpts[skeleton['person_id'] - 1] = skeleton
# Follow references
for img in self.data['images']:
img['annotations'] = []
img['categories'] = []
img['parts'] = []
img['keypoints'] = []
for part in self.data['parts']:
part['categories'] = []
part['annotations'] = []
part['images'] = []
self.parts[part['part_id']] = part
# fixed eval_orders here for classification task
self.eval_orders = {}
eval_orders = [2, 23, 25, 31, 34, 45, 59, 65, 72, 98, 397, 113, 207, 258, 284, 308, 347, 368, 416, 427, 9, 18, 22, 33, 44, 46, 68, 80, 85, 104, 115, 144, 158, 159, 162, 187, 189, 220, 232, 259, 260, 105, 296, 355, 295, 324, 326, 349, 354, 360, 366, 19, 415, 420, 424, 440, 445, 454, 458]
for i in range(len(eval_orders)):
self.eval_orders[i] = eval_orders[i]
for order, cat in enumerate(self.data['categories']):
cat['images'] = []
cat['annotations'] = []
#print('adding cat id: %d'%(cat['category_id']))
self.cats[cat['category_id']] = cat
# self.eval_orders[order] = cat['category_id']
if cat.get('parts'):
for partId in cat['parts']:
part = self.parts[partId]
if cat['category_id'] not in part['categories']:
part['categories'].append(cat['category_id'])
self.keypoints_str = ['head', 'neck', 'lsho', 'lelb', 'lhip', 'lwri', 'lknee', 'lank', 'rsho', 'relb', 'rwri', 'rhip', 'rknee', 'rank']
for skeleton_id, skeleton in self.kpts.items():
self.imgs[skeleton['image_id']]['keypoints'].append(skeleton_id)
for segm_id, segm in self.segmentations.items():
img = self.imgs[segm['image_id']]
cat = self.cats[segm['category_id']]
img['annotations'].append(segm_id)
cat['annotations'].append(segm_id)
if cat['category_id'] not in img['categories']:
img['categories'].append(cat['category_id'])
if img['image_id'] not in cat['images']:
cat['images'].append(img['image_id'])
if segm.get('parts'):
for partsegm in segm['parts']:
if partsegm['part_id'] == 255: continue
part = self.parts[partsegm['part_id']]
part['annotations'].append(segm_id)
if img['image_id'] not in part['images']:
part['images'].append(img['image_id'])
if part['part_id'] not in img['parts']:
img['parts'].append(part['part_id'])
print('index created! (t={:0.2f}s)'.format(time.time() - tic))
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.data['info'].items():
print('{}: {}'.format(key, value))
def __getSegmentationAnns(self, anns=[], imgs=[], cats=[], areaRng=[], supercat=None, crowd=None):
"""
Get segmentation annotations that satisfy given filter conditions. default is no filter
:param anns (int array) : get anns with the given IDs
imgs (image array) : get anns in the given imgs
cats (category array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
supercat (str) : filter anns by supercategory
crowd (True/False) : filter anns by 'iscrowd' key
:return: anns (annotation array) : array of annotations
"""
if len(imgs) > 0: imgs = self.getImgs(imgs)
if len(cats) > 0: cats = self.getCats(cats)
anns = self.__toList(anns)
# Get starting list of anns
if len(anns) == 0:
anns = list(self.segmentations.values())
else:
for i in range(len(anns)):
try:
if type(anns[i]) is int: anns[i] = self.segmentations[anns[i]]
elif type(anns[i]) is dict: anns[i] = self.segmentations[anns[i]['id']]
except IndexError: assert False, 'Annotation with id %s not found' % anns[i]['id']
# Filter anns according to params
imgAnns = np.unique(np.array([img['annotations'] for img in imgs]).flatten())
catAnns = np.unique(np.array([cat['annotations'] for cat in cats]).flatten())
if len(imgs) > 0:
anns = [ann for ann in anns if ann['id'] in imgAnns]
if len(cats) > 0:
anns = [ann for ann in anns if ann['id'] in catAnns]
if len(areaRng) == 2:
anns = [ann for ann in anns if ann['area'] >= areaRng[0] and ann['area'] <= areaRng[1]]
if supercat is not None:
subcats = [cat['category_id'] for cat in self.getCats(supercat=supercat)]
anns = [ann for ann in anns if ann['category_id'] in subcats]
if crowd is not None:
if (crowd):
anns = [ann for ann in anns if ann['iscrowd']]
else:
anns = [ann for ann in anns if not ann['iscrowd']]
return anns
# getX() functions #
def getOccl(self, img, show=False):
img = self.getImgs(img)[0]
occl = self.occlusion[img['image_id']]
if show:
self.showOccl(occl, img)
return occl
def getBounds(self, img, show=False):
"""
Get boundary mask for given image.
"""
img = self.getImgs(img)[0]
bound = self.bounds[img['image_id']]
mask = self.decodeMask(bound['boundary_mask'])
if show:
if np.count_nonzero(mask) > 0:
self.showBounds(mask, img)
else:
print('Mask is empty')
return mask
def getBboxes(self, img, cat='object', show=False):
"""
Get bounding box for each instance of given category in image.
:param img : image containing bounding boxes
:param cat : category or supercategory to filter by. Default returns
bboxes for all "object" (non-background) categories.
:param show (boolean): whether to pass result to self.showBboxes() before
proceeding.
:return: bboxes : list of bboxes, where each bbox is a dict:
{'bbox':[pos_x, pos_y, width, height],
'category': 'category_name'}
"""
img = self.getImgs(img)[0]
if cat in ['object', 'animal', 'background']: # supercategory
anns = self.__getSegmentationAnns(imgs=img, supercat=cat,crowd=False)
else:
cat = self.getCats(cat)[0]
assert not cat['onlysemantic'], 'No instance-level data for category %s' % cat['name']
anns = self.__getSegmentationAnns(imgs=img, cats=cat, crowd=False)
bboxes = []
for ann in anns:
bboxes.append({
'bbox': ann['bbox'],
'category': self.getCats(ann['category_id'])[0]['name']})
if show:
self.showBboxes(bboxes, img)
return bboxes
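# Usage sketch (assumes the default annotation file and JPEG folder from
# __init__ exist; 'person' is just an example category):
# >>> details = Detail()
# >>> img = details.getImgs(cats='person')[0]
# >>> details.getBboxes(img, cat='person', show=True)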
def getMask(self, img, cat=None, instance=None, superpart=None, part=None, show=False):
"""
Get mask for a particular level of segmentation. You may "drill down" to
the desired level of detail by specifying more parameters.
If semantic segmentation of an image is requested (cat=instance=superpart=part=None), the result
is an image whose pixel values are the class IDs for that image.
If instance-level segmentation for one category of an image is requested (img and cat provided),
the result is an image whose pixel values are the instance IDs for that class and 0 everywhere else.
If part-level segmentation of an instance is requested (img, cat, and instance provided),
the result is an image whose pixel values are the part IDs for that instance
and 0 everywhere else.
If a single-part binary mask for a part or superpart is requested (img,
cat, instance, and part or superpart provided), the result is an image whose pixel values are
0 everywhere except for the given part/superpart.
:param img (string/int/dict) : image that mask describes
cat (string/int/dict) : category or supercategory that mask describes
instance (string/int/dict) : instance that the mask describes. If integer, interpreted
as id of an "annotation" object in JSON. If
string starting with #, e.g. '#0', interpreted as 0-based index
of instance within the image (cat is None)
or of instance within the given class (cat not None).
part (string or int) : part that mask describes (None means all parts)
superpart (string): superpart that mask describes
show (boolean) : whether to pass the mask to self.showMask() before returning.
:return: mask (numpy 2D array) : a mask describing the requested annotation.
"""
# Validate params and convert them to dicts
img = self.getImgs(img)[0]
supercat = None
if cat is not None:
if cat in ['object', 'animal', 'background']:
supercat = cat
cat = None
else:
cat = self.getCats(cat)[0]
if part is not None:
part = self.getParts(part)[0]
# When part or superpart is requested, instance is assumed to be first instance
# of the given category
if (cat or supercat) and (part or superpart) and not instance:
instance = '#0'
if instance is not None:
try:
if type(instance) is str:
if instance.startswith('#'):
# If instance is set to '#N' where N is an integer,
# get the Nth (0-indexed) instance of the given category.
if cat is not None:
instance = self.__getSegmentationAnns(imgs=img, cats=cat)[int(instance[1:])]
else:
instance = self.__getSegmentationAnns(imgs=img, supercat='object')[int(instance[1:])]
else:
instance = self.__getSegmentationAnns(int(instance))[0]
elif type(instance) is int:
instance = self.__getSegmentationAnns(instance)[0]
except IndexError:
assert False, 'Couldn\'t find the requested instance'
anns = self.__getSegmentationAnns(imgs=img, cats=[] if cat is None else cat,
supercat=supercat, crowd=None if instance is None else False)
mask = np.zeros((img['height'], img['width']))
# Generate mask based on params
if not (cat or supercat or instance or part):
# Generate class mask
for ann in anns:
m = self.decodeMask(ann['segmentation'])
mask[np.nonzero(m)] = ann['category_id']
elif (cat or supercat) and not (instance or part):
# Generate instance (or single-class semantic) mask
i = 1
for ann in anns:
m = self.decodeMask(ann['segmentation'])
if cat and cat['onlysemantic']:
mask[np.nonzero(m)] = 1
else:
mask[np.nonzero(m)] = i
i = i + 1
elif instance and not part:
assert not instance['iscrowd'], 'Instance-level segmentation not available'
# Generate part mask
for p in instance['parts']:
m = self.decodeMask(p['segmentation'])
mask[np.nonzero(m)] = p['part_id']
if superpart is not None:
parts = [p['part_id'] for p in self.getParts(superpart=superpart)]
newmask = np.zeros(mask.shape)
for p in parts:
newmask += p * (mask == p)
mask = newmask
elif instance and part:
# Generate single-part mask
partMask = [p['segmentation'] for p in instance['parts'] \
if p['part_id'] == part['part_id']]
assert len(partMask) > 0, 'Couldn\'t find a part mask for the given part and instance'
partMask = partMask[0]
m = self.decodeMask(partMask)
mask[np.nonzero(m)] = part['part_id']
else:
assert False, 'Invalid parameters'
if show:
if np.count_nonzero(mask) > 0:
self.showMask(mask, img)
else:
print('Mask is empty')
return mask
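# Usage sketch of the drill-down described above (continuing with the details
# and img objects from the getBboxes sketch; 'person' and 'head' are
# illustrative category/superpart names):
# >>> semantic = details.getMask(img)  # class-id mask
# >>> instances = details.getMask(img, cat='person')  # instance-id mask
# >>> parts = details.getMask(img, cat='person', instance='#0')  # part-id mask
# >>> head = details.getMask(img, cat='person', instance='#0', superpart='head')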
def getKptAnno(self, skeleton_id=0):
"""
Get keypoints annotations by skeleton_id
:param skeleton_id (int): get the #skeleton_id of kpts annotations
:return: kpt_annotation (dict) : kpts dicts
"""
assert(type(skeleton_id) is int) # skeleton_id must be int
assert(skeleton_id < len(self.kpts) and skeleton_id >= 0) # skeleton_id can not get out of bound
return self.kpts[skeleton_id]
def getKpts(self, img, show=False):
"""
Get human keypoints for the image.
:param img (int/string/dict) : image to get keypoints for
:return: kpts (dict array) : array of kpts dict in the img
"""
img = self.getImgs(img)[0]
kpts = []
for skeleton_id in img['keypoints']:
kpts.append(self.kpts[skeleton_id])
if show:
self.showKpts(kpts, img)
return kpts
def getCats(self, cats=[], imgs=[], supercat=None, with_instances=None):
"""
Get categories abiding by the given filters. default is no filter.
:param cats (int/string/dict array) : get cats for given cat names/ids/dicts
:param imgs (int/string/dict array) : get cats present in at least one of the given image names/ids
:param supercat : get cats that belong to the specified supercategory
:param with_instances (boolean): filter cats based on whether they have
instance-level annotations
:return: cats (dict array) : array of category dicts
"""
cats = self.__toList(cats)
if len(cats) == 0:
cats = list(self.cats.values())
else:
for i in range(len(cats)):
if type(cats[i]) == int: cats[i] = self.cats[cats[i]]
elif type(cats[i]) == dict: cats[i] = self.cats[cats[i]['category_id']]
elif type(cats[i]) == str:
try:
cats[i] = [c for c in self.cats.values() if c['name'] == cats[i]][0]
except IndexError:
assert False, 'Category "%s" not found' % cats[i]
if type(imgs) is not list or len(imgs) > 0:
imgs = self.getImgs(imgs)
catIds = np.unique(np.array([img['categories'] for img in imgs]).flatten())
cats = [cat for cat in cats if cat['category_id'] in catIds]
if supercat is not None:
scs = []
if supercat == 'object': scs = ['object', 'animal']
else: scs = [supercat]
cats = [cat for cat in self.cats.values() if cat['supercategory'] in scs]
if with_instances is not None:
cats = [cat for cat in cats if not cat['onlysemantic'] == with_instances]
return cats
def getSuperparts(self, cat=None):
"""
Get list of all defined superparts.
:return: superparts (string array): list of superpart names
"""
superparts = set()
parts = self.getParts(cat=cat)
for part in parts:
if part['superpart'] != 'none':
superparts.add(part['superpart'])
return list(superparts)
def getParts(self, parts=[], cat=None, superpart=None):
"""
Get parts of a particular category.
:param parts (int/string/dict array) : list of parts to get
:param cat (int, string, or dict) : category to get parts for (default: any)
:param superpart (string) : superpart to get parts for - one of ["object",
"background", "animal"]
:return: parts (dict array) : array of part dicts, e.g.
[{"name": "mouth", "superpart": "head", "part_id": 110},...]
"""
parts = self.__toList(parts)
if len(parts) == 0:
parts = list(self.parts.values())
else:
for i in range(len(parts)):
if type(parts[i]) == int: parts[i] = self.parts[parts[i]]
elif type(parts[i]) == dict: parts[i] = self.parts[parts[i]['part_id']]
elif type(parts[i]) == str:
try: parts[i] = [p for p in self.parts.values() if p['name'] == parts[i]][0]
except IndexError: assert False, 'No part named \"%s\"' % parts[i]
if cat is not None:
cat = self.getCats(cat)[0]
if cat is not None:
oldparts = copy.copy(parts)
for part in oldparts:
if part['part_id'] not in cat['parts']:
parts.remove(part)
if superpart is not None:
oldparts = copy.copy(parts)
for part in oldparts:
if part['superpart'] != superpart:
parts.remove(part)
return parts
def getImgs(self, imgs=[], cats=[], supercat=None, phase=None):
'''
Get images that satisfy given filter conditions.
:param imgs (int/string/dict array) : get imgs with given ids
:param cats (int/string/dict array) : get imgs with all given cats
:param supercat (string) : get imgs with the given supercategory
:param phase (string) : filter images by phase. If None, the phase
provided to the Detail() constructor is used.
:return: images (dict array) : array of image dicts
'''
if phase is None:
phase = self.phase
phases = []
if "train" in phase: phases.append("train")
if "val" in phase: phases.append("val")
if "test" in phase: phases.append("test")
assert len(phases) > 0, 'Invalid phase, {}'.format(phase)
imgs = self.__toList(imgs)
if len(imgs) == 0:
imgs = list(self.imgs.values())
else:
for i in range(len(imgs)):
if type(imgs[i]) == int: imgs[i] = self.imgs[imgs[i]]
elif type(imgs[i]) == dict: imgs[i] = self.imgs[imgs[i]['image_id']]
elif type(imgs[i]) == str:
imstr = imgs[i]
imgs[i] = self.imgs[int(imstr[:4] + imstr[5:])]
if type(cats) is not list or len(cats) > 0:
cats = self.getCats(cats)
oldimgs = copy.copy(imgs)
for img in oldimgs:
for cat in cats:
if cat['category_id'] not in img['categories']:
imgs.remove(img)
break
if supercat is not None:
catIds = set([c['category_id'] for c in self.getCats(supercat=supercat)])
oldimgs = copy.copy(imgs)
for img in oldimgs:
if len(catIds & set(img['categories'])) == 0:
imgs.remove(img)
oldimgs = copy.copy(imgs)
for img in oldimgs:
if img['phase'] not in phases:
imgs.remove(img)
return imgs
# showX() functions #
def showImg(self, img, wait=False, ax=None):
"""
Display the given image
"""
img = self.getImgs(img)[0]
jpeg = io.imread(os.path.join(self.img_folder, img['file_name']))
# print image details
print('showing image %s: ' % img['file_name'])
keys = ['image_id', 'width', 'height', 'phase', 'date_captured']
for k in keys:
print('\t%s: %s,' % (k, img[k] if img.get(k) else 'N/A'))
if ax is not None:
ax.imshow(jpeg)
else:
plt.imshow(jpeg)
plt.axis('off')
if wait:
self.waiting = True
else:
plt.show()
def showMask(self, mask, img=None):
"""
Display given mask (numpy 2D array) as a colormapped image.
"""
if img is not None and not self.waiting:
self.showImg(img, wait=True)
# Draw mask, random colormap, 0s transparent
self.waiting = False
mycmap = self.__genRandColormap()
mycmap.set_under(alpha=0.0)
nonzero = np.unique(mask[np.nonzero(mask)])
plt.imshow(mask, cmap=mycmap, vmin=np.min(nonzero), vmax=np.max(nonzero)+1)
plt.axis('off')
plt.show()
def showBboxes(self, bboxes, img=None):
"""
Display given bounding boxes.
"""
fig,ax = plt.subplots(1)
if img is not None and not self.waiting:
self.showImg(img, wait=True, ax=ax)
for bbox in bboxes:
ax.add_patch(Rectangle((bbox['bbox'][0],bbox['bbox'][1]), bbox['bbox'][2], bbox['bbox'][3], linewidth=2,
edgecolor=np.random.rand(3), facecolor='none',
label=bbox['category']))
print('categories: %s' % [bbox['category'] for bbox in bboxes])
self.waiting = False
plt.legend()
plt.axis('off')
plt.show()
def showKpts(self, kpts, img=None):
"""
Display given kpts.
"""
fig,ax = plt.subplots(1)
if img is not None:
self.showImg(img, wait=True, ax=ax)
pv = np.zeros(14)
px = np.zeros(14)
py = np.zeros(14)
for kpt in kpts:
skeleton_color = np.random.rand(3)
num_kpt = len(kpt['keypoints'])/3 # always 14
assert num_kpt == 14, 'Expected 14 keypoints but found {}'.format(num_kpt)
for i in range(int(num_kpt)):
px[i] = kpt['keypoints'][3*i]
py[i] = kpt['keypoints'][3*i+1]
pv[i] = kpt['keypoints'][3*i+2]
kpt_pair = [[0, 1], [1, 2], [2, 3], [3, 4], [2, 5], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13]]
for p in kpt_pair:
p0 = p[0]
p1 = p[1]
if pv[p0] == 0 or pv[p1] == 0:
continue
if pv[p0] == 2 or pv[p1] == 2:
pcolor = 'blue'
else:
pcolor = 'red'
ax.add_patch(Arrow(px[p0], py[p0], px[p1]-px[p0], py[p1]-py[p0],
width=2.0, facecolor=skeleton_color,
edgecolor=skeleton_color))
for i in range(int(num_kpt)):
if pv[i] == 0:
continue
pcolor = 'none'
if pv[i] == 1:
pcolor = 'red'
else:
pcolor = 'blue'
ax.add_patch(Circle((px[i], py[i]), radius=3, facecolor=pcolor,
edgecolor=skeleton_color, linewidth=2.0))
self.waiting = False
plt.axis('off')
plt.show()
def showBounds(self, mask, img):
"""
Dilate mask before passing it to showMask()
"""
img = self.getImgs(img)[0]
# dilate mask (creates new ndarray of bools)
mask = binary_dilation(mask, iterations=NUM_BOUNDARY_DILATION_ITERATIONS)
# show mask
self.showMask(mask, img)
def showOccl(self, occl, img):
"""
Show occlusion data
"""
img = self.getImgs(img)[0]
fig,ax = plt.subplots(1)
if img is not None and not self.waiting:
self.showImg(img, wait=True, ax=ax)
bounds = np.zeros(occl['imsize'])
for i in range(occl['ne']): # ne = "number of edges"
pixel_indices = occl['edges']['indices'][i]
num_pixels = len(pixel_indices)
pixel_coords = np.unravel_index(pixel_indices, occl['imsize'], order='F')
from typing import Sequence, Union
import numpy as np
import scipy.stats
from sklearn.metrics import classification_report, confusion_matrix
from .registry import registry
@registry.register_metric('mse')
def mean_squared_error(target: Sequence[float],
prediction: Sequence[float]) -> float:
target_array = np.asarray(target)
prediction_array = np.asarray(prediction)
return np.mean(np.square(target_array - prediction_array))
@registry.register_metric('mae')
def mean_absolute_error(target: Sequence[float],
prediction: Sequence[float]) -> float:
target_array = np.asarray(target)
prediction_array = np.asarray(prediction)
return np.mean(np.abs(target_array - prediction_array))
import unittest
import dgl
import networkx as nx
from scipy import sparse
import numpy as np
from photonai_graph.GraphConversions import check_dgl
class DglToNetworkxTest(unittest.TestCase):
def setUp(self):
# create dgl graphs
dgl_graph = dgl.DGLGraph()
dgl_graph.add_nodes(3)
dgl_graph.add_edges([0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1])
self.dgl_graph_list = [dgl_graph] * 10
# create networkx graphs
nx_graph = nx.cycle_graph(5)
self.nx_graph_list = [nx_graph] * 10
# create scipy matrix
sparse_matrix = sparse.csr_matrix([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]])
self.sp_matrix_list = [sparse_matrix] * 10
# create numpy matrix
array = np.array([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]])
m4d_array = array[np.newaxis, :, :, np.newaxis]
individuals_array = np.repeat(m4d_array, 5, axis=0)
import pytest
import numpy as np
import scipy as sp
import pandas as pd
import scdrs
from .test_method_score_cell_main import load_toy_data
def test_select_ctrl_geneset():
"""
Test scdrs.method._select_ctrl_geneset
"""
np.random.seed(0)
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
df_gene = pd.DataFrame(
index=adata.var_names[:100], columns=["gene", "categorical", "continuous"]
)
df_gene["gene"] = df_gene.index
df_gene["categorical"] = [1] * 50 + [2] * 50
df_gene["continuous"] = np.random.rand(100)
gene_list = list(df_gene.index[[0, 55, 27, 80, 2]])
gene_weight = [1.1, 2.5, 3.8, 4.1, 5.2]
for ctrl_match_key in ["categorical", "continuous"]:
dic_ctrl_list, dic_ctrl_weight = scdrs.method._select_ctrl_geneset(
df_gene, gene_list, gene_weight, ctrl_match_key, 1, 10, 0
)
ctrl_gene_list_sort = np.array(dic_ctrl_list[0])[np.argsort(dic_ctrl_weight[0])]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_cop_marg_stresstest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_cop_marg_stresstest&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=sdoc-copula-stresstest).
# +
import numpy as np
from scipy.stats import lognorm, gamma
from scipy.linalg import toeplitz
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from arpym.statistics import simulate_t, quantile_sp, cop_marg_sep,\
cop_marg_comb
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_cop_marg_stresstest-parameters)
j_ = 10**4 # number of scenarios
n_ = 30 # dimension of the target X
mu_eps = np.zeros(n_) # location of epsilon
sigma2_eps = np.eye(n_) # dispersion of epsilon
nu_eps = 20 # dof of epsilon
k_ = 15 # dimension of the factors Z
mu_z = np.zeros(k_) # location of Z
sigma2_z = np.eye(k_) # dispersion of Z
nu_z = 5 # dof of Z
b1 = toeplitz(np.linspace(-0.9, 1.1, n_), np.linspace(-0.6, 1.2, k_))
b2 = toeplitz(np.linspace(-2, 0.5, n_), np.linspace(-0.7, 1, k_))
b = b1 + np.sin(b1@((b2.T@([email protected]))@b1))
mu_1 = 0.2 # lognormal location
sigma2_1 = 0.25 # lognormal scale parameter
k_grid = np.linspace(1, 10, (n_-1)) # Gamma degree of freedom
theta_grid = np.linspace(1, 20, (n_-1)) # Gamma scale parameter
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_cop_marg_stresstest-implementation-step00): Generate scenarios for target variable with equal probabilities
z = simulate_t(mu_z, sigma2_z, nu_z, j_)
eps = simulate_t(mu_eps, sigma2_eps, nu_eps, j_)
y = [email protected] + eps
p = np.ones(j_)/j_ # flat flexible probabilities
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_cop_marg_stresstest-implementation-step01): Separation step
u, y_sort, cdf_y = cop_marg_sep(y, p=p)
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_cop_marg_stresstest-implementation-step02): Marginal cdf's
# +
# lognormal marginal
cdf_x_l = lognorm.cdf(y_sort[:, 0], np.sqrt(sigma2_1), np.exp(mu_1))
cdf_x_g = np.zeros((j_, (n_-1)))
for n in range((n_-1)):
# Gamma marginals
cdf_x_g[:, n] = gamma.cdf(y_sort[:, n], k_grid[n], scale=theta_grid[n])
cdf_x = np.c_[cdf_x_l, cdf_x_g]
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_cop_marg_stresstest-implementation-step03): Combination step
x = cop_marg_comb(u, y_sort, cdf_x)
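# Optional sanity check (illustrative): re-separating the combined scenarios
# should give back the same grades, since the combination step only changes the
# marginals and leaves the copula untouched.
# >>> u_check, _, _ = cop_marg_sep(x, p=p)
# >>> np.allclose(np.corrcoef(u.T), np.corrcoef(u_check.T), atol=1e-2)
# True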
# ## Plots
# +
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'STIXGeneral:italic'
plt.rcParams['mathtext.bf'] = 'STIXGeneral:italic:bold'
plt.style.use('arpm')
# Colors
y_color = [153/255, 205/255, 129/255]
u_color = [60/255, 149/255, 145/255]
x_color = [4/255, 63/255, 114/255]
# Copula-marginal separation
# Figure specifications
plt.figure()
mydpi = 72.0
f = plt.figure(figsize=(1280.0/mydpi, 720.0/mydpi), dpi=mydpi)
gs0 = gridspec.GridSpec(2, 2)
xlim = [np.percentile(y[:, 0], 0.5), np.percentile(y[:, 0], 99.5)]
ylim = [np.percentile(y[:, 1], 0.5), np.percentile(y[:, 1], 99.5)]
u_lim = [0, 1]
# Marginal X1
gs00 = gridspec.GridSpecFromSubplotSpec(23, 20, subplot_spec=gs0[0])
ax1 = plt.Subplot(f, gs00[:-5, 4:-5], ylim=u_lim)
f.add_subplot(ax1)
ax1.tick_params(labelsize=14)
ax1.set_xlim([-20, 20])
plt.plot(y_sort[:, 0], cdf_y[:, 0], lw=2, color=y_color)
plt.title(r'Distribution of $Y_1$', fontsize=20, fontweight='bold', y=1.03)
# Copula scenarios
gs01 = gridspec.GridSpecFromSubplotSpec(46, 18, subplot_spec=gs0[1],
wspace=0, hspace=0.6)
ax2 = plt.Subplot(f, gs01[:-10, 3:-8], ylim=[0, 1], xlim=[0, 1])
f.add_subplot(ax2)
plt.scatter(u[:, 1], u[:, 0], s=5, color=u_color)
ax2.tick_params(labelsize=14)
plt.title(r'Copula $\mathbf{U}$', fontsize=20, fontweight='bold', y=1.03)
ax2_txt = ax2.text(0.1, 0.9 ,"",fontsize=20)
ax2_title_1 = r'$\mathrm{\mathbb{C}}$'+r'r'+r"$\{U_1,U_2\}=%2.2f$" % (np.corrcoef(u[:,:2].T)[0,1])
ax2_txt.set_text(ax2_title_1)
# Grade U1
ax3 = plt.Subplot(f, gs01[:-10, 1])
ax3.tick_params(labelsize=14)
f.add_subplot(ax3)
plt.xlim([0, 2])
plt.ylim([0, 1])
ax3.get_yaxis().set_visible(False)
plt.hist(np.sort(u[:, 0]), weights=p, bins=int(10*np.log(j_)), density=True,
color=u_color, orientation='horizontal')
plt.title('Grade $U_1$', fontsize=16, fontweight='bold', y=1.03)
# Grade U2
ax4 = plt.Subplot(f, gs01[41:46, 3:-8], sharex=ax2)
f.add_subplot(ax4)
ax4.tick_params(labelsize=14)
ax4.get_xaxis().set_visible(False)
plt.hist(np.sort(u[:, 1]), weights=p, bins=int(10*np.log(j_)),
density=True, color=u_color)
ax4.set_title('Grade $U_2$', fontsize=16, fontweight='bold', x=-0.27, y=0)
ax4.yaxis.tick_right()
plt.ylim([0, 2])
plt.xlim([0, 1])
# Joint scenarios
gs02 = gridspec.GridSpecFromSubplotSpec(24, 20, subplot_spec=gs0[2], wspace=0.2, hspace=0.5)
ax5 = plt.Subplot(f, gs02[7:, 4:-5])
f.add_subplot(ax5)
plt.scatter(y[:, 0], y[:, 1], s=5, color=y_color, label=r'$F_{X_{1}}(x)$')
ax5.set_xlim([-20, 20])
ax5.set_ylim([-8, 8])
ax5.tick_params(labelsize=14)
plt.xlabel('$Y_1$', fontsize=17)
plt.ylabel('$Y_2$', fontsize=17)
ax5_title = 'Joint'+r' $\mathbf{Y}=\mathbf{\beta}\mathbf{Z} + \mathbf{\varepsilon}$'
plt.title(ax5_title, fontsize=20, fontweight='bold', y=-0.3)
ax5_txt = ax5.text(-7, 6.5 ,"",fontsize=20)
ax5_title_1 = r'$\mathrm{\mathbb{C}}$'+r'r'+r"$\{Y_1,Y_2\}=%2.2f$" % (np.corrcoef(y[:,:2].T)[0,1])
ax5_txt.set_text(ax5_title_1)
# Histogram Y1
ax7 = plt.Subplot(f, gs02[0:5, 4:-5])
f.add_subplot(ax7)
plt.hist(y[:, 0], weights=p, bins=int(20*np.log(j_)), density=True, color=y_color)
ax7.tick_params(labelsize=14)
ax7.set_ylim([0, 0.45])
ax7.set_xlim([-20, 20])
ax7.get_xaxis().set_visible(False)
# Histogram Y2
ax8 = plt.Subplot(f, gs02[7:, -4:-1])
f.add_subplot(ax8)
plt.hist(y[:, 1], weights=p, bins=int(20*np.log(j_)), density=True,
orientation='horizontal', color=y_color)
ax8.tick_params(labelsize=14)
ax8.set_xlim([0, 0.4])
ax8.set_ylim([-8, 8])
ax8.get_yaxis().set_visible(False)
# Marginal Y2
gs03 = gridspec.GridSpecFromSubplotSpec(25, 18, subplot_spec=gs0[3])
ax6 = plt.Subplot(f, gs03[7:, 3:-8])
f.add_subplot(ax6)
plt.plot(cdf_y[:, 1], y_sort[:, 1], lw=2, color=y_color)
plt.title(r'Distribution of $Y_2$', fontsize=20, fontweight='bold', y=-0.3)
ax6.tick_params(labelsize=14)
ax6.set_ylim([-8, 8])
plt.xlim([0, 1])
add_logo(f, location=4, set_fig_size=False)
plt.tight_layout()
# Copula-marginal combination
plt.style.use('arpm')
y_lim = [np.percentile(x[:, 0], 0.5), np.percentile(x[:, 0], 99.5)]
x_lim = [np.percentile(x[:, 1], 0.5), np.percentile(x[:, 1], 99.5)]
u_lim = [0, 1]
plt.figure()
mydpi = 72.0
f = plt.figure(figsize=(1280.0/mydpi, 720.0/mydpi), dpi=mydpi)
gs0 = gridspec.GridSpec(2, 2)
# # Marginal X2
gs00 = gridspec.GridSpecFromSubplotSpec(44, 18, subplot_spec=gs0[0],
wspace=2, hspace=2.5)
ax1 = plt.Subplot(f, gs00[:-15, 4:-6], ylim=x_lim, xlim=[0, 1])
f.add_subplot(ax1)
plt.plot(np.sort(u[:, 1]), gamma.ppf(np.sort(u[:, 1]), k_grid[0], scale=theta_grid[0]), lw=2, color=x_color)
from __future__ import print_function
from psychopy import event, sound, visual
import numpy as np
import string, math
from copy import deepcopy
import random, os
def drawResponses(responses,respStim,numCharsWanted,drawBlanks):
'''Draw the letters the user has entered
drawBlanks controls whether unfilled positions are shown as underscores, which is why numCharsWanted is needed
'''
respStr = ''.join(responses) #converts list of characters (responses) into string
#print 'responses=',responses,' respStr = ', respStr #debugOFF
if drawBlanks:
blanksNeeded = numCharsWanted - len(respStr)
respStr = respStr + '_'*blanksNeeded
respStim.setText(respStr,log=False)
respStim.draw();
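# Example (assumes a psychopy window `myWin` and a response TextStim created
# elsewhere in the experiment; values are illustrative):
# >>> respStim = visual.TextStim(myWin, pos=(0, -0.8), color='white')
# >>> drawResponses(['A', 'B'], respStim, numCharsWanted=4, drawBlanks=True)  # draws "AB__"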
def drawChoiceArrayAndCollectResponse(targetImage,lineupImages,lineupImageIndexes, clickSound,myMouse,myWin,imageSz,expStop):
event.clearEvents()
numInArray = len(lineupImages) + 1
targetPos = random.randint(0, numInArray-1)
lineupImages.insert(targetPos,targetImage)
lineupImageIndexes.insert(targetPos,'T')
xPosRange = 2; yPosRange = 2
coords = np.array([[.5, .5], [-.5, .5], [-.5, -.5], [.5, -.5]])
from konlpy.tag import Mecab
from sklearn.preprocessing import normalize
from soynlp.hangle import decompose, character_is_korean
import numpy as np
class Word2Vec:
def __init__(self, vecs_txt_fname, vecs_bin_fname=None, method="word2vec", tokenizer_name="mecab"):
self.tokenizer = get_tokenizer(tokenizer_name)
self.tokenizer_name = tokenizer_name
self.method = method
self.dictionary, self.words, self.vecs = self.load_vectors(vecs_txt_fname, method)
self.dim = self.vecs.shape[1]
if "fasttext" in method:
from fasttext import load_model as load_ft_model
self.model = load_ft_model(vecs_bin_fname)
def get_word_vector(self, word):
if self.method == "fasttext-jamo":
word = jamo_sentence(word)
if self._is_in_vocabulary(word):
vector = self.dictionary[word]
else:
if "fasttext" in self.method:
vector = self.model.get_word_vector(word)
else:
vector = np.zeros(self.dim)
#CSTAT+ A GPU-accelerated spatial pattern analysis algorithm for high-resolution 2D/3D hydrologic connectivity using array vectorization and convolutional neural network
#Author: <NAME>, <NAME>
#Department of Earth, Atmospheric and Planetary Sciences, Purdue University, 550 Stadium Mall Dr, West Lafayette, IN 47907 USA.
#Email: <EMAIL>; Alternative: <EMAIL>
#This is the omnidirectional version: CSTAT+/OMNI
import os
from osgeo import gdal
import numpy as np
import copy as cp
from numpy import genfromtxt as gft
from scipy.ndimage.measurements import label
from itertools import combinations_with_replacement,product
from mxnet import nd,gpu
from timeit import default_timer as timer
import pandas as pd
#Binarize pattern
def prep(expe0,threshold,NoData):
#Provide threshold for High/Low, usually the depth of shallow sheetflow
expe1=cp.deepcopy(expe0)
expe2=cp.deepcopy(expe0)
expe1[(expe1>=threshold)]=1
expe1[(expe1<threshold)]=0
expe2[(expe2==NoData)]=-1
expe2[(expe2>0)]=0
connection_structure = np.array([[1,1,1],[1,1,1],[1,1,1]])
expela, num_features = label(expe1, structure=connection_structure)
expe3=expe2+expela
return (expe3)
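# Minimal sketch of what prep() produces on a toy 2x3 depth grid (values assumed for
# illustration): cells at or above the threshold share a positive connected-component
# label, dry cells stay 0, and NoData cells become -1.
# toy = np.array([[0.2, 0.2, 0.0],
#                 [0.0, 0.2, -9999.0]])
# prep(toy, threshold=0.1, NoData=-9999)
# # -> [[1, 1, 0],
# #     [0, 1, -1]]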
def itercontrol(regions,k,bins,dibins,dibins4,binnum):
#Initialize empty arrays for the summed distances, the histogram of directions, and the number of counted pairs in each distance-range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
bins=nd.array(bins,gpu(0))
dibins=nd.array(dibins,gpu(0))
dibins4=nd.array(dibins4,gpu(0))
if k==2:
#Create segment index for the input array to meet the memory requirement
imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0)))
#Combinations with repeated indices
iterator=list(combinations_with_replacement(imax,2))
for i in iterator:
if i[0]==i[1]:
vout=distanceAA2(regions,i,binnum,dibins,dibins4)
co0+=vout[0]
codi0+=vout[1]
count0+=vout[2]
co4+=vout[3]
count4+=vout[4]
else:
vout=distanceAA1(regions,i,binnum,dibins,dibins4)
co0+=vout[0]
codi0+=vout[1]
count0+=vout[2]
co4+=vout[3]
count4+=vout[4]
return (co0.asnumpy(),codi0.asnumpy(),count0.asnumpy(),co4.asnumpy(),count4.asnumpy())
elif k==1:
#Create segment index for the input array to meet the memory requirement
imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0)))
#Combinations with repeated indices
iterator=list(combinations_with_replacement(imax,2))
for i in iterator:
if i[0]==i[1]:
count0+=distance2(regions,i,binnum,bins)
else:
count0+=distance1(regions,i,binnum,bins)
return (count0.asnumpy())
else:
#Unpack the tuple
regions_high,regions_low=regions
#Create segment index for the input array to meet the memory requirement
imax_high=list(range(int(regions_high.shape[0]/broadcdp)+(regions_high.shape[0]%broadcdp!=0)))
imax_low=list(range(int(regions_low.shape[0]/broadcdp)+(regions_low.shape[0]%broadcdp!=0)))
#Cartesian product of high- and low-region chunk indices
iterator=list(product(imax_high,imax_low))
for i in iterator:
count0+=distance11(regions_high,regions_low,i,binnum,bins)
return (count0.asnumpy())
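# Chunking sketch (illustrative numbers): with broadcdp = 2 and a region of 5 cells,
# imax = [0, 1, 2] and combinations_with_replacement(imax, 2) yields the block pairs
# (0,0), (0,1), (0,2), (1,1), (1,2), (2,2); every cell pair is visited exactly once
# without materialising the full 5 x 5 distance matrix on the GPU at one time.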
def distanceAA1(regions,i,binnum,dibins,dibins4):
#Initialize empty arrays for the summed distances, the histogram of directions, and the number of counted pairs in each distance-range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Map 0 to 180 so the boolean-mask sums do not drop these values
sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
co0[p]+=nd.nansum(ldis*booleanmask)
#Exclude values not in this distance range bin
sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
for q in range (0,5):
booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))
codi0[q,p]+=nd.nansum(booleanmaskdi)
for k in range (0,5):
booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
ldis0=ldis*booleanmaskdi
for l in range (0,binnum-1):
booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
count4[k,l]+=nd.nansum(booleanmask)
co4[k,l]+=nd.nansum(ldis0*booleanmask)
codi0[0,:]+=codi0[4,:]
codi0=codi0[0:4,:]
count4[0,:]+=count4[4,:]
count4=count4[0:4,:]
co4[0,:]+=co4[4,:]
co4=co4[0:4,:]
return(co0,codi0,count0,co4,count4)
def distanceAA2(regions,i,binnum,dibins,dibins4):
#Initialize empty arrays for the summed distances, the histogram of directions, and the number of counted pairs in each distance-range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
# print ("a1",a1,"b1",b1)
for ii in range (a1.shape[0]-1):
a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
seed=nd.concat(seed,a1_b1,dim=0)
if seed.shape[0]>1:
x1_x2=seed[1:,0]
y1_y2=seed[1:,1]
labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Map 0 to 180 so the boolean-mask sums do not drop these values
sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
co0[p]+=nd.nansum(ldis*booleanmask)
#Exclude values not in this distance range bin
sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
for q in range (0,5):
booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))
codi0[q,p]+=nd.nansum(booleanmaskdi)
for k in range (0,5):
booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
ldis0=ldis*booleanmaskdi
for l in range (0,binnum-1):
booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
count4[k,l]+=nd.nansum(booleanmask)
co4[k,l]+=nd.nansum(ldis0*booleanmask)
codi0[0,:]+=codi0[4,:]
codi0=codi0[0:4,:]
count4[0,:]+=count4[4,:]
count4=count4[0:4,:]
co4[0,:]+=co4[4,:]
co4=co4[0:4,:]
return(co0,codi0,count0,co4,count4)
#Full permutation distance computation
def distance1(regions,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
#Full permutation distance computation between different regions: high and low
def distance11(regions_high,regions_low,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
#Calculate index coordinates and directions by chunks
a=regions_high[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions_high.shape[0]),:]
b=regions_low[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions_low.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
#Full combination distance computation
def distance2(regions,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
for ii in range (a1.shape[0]-1):
a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
seed=nd.concat(seed,a1_b1,dim=0)
if seed.shape[0]>1:
x1_x2=seed[1:,0]
y1_y2=seed[1:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
def omni(taoh_W,mean_d,cardh_his,taoh_W4,mean_d4,binnum):
#Compute OMNI
OMNIW=np.zeros(binnum,dtype="float32")
OMNIW4=np.zeros((4,binnum),dtype="float32")
#Convert NaN to zero to avoid issues
taoh_W1=np.nan_to_num(taoh_W)
mean_d1=np.nan_to_num(mean_d)
taoh_W41=np.nan_to_num(taoh_W4)
mean_d41=np.nan_to_num(mean_d4)
"""Preprocessing workflow definition."""
import os
import os.path as op
import numpy as np
import pandas as pd
import nibabel as nib
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import decomposition as decomp
from moss.mosaic import Mosaic
import seaborn as sns
from nipype.interfaces import io, fsl, freesurfer as fs
from nipype import Node, MapNode, Workflow, IdentityInterface
from nipype.interfaces.base import (BaseInterface,
BaseInterfaceInputSpec,
InputMultiPath, OutputMultiPath,
TraitedSpec, File, traits)
from nipype.workflows.fmri.fsl import create_susan_smooth
import lyman
from lyman.tools import (SingleInFile, SingleOutFile, ManyOutFiles,
SaveParameters, list_out_file)
def create_preprocessing_workflow(name="preproc", exp_info=None):
"""Return a Nipype workflow for fMRI preprocessing.
This mostly follows the preprocessing in FSL, although some
of the processing has been moved into pure Python.
Parameters
----------
name : string
workflow object name
exp_info : dict
dictionary with experimental information
"""
preproc = Workflow(name)
if exp_info is None:
exp_info = lyman.default_experiment_parameters()
# Define the inputs for the preprocessing workflow
in_fields = ["timeseries", "subject_id"]
if exp_info["whole_brain_template"]:
in_fields.append("whole_brain")
if exp_info["fieldmap_template"]:
in_fields.append("fieldmap")
inputnode = Node(IdentityInterface(in_fields), "inputs")
# Remove equilibrium frames and convert to float
prepare = MapNode(PrepTimeseries(), "in_file", "prep_timeseries")
prepare.inputs.frames_to_toss = exp_info["frames_to_toss"]
# Unwarp using fieldmap images
if exp_info["fieldmap_template"]:
unwarp = create_unwarp_workflow(fieldmap_pe=exp_info["fieldmap_pe"])
# Motion and slice time correct
realign = create_realignment_workflow(
temporal_interp=exp_info["temporal_interp"],
TR=exp_info["TR"],
slice_order=exp_info["slice_order"],
interleaved=exp_info["interleaved"])
# Estimate a registration from functional to anatomical space
coregister = create_bbregister_workflow(
partial_brain=bool(exp_info["whole_brain_template"]),
init_with=exp_info["coreg_init"])
# Skullstrip the brain using the Freesurfer segmentation
skullstrip = create_skullstrip_workflow()
# Smooth intelligently in the volume
susan = create_susan_smooth()
susan.inputs.inputnode.fwhm = exp_info["smooth_fwhm"]
# Scale and filter the timeseries
filter_smooth = create_filtering_workflow("filter_smooth",
exp_info["hpf_cutoff"],
exp_info["TR"],
"smoothed_timeseries")
filter_rough = create_filtering_workflow("filter_rough",
exp_info["hpf_cutoff"],
exp_info["TR"],
"unsmoothed_timeseries")
# Automatically detect motion and intensity outliers
artifacts = MapNode(ArtifactDetection(),
["timeseries", "mask_file", "motion_file"],
"artifacts")
artifacts.inputs.intensity_thresh = exp_info["intensity_threshold"]
artifacts.inputs.motion_thresh = exp_info["motion_threshold"]
artifacts.inputs.spike_thresh = exp_info["spike_threshold"]
# Extract nuisance variables from anatomical sources
confounds = create_confound_extraction_workflow("confounds",
exp_info["wm_components"])
# Save the experiment info for this run
saveparams = MapNode(SaveParameters(exp_info=exp_info),
"in_file", "saveparams")
preproc.connect([
(inputnode, prepare,
[("timeseries", "in_file")]),
(realign, artifacts,
[("outputs.motion_file", "motion_file")]),
(realign, coregister,
[("outputs.timeseries", "inputs.timeseries")]),
(inputnode, coregister,
[("subject_id", "inputs.subject_id")]),
(realign, skullstrip,
[("outputs.timeseries", "inputs.timeseries")]),
(inputnode, skullstrip,
[("subject_id", "inputs.subject_id")]),
(coregister, skullstrip,
[("outputs.tkreg_mat", "inputs.reg_file")]),
(skullstrip, artifacts,
[("outputs.mask_file", "mask_file")]),
(skullstrip, susan,
[("outputs.mask_file", "inputnode.mask_file"),
("outputs.timeseries", "inputnode.in_files")]),
(susan, filter_smooth,
[("outputnode.smoothed_files", "inputs.timeseries")]),
(skullstrip, filter_smooth,
[("outputs.mask_file", "inputs.mask_file")]),
(skullstrip, filter_rough,
[("outputs.timeseries", "inputs.timeseries")]),
(skullstrip, filter_rough,
[("outputs.mask_file", "inputs.mask_file")]),
(filter_rough, artifacts,
[("outputs.timeseries", "timeseries")]),
(filter_rough, confounds,
[("outputs.timeseries", "inputs.timeseries")]),
(inputnode, confounds,
[("subject_id", "inputs.subject_id")]),
(skullstrip, confounds,
[("outputs.mask_file", "inputs.brain_mask")]),
(coregister, confounds,
[("outputs.tkreg_mat", "inputs.reg_file")]),
(inputnode, saveparams,
[("timeseries", "in_file")]),
])
# Optionally add a connection for unwarping
if bool(exp_info["fieldmap_template"]):
preproc.connect([
(inputnode, unwarp,
[("fieldmap", "inputs.fieldmap")]),
(prepare, unwarp,
[("out_file", "inputs.timeseries")]),
(unwarp, realign,
[("outputs.timeseries", "inputs.timeseries")])
])
else:
preproc.connect([
(prepare, realign,
[("out_file", "inputs.timeseries")]),
])
# Optionally connect the whole brain template
if bool(exp_info["whole_brain_template"]):
preproc.connect([
(inputnode, coregister,
[("whole_brain_template", "inputs.whole_brain_template")])
])
# Define the outputs of the top-level workflow
output_fields = ["smoothed_timeseries",
"unsmoothed_timeseries",
"example_func",
"mean_func",
"functional_mask",
"realign_report",
"mask_report",
"artifact_report",
"confound_file",
"flirt_affine",
"tkreg_affine",
"coreg_report",
"json_file"]
if bool(exp_info["fieldmap_template"]):
output_fields.append("unwarp_report")
outputnode = Node(IdentityInterface(output_fields), "outputs")
preproc.connect([
(realign, outputnode,
[("outputs.example_func", "example_func"),
("outputs.report", "realign_report")]),
(skullstrip, outputnode,
[("outputs.mask_file", "functional_mask"),
("outputs.report", "mask_report")]),
(artifacts, outputnode,
[("out_files", "artifact_report")]),
(coregister, outputnode,
[("outputs.tkreg_mat", "tkreg_affine"),
("outputs.flirt_mat", "flirt_affine"),
("outputs.report", "coreg_report")]),
(filter_smooth, outputnode,
[("outputs.timeseries", "smoothed_timeseries")]),
(filter_rough, outputnode,
[("outputs.timeseries", "unsmoothed_timeseries"),
("outputs.mean_file", "mean_func")]),
(confounds, outputnode,
[("outputs.confound_file", "confound_file")]),
(saveparams, outputnode,
[("json_file", "json_file")]),
])
if bool(exp_info["fieldmap_template"]):
preproc.connect([
(unwarp, outputnode,
[("outputs.report", "unwarp_report")]),
])
return preproc, inputnode, outputnode
# =========================================================================== #
def create_unwarp_workflow(name="unwarp", fieldmap_pe=("y", "y-")):
"""Unwarp functional timeseries using reverse phase-blipped images."""
inputnode = Node(IdentityInterface(["timeseries", "fieldmap"]), "inputs")
# Calculate the shift field
# Note that setting readout_times to 1 will give a fine
# map of the field, but the units will be off
# Since we don't write out the map of the field itself, it does
# not seem worth it to add another parameter for the readout times.
# (It does require that they are the same, but when wouldn't they be?)
topup = MapNode(fsl.TOPUP(encoding_direction=fieldmap_pe,
readout_times=[1] * len(fieldmap_pe)),
["in_file"], "topup")
# Unwarp the timeseries
applytopup = MapNode(fsl.ApplyTOPUP(method="jac", in_index=[1]),
["in_files",
"in_topup_fieldcoef",
"in_topup_movpar",
"encoding_file"],
"applytopup")
# Make a figure summarize the unwarping
report = MapNode(UnwarpReport(),
["orig_file", "corrected_file"], "unwarp_report")
# Define the outputs
outputnode = Node(IdentityInterface(["timeseries", "report"]), "outputs")
# Define and connect the workflow
unwarp = Workflow(name)
unwarp.connect([
(inputnode, topup,
[("fieldmap", "in_file")]),
(inputnode, applytopup,
[("timeseries", "in_files")]),
(topup, applytopup,
[("out_fieldcoef", "in_topup_fieldcoef"),
("out_movpar", "in_topup_movpar"),
("out_enc_file", "encoding_file")]),
(inputnode, report,
[("fieldmap", "orig_file")]),
(topup, report,
[("out_corrected", "corrected_file")]),
(applytopup, outputnode,
[("out_corrected", "timeseries")]),
(report, outputnode,
[("out_file", "report")]),
])
return unwarp
def create_realignment_workflow(name="realignment", temporal_interp=True,
TR=2, slice_order="up", interleaved=True):
"""Motion and slice-time correct the timeseries and summarize."""
inputnode = Node(IdentityInterface(["timeseries"]), "inputs")
# Get the middle volume of each run for motion correction
extractref = MapNode(ExtractRealignmentTarget(), "in_file", "extractref")
# Motion correct to middle volume of each run
mcflirt = MapNode(fsl.MCFLIRT(cost="normcorr",
interpolation="spline",
save_mats=True,
save_rms=True,
save_plots=True),
["in_file", "ref_file"],
"mcflirt")
# Optionally temporally interpolate to correct for slice time differences
if temporal_interp:
slicetime = MapNode(fsl.SliceTimer(time_repetition=TR),
"in_file",
"slicetime")
if slice_order == "down":
slicetime.inputs.index_dir = True
elif slice_order != "up":
raise ValueError("slice_order must be 'up' or 'down'")
if interleaved:
slicetime.inputs.interleaved = True
# Generate a report on the motion correction
mcreport = MapNode(RealignmentReport(),
["target_file", "realign_params", "displace_params"],
"mcreport")
# Define the outputs
outputnode = Node(IdentityInterface(["timeseries",
"example_func",
"report",
"motion_file"]),
"outputs")
# Define and connect the sub workflow
realignment = Workflow(name)
realignment.connect([
(inputnode, extractref,
[("timeseries", "in_file")]),
(inputnode, mcflirt,
[("timeseries", "in_file")]),
(extractref, mcflirt,
[("out_file", "ref_file")]),
(extractref, mcreport,
[("out_file", "target_file")]),
(mcflirt, mcreport,
[("par_file", "realign_params"),
("rms_files", "displace_params")]),
(extractref, outputnode,
[("out_file", "example_func")]),
(mcreport, outputnode,
[("realign_report", "report"),
("motion_file", "motion_file")]),
])
if temporal_interp:
realignment.connect([
(mcflirt, slicetime,
[("out_file", "in_file")]),
(slicetime, outputnode,
[("slice_time_corrected_file", "timeseries")])
])
else:
realignment.connect([
(mcflirt, outputnode,
[("out_file", "timeseries")])
])
return realignment
def create_skullstrip_workflow(name="skullstrip"):
"""Remove non-brain voxels from the timeseries."""
# Define the workflow inputs
inputnode = Node(IdentityInterface(["subject_id",
"timeseries",
"reg_file"]),
"inputs")
# Mean the timeseries across the fourth dimension
origmean = MapNode(fsl.MeanImage(), "in_file", "origmean")
# Grab the Freesurfer aparc+aseg file as an anatomical brain mask
getaseg = Node(io.SelectFiles({"aseg": "{subject_id}/mri/aparc+aseg.mgz"},
base_directory=os.environ["SUBJECTS_DIR"]),
"getaseg")
# Threshold the aseg volume to get a boolean mask
makemask = Node(fs.Binarize(dilate=4, min=0.5), "makemask")
# Transform the brain mask into functional space
transform = MapNode(fs.ApplyVolTransform(inverse=True,
interp="nearest"),
["reg_file", "source_file"],
"transform")
# Convert the mask to nifti and rename
convertmask = MapNode(fs.MRIConvert(out_file="functional_mask.nii.gz"),
"in_file", "convertmask")
# Use the mask to skullstrip the timeseries
stripts = MapNode(fs.ApplyMask(), ["in_file", "mask_file"], "stripts")
# Use the mask to skullstrip the mean image
stripmean = MapNode(fs.ApplyMask(), ["in_file", "mask_file"], "stripmean")
# Generate images summarizing the skullstrip and resulting data
reportmask = MapNode(MaskReport(), ["mask_file", "orig_file", "mean_file"],
"reportmask")
# Define the workflow outputs
outputnode = Node(IdentityInterface(["timeseries",
"mean_file",
"mask_file",
"report"]),
"outputs")
# Define and connect the workflow
skullstrip = Workflow(name)
skullstrip.connect([
(inputnode, origmean,
[("timeseries", "in_file")]),
(inputnode, getaseg,
[("subject_id", "subject_id")]),
(origmean, transform,
[("out_file", "source_file")]),
(getaseg, makemask,
[("aseg", "in_file")]),
(makemask, transform,
[("binary_file", "target_file")]),
(inputnode, transform,
[("reg_file", "reg_file")]),
(transform, stripts,
[("transformed_file", "mask_file")]),
(transform, stripmean,
[("transformed_file", "mask_file")]),
(inputnode, stripts,
[("timeseries", "in_file")]),
(origmean, stripmean,
[("out_file", "in_file")]),
(stripmean, reportmask,
[("out_file", "mean_file")]),
(origmean, reportmask,
[("out_file", "orig_file")]),
(transform, reportmask,
[("transformed_file", "mask_file")]),
(transform, convertmask,
[("transformed_file", "in_file")]),
(stripts, outputnode,
[("out_file", "timeseries")]),
(stripmean, outputnode,
[("out_file", "mean_file")]),
(convertmask, outputnode,
[("out_file", "mask_file")]),
(reportmask, outputnode,
[("out_files", "report")]),
])
return skullstrip
def create_bbregister_workflow(name="bbregister",
contrast_type="t2",
partial_brain=False,
init_with="fsl"):
"""Find a linear transformation to align the EPI file with the anatomy."""
in_fields = ["subject_id", "timeseries"]
if partial_brain:
in_fields.append("whole_brain_template")
inputnode = Node(IdentityInterface(in_fields), "inputs")
# Take the mean over time to get a target volume
meanvol = MapNode(fsl.MeanImage(), "in_file", "meanvol")
# Do a rough skullstrip using BET
skullstrip = MapNode(fsl.BET(), "in_file", "bet")
# Estimate the registration to Freesurfer conformed space
func2anat = MapNode(fs.BBRegister(contrast_type=contrast_type,
init=init_with,
epi_mask=True,
registered_file=True,
out_reg_file="func2anat_tkreg.dat",
out_fsl_file="func2anat_flirt.mat"),
"source_file",
"func2anat")
# Make an image for quality control on the registration
report = MapNode(CoregReport(), "in_file", "coreg_report")
# Define the workflow outputs
outputnode = Node(IdentityInterface(["tkreg_mat", "flirt_mat", "report"]),
"outputs")
bbregister = Workflow(name=name)
# Connect the registration
bbregister.connect([
(inputnode, func2anat,
[("subject_id", "subject_id")]),
(inputnode, report,
[("subject_id", "subject_id")]),
(inputnode, meanvol,
[("timeseries", "in_file")]),
(meanvol, skullstrip,
[("out_file", "in_file")]),
(skullstrip, func2anat,
[("out_file", "source_file")]),
(func2anat, report,
[("registered_file", "in_file")]),
(func2anat, outputnode,
[("out_reg_file", "tkreg_mat")]),
(func2anat, outputnode,
[("out_fsl_file", "flirt_mat")]),
(report, outputnode,
[("out_file", "report")]),
])
# Possibly connect the full_fov image
if partial_brain:
bbregister.connect([
(inputnode, func2anat,
[("whole_brain_template", "intermediate_file")]),
])
return bbregister
def create_filtering_workflow(name="filter",
hpf_cutoff=128,
TR=2,
output_name="timeseries"):
"""Scale and high-pass filter the timeseries."""
inputnode = Node(IdentityInterface(["timeseries", "mask_file"]),
"inputs")
# Grand-median scale within the brain mask
scale = MapNode(ScaleTimeseries(statistic="median", target=10000),
["in_file", "mask_file"],
"scale")
# Gaussian running-line filter
hpf_sigma = (hpf_cutoff / 2.0) / TR
filter = MapNode(fsl.TemporalFilter(highpass_sigma=hpf_sigma),
"in_file",
"filter")
# Possibly replace the mean
# (In later versions of FSL, the highpass filter removes the
# mean component. Put it back, but be flexible so this isn't
# broken on older versions of FSL).
replacemean = MapNode(ReplaceMean(output_name=output_name),
["orig_file", "filtered_file"],
"replacemean")
# Compute a final mean functional volume
meanfunc = MapNode(fsl.MeanImage(out_file="mean_func.nii.gz"),
"in_file", "meanfunc")
outputnode = Node(IdentityInterface(["timeseries",
"mean_file"]), "outputs")
filtering = Workflow(name)
filtering.connect([
(inputnode, scale,
[("timeseries", "in_file"),
("mask_file", "mask_file")]),
(scale, filter,
[("out_file", "in_file")]),
(scale, replacemean,
[("out_file", "orig_file")]),
(filter, replacemean,
[("out_file", "filtered_file")]),
(replacemean, meanfunc,
[("out_file", "in_file")]),
(replacemean, outputnode,
[("out_file", "timeseries")]),
(meanfunc, outputnode,
[("out_file", "mean_file")]),
])
return filtering
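# Worked example of the cutoff-to-sigma conversion used above: with the default
# hpf_cutoff of 128 s and TR = 2 s, fsl.TemporalFilter receives
# highpass_sigma = (128 / 2.0) / 2 = 32 volumes.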
def create_confound_extraction_workflow(name="confounds", wm_components=6):
"""Extract nuisance variables from anatomical sources."""
inputnode = Node(IdentityInterface(["timeseries",
"brain_mask",
"reg_file",
"subject_id"]),
"inputs")
# Find the subject's Freesurfer segmentation
# Grab the Freesurfer aparc+aseg file as an anatomical brain mask
getaseg = Node(io.SelectFiles({"aseg": "{subject_id}/mri/aseg.mgz"},
base_directory=os.environ["SUBJECTS_DIR"]),
"getaseg")
# Select and erode the white matter to get deep voxels
selectwm = Node(fs.Binarize(erode=3, wm=True), "selectwm")
# Transform the mask into functional space
transform = MapNode(fs.ApplyVolTransform(inverse=True,
interp="nearest"),
["reg_file", "source_file"],
"transform")
# Extract eigenvariates of the timeseries from WM and whole brain
extract = MapNode(ExtractConfounds(n_components=wm_components),
["timeseries", "brain_mask", "wm_mask"],
"extract")
outputnode = Node(IdentityInterface(["confound_file"]), "outputs")
confounds = Workflow(name)
confounds.connect([
(inputnode, getaseg,
[("subject_id", "subject_id")]),
(getaseg, selectwm,
[("aseg", "in_file")]),
(selectwm, transform,
[("binary_file", "target_file")]),
(inputnode, transform,
[("reg_file", "reg_file"),
("timeseries", "source_file")]),
(transform, extract,
[("transformed_file", "wm_mask")]),
(inputnode, extract,
[("timeseries", "timeseries"),
("brain_mask", "brain_mask")]),
(extract, outputnode,
[("out_file", "confound_file")]),
])
return confounds
# =========================================================================== #
class PrepTimeseriesInput(BaseInterfaceInputSpec):
in_file = File(exists=True)
frames_to_toss = traits.Int()
class PrepTimeseries(BaseInterface):
input_spec = PrepTimeseriesInput
output_spec = SingleOutFile
def _run_interface(self, runtime):
# Load the input timeseries
img = nib.load(self.inputs.in_file)
data = img.get_data()
aff = img.get_affine()
hdr = img.get_header()
# Trim off the equilibrium TRs
data = self.trim_timeseries(data)
# Save the output timeseries as float32
hdr.set_data_dtype(np.float32)
new_img = nib.Nifti1Image(data, aff, hdr)
new_img.to_filename("timeseries.nii.gz")
return runtime
def trim_timeseries(self, data):
"""Remove frames from beginning of timeseries."""
return data[..., self.inputs.frames_to_toss:]
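# For example, with frames_to_toss = 6 and a 2 s TR this drops the first 12 s of each
# run before any further processing (the actual value comes from exp_info).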
_list_outputs = list_out_file("timeseries.nii.gz")
class UnwarpReportInput(BaseInterfaceInputSpec):
orig_file = File(exists=True)
corrected_file = File(exists=True)
class UnwarpReport(BaseInterface):
input_spec = UnwarpReportInput
output_spec = SingleOutFile
def _run_interface(self, runtime):
# Make a discrete colormap
cmap = mpl.colors.ListedColormap(["black", "#d65f5f", "white"])
# Initialize the figure
f, axes = plt.subplots(1, 2, figsize=(9, 2.75),
facecolor="black", edgecolor="black")
for ax, fname in zip(axes, [self.inputs.orig_file,
self.inputs.corrected_file]):
# Combine the frames from this image and plot
img = nib.load(fname)
ax.imshow(self.combine_frames(img), cmap=cmap, vmin=0, vmax=2)
ax.set_axis_off()
# Save the figure and close
f.subplots_adjust(0, 0, 1, 1, 0, 0)
f.savefig("unwarping.png", facecolor="black", edgecolor="black")
plt.close(f)
return runtime
def combine_frames(self, img):
# Find a value to loosely segment the brain
d = img.get_data()
counts, bins = np.histogram(d[d > 0], 50)
thresh = bins[np.diff(counts) > 0][0]
# Show the middle slice
middle = d.shape[0] // 2
# Combine a binary mask for each phase direction
a = np.rot90(d[middle, ..., 0] > thresh)
b = np.rot90(d[middle, ..., 1] > thresh)
# Make an image showing overlap and divergence
c = np.zeros_like(a, int)
c[a ^ b] = 1
c[a & b] = 2
return c
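# The array returned above encodes 0 = background, 1 = voxels above threshold in only
# one phase-encoding direction (divergence), 2 = voxels above threshold in both
# (overlap), matching the black / red / white ListedColormap defined in _run_interface.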
_list_outputs = list_out_file("unwarping.png")
class ExtractRealignmentTarget(BaseInterface):
input_spec = SingleInFile
output_spec = SingleOutFile
def _run_interface(self, runtime):
# Load the input timeseries
img = nib.load(self.inputs.in_file)
# Extract the target volume
targ = self.extract_target(img)
# Save a new 3D image
targ_img = nib.Nifti1Image(targ,
img.get_affine(),
img.get_header())
targ_img.to_filename("example_func.nii.gz")
return runtime
def extract_target(self, img):
"""Return a 3D array with data from the middle TR."""
middle_vol = img.shape[-1] // 2
targ = np.empty(img.shape[:-1])
targ[:] = img.dataobj[..., middle_vol]
return targ
_list_outputs = list_out_file("example_func.nii.gz")
class RealignmentReportInput(BaseInterfaceInputSpec):
target_file = File(exists=True)
realign_params = File(exists=True)
displace_params = InputMultiPath(File(exists=True))
class RealignmentReportOutput(TraitedSpec):
realign_report = OutputMultiPath(File(exists=True))
motion_file = File(exists=True)
class RealignmentReport(BaseInterface):
input_spec = RealignmentReportInput
output_spec = RealignmentReportOutput
def _run_interface(self, runtime):
self.out_files = []
# Load the realignment parameters
rot = ["rot_" + dim for dim in ["x", "y", "z"]]
trans = ["trans_" + dim for dim in ["x", "y", "z"]]
df = pd.DataFrame(np.loadtxt(self.inputs.realign_params),
columns=rot + trans)
# Load the RMS displacement parameters
abs, rel = self.inputs.displace_params
df["displace_abs"] = np.loadtxt(abs)
df["displace_rel"] = pd.Series(np.loadtxt(rel), index=df.index[1:])
df.loc[0, "displace_rel"] = 0
# Write the motion file to csv
self.motion_file = op.abspath("realignment_params.csv")
df.to_csv(self.motion_file)
# Plot the motion timeseries
f = self.plot_motion(df)
self.plot_file = op.abspath("realignment_plots.png")
f.savefig(self.plot_file, dpi=100)
plt.close(f)
# Plot the target image
m = self.plot_target()
self.target_file = op.abspath("example_func.png")
m.savefig(self.target_file)
m.close()
return runtime
def plot_motion(self, df):
"""Plot the timecourses of realignment parameters."""
with sns.axes_style("whitegrid"):
fig, axes = plt.subplots(3, 1, figsize=(9, 5), sharex=True)
# Trim off all but the axis name
def axis(s):
return s[-1]
# Plot rotations
pal = sns.color_palette("Reds_d", 3)
rot_df = np.rad2deg(df.filter(like="rot")).rename(columns=axis)
rot_df.plot(ax=axes[0], color=pal, lw=1.5)
# Plot translations
pal = sns.color_palette("Blues_d", 3)
trans_df = df.filter(like="trans").rename(columns=axis)
trans_df.plot(ax=axes[1], color=pal, lw=1.5)
# Plot displacement
def ref(s):
return s[-3:]
pal = sns.color_palette("Greens_d", 2)
disp_df = df.filter(like="displace").rename(columns=ref)
disp_df.plot(ax=axes[2], color=pal, lw=1.5)
# Label the graphs
axes[0].set_xlim(0, len(df) - 1)
axes[0].axhline(0, c=".4", ls="--", zorder=1)
axes[1].axhline(0, c=".4", ls="--", zorder=1)
for ax in axes:
ax.legend(frameon=True, ncol=3, loc="best")
ax.legend_.get_frame().set_color("white")
axes[0].set_ylabel("Rotations (degrees)")
axes[1].set_ylabel("Translations (mm)")
axes[2].set_ylabel("Displacement (mm)")
fig.tight_layout()
return fig
def plot_target(self):
"""Plot a mosaic of the motion correction target image."""
m = Mosaic(self.inputs.target_file, step=1)
return m
def _list_outputs(self):
outputs = self._outputs().get()
outputs["realign_report"] = [self.target_file,
self.motion_file,
self.plot_file]
outputs["motion_file"] = self.motion_file
return outputs
class CoregReportInput(BaseInterfaceInputSpec):
in_file = File(exists=True)
subject_id = traits.Str()
class CoregReport(BaseInterface):
input_spec = CoregReportInput
output_spec = SingleOutFile
def _run_interface(self, runtime):
subjects_dir = os.environ["SUBJECTS_DIR"]
wm_file = op.join(subjects_dir, self.inputs.subject_id, "mri/wm.mgz")
wm_data = nib.load(wm_file).get_data().astype(bool).astype(int)
m = Mosaic(self.inputs.in_file, wm_data, step=3)
m.plot_contours(["#DD2222"])
m.savefig("func2anat.png")
m.close()
return runtime
_list_outputs = list_out_file("func2anat.png")
class MaskReportInput(BaseInterfaceInputSpec):
mask_file = File(exists=True)
orig_file = File(exists=True)
mean_file = File(exists=True)
class MaskReport(BaseInterface):
input_spec = MaskReportInput
output_spec = ManyOutFiles
def _run_interface(self, runtime):
self.out_files = []
self.plot_mean_image()
self.plot_mask_image()
return runtime
def plot_mean_image(self):
cmap = sns.cubehelix_palette(as_cmap=True, reverse=True,
light=1, dark=0)
m = Mosaic(self.inputs.mean_file, self.inputs.mean_file,
self.inputs.mask_file, step=1)
m.plot_overlay(vmin=0, cmap=cmap, fmt="%d")
m.savefig("mean_func.png")
m.close()
def plot_mask_image(self):
m = Mosaic(self.inputs.orig_file, self.inputs.mask_file,
self.inputs.mask_file, show_mask=False, step=1)
m.plot_mask()
m.savefig("functional_mask.png")
m.close()
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_files"] = [op.abspath("mean_func.png"),
op.abspath("functional_mask.png")]
return outputs
class ArtifactDetectionInput(BaseInterfaceInputSpec):
timeseries = File(exists=True)
mask_file = File(exists=True)
motion_file = File(exists=True)
intensity_thresh = traits.Float()
motion_thresh = traits.Float()
spike_thresh = traits.Either(traits.Float(), None)
class ArtifactDetection(BaseInterface):
input_spec = ArtifactDetectionInput
output_spec = ManyOutFiles
def _run_interface(self, runtime):
# Load the timeseries and mask files
ts = nib.load(self.inputs.timeseries).get_data()
mask = nib.load(self.inputs.mask_file).get_data().astype(bool)
# Normalize the timeseries using robust statistics
norm_ts = self.normalize_timeseries(ts, mask)
# Find the intensity artifacts
art_intensity = np.abs(norm_ts)
import pandas as pd
import os
import portalocker
import contextlib
import yaml
import subprocess
from gnn_acopf.experimental.opf_dataset import OPFDataset
from gnn_acopf.training.training_run import QualityMetric
from gnn_acopf.utils.timer import Timer
from pathlib import Path
import copy
from gnn_acopf.utils.observers import DefaultObserver, Scaler
from gnn_acopf.utils.power_net import PowerNetwork
from gnn_acopf.models.summarize_encoding_model import SummarizedEncodingModel
from gnn_acopf.julia_interface import JuliaInterface
import torch
import numpy as np
class EvaluateOPF:
def __init__(self, model, results_path):
self.model = model
self.results_path = results_path
self.results_fp = results_path / "results.csv"
self.run_fp = results_path / "runs.yaml"
self.scenarios = None
with self.synced_results():
pass
@contextlib.contextmanager
def synced_runs(self):
with portalocker.Lock(self.results_path / ".runs.lock", timeout=120) as lockfile:
lockfile.flush()
os.fsync(lockfile.fileno())
try:
with self.run_fp.open("r") as run_file:
exp_states = yaml.load(run_file, Loader=yaml.FullLoader)
except FileNotFoundError:
exp_states = {}
yield exp_states
with self.run_fp.open("w") as run_file:
yaml.dump(exp_states, run_file)
lockfile.flush()
os.fsync(lockfile.fileno())
def get_slurm_id(self):
# returns either the slurm ID or "running" if no slurm ID can be found.
try:
slurm_id = os.environ["SLURM_JOB_ID"]
except KeyError:
# no slurm available
slurm_id = "running"
return slurm_id
def is_running(self, exp_state):
try:
slurm_id = os.environ["SLURM_JOB_ID"]
except KeyError:
slurm_id = None
if exp_state in [None, "stopped", slurm_id]:
return False
try:
result = subprocess.check_output("squeue -hO jobid:15", shell=True,
stderr=subprocess.DEVNULL).decode("utf-8").strip()
result = result.split("\n")
result = [int(line.strip()) for line in result]
return exp_state in result
except subprocess.CalledProcessError:
return True
@contextlib.contextmanager
def synced_results(self):
with portalocker.Lock(self.results_path / ".results.lock", timeout=120) as lockfile:
lockfile.flush()
os.fsync(lockfile.fileno())
try:
self.results = pd.read_csv(self.results_fp)
except (pd.errors.EmptyDataError, FileNotFoundError):
self.results = pd.DataFrame(columns=[
"scenario_id",
"opf_method",
"time_taken",
"solved",
"power_generated",
"power_loss"
])
yield
self.results.to_csv(self.results_fp, index=False)
lockfile.flush()
os.fsync(lockfile.fileno())
def eval_method(self, eval_func, case_dict, jl, data, observer):
with Timer() as optimization_timer:
solution = eval_func(case_dict, jl, data=data, observer=observer)
ac_pf_result, _ = jl.run_pf(case_dict, method="ac",
previous_result=solution, print_level=0,
max_iter=1)
solved = "SOLVED" in ac_pf_result["termination_status"]
power_demand = sum([g["pd"] for g in case_dict["load"].values() if g["pd"] is not None])
power_generated = sum([g["pg"] for g in ac_pf_result["solution"]["gen"].values() if g["pg"] is not None])
power_loss = power_generated / power_demand
return {
"time_taken": optimization_timer.interval,
"solved": solved,
"power_generated": power_generated,
"power_loss": power_loss
}
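# Note on the dictionary above: "power_loss" is really the generation-to-demand ratio;
# values above 1.0 mean extra generation is covering network losses.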
def set_run_state(self, exp_name, state):
with self.synced_runs() as exp_states:
exp_states[exp_name] = state
def eval_ac_opf(self, case_dict, jl, print_level=0, **kwargs):
ac_opf_result, _ = jl.run_opf(case_dict, method="ac", print_level=print_level)
return ac_opf_result
def eval_dc_opf(self, case_dict, jl, print_level=0, **kwargs):
ac_opf_result, _ = jl.run_opf(case_dict, method="dc", print_level=print_level)
return ac_opf_result
def eval_dcac_opf(self, case_dict, jl, print_level=0, **kwargs):
ac_opf_result, _ = jl.run_opf(case_dict, method="dcac", print_level=print_level)
return ac_opf_result
def eval_model_and_ac_opf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
ac_opf_result, _ = jl.run_opf(case_dict, method="ac",
previous_result=model_output_dict, print_level=print_level)
return ac_opf_result
def eval_model_and_ac_opf_nobranch(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict, keys_to_consider=["bus", "gen"]
)
ac_opf_result, _ = jl.run_opf(case_dict, method="ac",
previous_result=model_output_dict, print_level=print_level)
return ac_opf_result
def eval_model_pf_ac_opf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
pf_result, _ = jl.run_pf(case_dict, method="ac", previous_result=model_output_dict,
print_level=print_level)
# TODO: Maybe need to combine it.
ac_opf_result, _ = jl.run_opf(case_dict, method="ac",
previous_result=pf_result, print_level=print_level)
return ac_opf_result
def eval_model_pf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
pf_result, _ = jl.run_pf(case_dict, method="ac", previous_result=model_output_dict,
print_level=print_level)
return pf_result
def eval_model(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
return output_dict
def eval_model_feasibility_check_opf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
ac_pf_result, _ = jl.run_pf(case_dict, method="ac",
previous_result=output_dict, print_level=0)
solved = "SOLVED" in ac_pf_result["termination_status"]
if not solved:
output_dict, _ = jl.run_opf(case_dict, method="ac",
previous_result=output_dict, print_level=print_level)
return output_dict
def statistical_summary(self, filter_by_solved):
group_by = ["opf_method"]
results = self.results
if filter_by_solved:
results = results[results["solved"] == 1]
grouped_results = results.groupby(group_by)
agg_dict = {c: ["mean", "std"] for c in list(self.results.columns.values)
if c not in group_by + ["scenario_id", "solved"]}
agg_dict["solved"] = ["mean", "sum"]
statistics_df = grouped_results.agg(agg_dict)
# statistics_df = statistics_df.unstack(level=[1]).reorder_levels([2, 0, 1], axis=1)
# sort whole group according to test acc
statistics_df = statistics_df.sort_values(by=[("time_taken", "mean")], ascending=True)
return statistics_df
def pretty_statistics(self, filter_by_solved):
with pd.option_context('display.max_rows', None, 'display.max_columns', None,
"display.width", 200):
pretty_statistic_string = str(self.statistical_summary(filter_by_solved))
return pretty_statistic_string
def pprint_results(self, results_by_method, n_evaluated):
for method, results in results_by_method.items():
print(method)
n_solved = len([r for r in results["solved"] if r])
print(f"\tSolved: {n_solved}")
print(f"\tTime (solved): {np.mean([results['time_taken'][i] for i in range(n_evaluated) if results['solved'][i]])}")
print(f"\tTime (all): {np.mean(results['time_taken'])}")
print(f"\tCost (solved): {np.mean([results['cost'][i] for i in range(n_evaluated) if results['solved'][i]])}")
print(f"\tPower Gen (solved): {np.mean([results['power_generated'][i] for i in range(n_evaluated) if results['solved'][i]])}")
def claim_scenario_idx(self, scenario_id):
with self.synced_runs() as exp_states:
scen_state = exp_states.get(scenario_id, None)
if not self.is_running(scen_state):
exp_states[scenario_id] = self.get_slurm_id()
return True
return False
def eval_opf(self, dataloader, observer, device, print_level=0):
jl = JuliaInterface()
self.model.eval()
self.model.to(device)
methods = {
"ac_opf": self.eval_ac_opf,
"dcac_opf": self.eval_dcac_opf,
"dc_opf": self.eval_dc_opf,
"model_ac_opf": self.eval_model_and_ac_opf,
"model": self.eval_model,
# "model_feasibility_acopf": self.eval_model_feasibility_check_opf
# "model_pf_ac_opf": self.eval_model_pf_ac_opf,
# "model_ac_opf_nobranch": self.eval_model_and_ac_opf_nobranch,
# "model_pf": self.eval_model_pf
}
results_by_method = {m: {"solved": [], "time_taken": [], "cost": []} for m in methods}
n_evaluated = 0
with torch.no_grad():
for i, data in enumerate(dataloader):
scenario_idx = data["scenario_idx"].item()
if not self.claim_scenario_idx(scenario_idx):
continue
scenario_df = pd.DataFrame()
data.to(device)
n_evaluated += 1
base_case_dict = dataloader.dataset.get_orig_data(data.scenario_idx.item())
for m, eval_m in methods.items():
case_dict = copy.deepcopy(base_case_dict)
result_dict = self.eval_method(
eval_m, case_dict, jl, data=data, observer=observer
)
result_dict["scenario_id"] = scenario_idx
result_dict["opf_method"] = m
result_dict["solved"] = float(result_dict["solved"])
single_results_df = pd.DataFrame.from_dict({k: [v] for k, v in result_dict.items()})
scenario_df = pd.concat([scenario_df, single_results_df], ignore_index=True, axis=0, sort=False)
with self.synced_results():
self.results = pd.concat([self.results, scenario_df], ignore_index=True, axis=0, sort=False)
print(f"Finished {self.results.scenario_id.nunique()} scenarios")
print("CURSTATE: ALL SCENARIOS")
print(self.pretty_statistics(filter_by_solved=False))
print("CURSTATE: SOLVED SCENARIOS")
print(self.pretty_statistics(filter_by_solved=True))
with self.synced_runs() as exp_states:
exp_states[scenario_idx] = "finished"
print("FINALSTATE: ALL SCENARIOS")
print(self.pretty_statistics(filter_by_solved=False))
print("FINALSTATE: SOLVED SCENARIOS")
print(self.pretty_statistics(filter_by_solved=True))
def main(casename, area_name, pgmin_to_zero, cluster, scaler, model_folder):
device = "cpu"
from gnn_acopf.experimental.train_models import OPFTrainAndEval
if cluster:
datapath = Path("/experiment/data")
trained_models_path = Path("/experiment/trained_models")
model_results_path = trained_models_path / model_folder / "results"
model_checkpoint_path = trained_models_path / model_folder / "checkpoints"
results_path = Path("/experiment/results")
# checkpoint_path = Path("/experiment/checkpoints")
print_level = 0
else:
datapath = Path("../../data")
results_path = Path("../../experiment")
model_results_path = results_path
model_checkpoint_path = Path("../../experiment")
print_level = 5
pn = PowerNetwork.from_pickle(datapath / f"case_{casename}.pickle", area_name=area_name,
pgmin_to_zero=pgmin_to_zero)
pn.load_scenarios_file(datapath / f"scenarios_{casename}.m")
obs = DefaultObserver(
jl=JuliaInterface(),
solution_cache_dir=datapath / f"generated_solutions/case_{casename}_solutions",
# solution_cache_dir=Path("/tmp/power_results"),
area_name=area_name,
scaler=scaler
)
dataset = OPFDataset(pn, obs)
batchnorm = True
quality_metric = QualityMetric(name="mse", compare_func="min")
# model = SimpleModel(n_features=obs.n_node_features, n_targets=obs.n_node_targets,
# n_hiddens=32, n_targets_edge=obs.n_branch_targets,
# n_edge_features=obs.n_branch_features,
# n_layers=8, batchnorm=batchnorm, residuals=True)
model = SummarizedEncodingModel(n_features=obs.n_node_features, n_targets=obs.n_node_targets,
n_hiddens=48, n_targets_edge=obs.n_branch_targets,
n_edge_features=obs.n_branch_features,
n_layers=8, batchnorm=batchnorm, residuals=True)
train_and_eval = OPFTrainAndEval(model, dataset, model_results_path, model_checkpoint_path, quality_metric,
# datasetloader=PowerDataSetLoader
)
train_and_eval.load_best_model()
data_loaders = train_and_eval.datasetloader.from_dataset(
train_and_eval.dataset,
batch_size=1,
shuffle=False,
num_workers=0
)
eval_opf = EvaluateOPF(model, results_path)
eval_opf.eval_opf(data_loaders.test, device=device, observer=obs, print_level=print_level)
def with_parsed_args():
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--run_local", action="store_true")
parser.add_argument("-d", "--dataset", required=True)
parser.add_argument("--model_to_use", default=None)
args = parser.parse_args()
scaler_200 = Scaler(scales={
"bus": (np.array([13.8, 0.85, 1.05], dtype=np.float32), np.array([230, 0.95, 1.15], dtype=np.float32)),
"load": (np.array([0, 0], dtype=np.float32), np.array([0.76, 0.22], dtype=np.float32)),
"gen": (np.array([-0.55, 0, 0, 0, 0, 0], dtype=np.float32), np.array([0, 0.13, 1.1, 0.28, 0.09, 33.5], dtype=np.float32)),
"shunt": ( | np.array([0.3], dtype=np.float32) | numpy.array |
import itertools
import numpy as np
from PartSegCore.segmentation.border_smoothing import IterativeVoteSmoothing, OpeningSmoothing, VoteSmoothing
from PartSegCore.segmentation.watershed import NeighType
class TestVoteSmoothing:
def test_cube_sides(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 3})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 4})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 5})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 6})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
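# Why these support levels flip (sketch of the counting argument): with side
# connectivity an interior voxel of the cube has 6 foreground neighbours, a face
# voxel 5, an edge voxel 4 and a corner voxel 3, so support 4 first removes the
# corners, 5 also removes the edges, and 6 erodes the whole surface layer.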
def test_cube_edges(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 6})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 7})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 9})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 10})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 13})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 14})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_vertex(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 7})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 8})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 11})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 12})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 17})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 18})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
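# Same counting argument for the 26-connected (vertex) neighbourhood: an interior
# voxel has 26 foreground neighbours, a face voxel 17, an edge voxel 11 and a corner
# voxel 7, which is why support 7 leaves the cube intact, 8 strips the corners,
# 12 strips the edges and 18 erodes the surface layer.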
def test_square_sides(self):
data = np.zeros((1, 50, 50), dtype=np.uint8)
data[:, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 2})
assert np.all(res == data)
"""
k means on the sphere
Adapted from this stack overflow answer
http://stackoverflow.com/questions/5529625/is-it-possible-to-specify-your-own-distance-function-using-scikit-learn-k-means
"""
from __future__ import print_function
from __future__ import division
import random
import numpy
_TOL_DEF=1.0e-5
_MAXITER_DEF=100
_VERBOSE_DEF=1
class KMeans(object):
"""
A class to perform K-means on the input ra,dec using spherical distances
parameters
----------
centers_guess: array
[ncen, ra, dec] starting guesses. Can reset later with set_centers()
tol: float, optional
The relative change in the average distance to
centers, signifies convergence
verbose: int, optional
How verbose. 0 silent, 1 minimal starting info, 2 prints running
distances
attributes after running
------------------------
.converged: bool
True if converged
.centers: array
the found centers
.labels: array
[N,ra,dec] array
.distances: array
Distance from each point to each center
.X: array
The data that was processed
example
-------
import kmeans_radec
from kmeans_radec import KMeans
cen_guess=numpy.zeros( (ncen, 2) )
cen_guess[:,0] = ra_guesses
cen_guess[:,1] = dec_guesses
km=KMeans(cen_guess)
km.run(X, maxiter=100)
# did it converge?
if not km.converged:
# did not converge. This might be ok, but if we want
# to run more we can
km.run(X, maxiter=maxiter)
# or we could try a different set of center guesses...
km.set_centers(cen_guess2)
km.run(X, maxiter=100)
# results are saved in attributes
print(km.centers, km.labels, km.distances)
print("copy of centers:",km.get_centers())
# once we have our centers, we can identify to which cluster
# a *different* set of points belong. This could be a set
# of random points we want to associate with the same regions
labels=km.find_nearest(X2)
# you can save the centers and load them into a KMeans
# object at a later time
km=KMeans(centers)
labels=km.find_nearest(X)
"""
def __init__(self, centers,
tol=_TOL_DEF,
verbose=_VERBOSE_DEF):
self.set_centers(centers)
self.tol=float(tol)
self.verbose=verbose
def run(self, X, maxiter=_MAXITER_DEF):
"""
run k means, either until convergence is reached or the indicated
number of iterations are performed
parameters
----------
X: array
[N, ra, dec] array
maxiter: int, optional
Max number of iterations to run.
"""
centers=self.get_centers()
_check_dims(X, self.centers)
N, dim = X.shape
ncen, cdim = centers.shape
if self.verbose:
tup=(X.shape, centers.shape, self.tol, maxiter)
print("X %s centers %s tol=%.2g maxiter=%d" % tup)
self.converged=False
allx = numpy.arange(N)
prevdist = 0
for jiter in xrange( 1, maxiter+1 ):
D = cdist_radec(X, centers) # npoints x ncenters
labels = D.argmin(axis=1) # X -> nearest centre
distances = D[allx,labels]
avdist = distances.mean() # median ?
if self.verbose >= 2:
print(" av |X - nearest centre| = %.4g" % avdist)
self.converged = (1 - self.tol) * prevdist <= avdist <= prevdist
if self.converged:
break
if jiter==maxiter:
break
prevdist = avdist
for jc in range(ncen): # (1 pass in C)
c, = numpy.where( labels == jc )
if len(c) > 0:
centers[jc] = X[c].mean( axis=0 )
if self.verbose:
print(jiter,"iterations cluster "
"sizes:", numpy.bincount(labels))
if self.verbose >= 2:
self._print_info()
self.X=X
self.centers=centers
self.labels=labels
self.distances=distances
def set_centers(self, centers):
"""
set starting centers
parameters
----------
centers: array
[Ncen] array of centers
"""
centers=numpy.asanyarray(centers)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
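# Commented minimal illustration (assumed frame, not part of the original suite) of why
# reindexing a duplicate-labelled axis must raise -- the label mapping is ambiguous:
#   df = DataFrame({'a': [1, 2, 3]}, index=[1, 1, 2])
#   df.reindex([1, 2])   # raises ValueError because label 1 appears twice in the source index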
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assert_index_equal(bf.columns, other.columns)
self.assert_index_equal(bf.index, other.index)
self.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assert_index_equal(bf.columns, self.frame.columns)
self.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
self.assertTrue(isinstance(right, Series))
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(right, expected)
# GH 9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
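# The helper above encodes the equivalence it checks -- a commented sketch with assumed
# frames a and b (illustration only, not part of the original suite):
#   joined = a.index.join(b.index, how='outer')
#   a.align(b, axis=0, join='outer')  ~  (a.reindex(index=joined), b.reindex(index=joined))
# with any method/limit then applied to both sides via fillna along fill_axis.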
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame( | np.arange(2, dtype='int64') | numpy.arange |
import os, glob
import numpy as np
from scipy.stats import pearsonr
sub_list = []
sub_list_file = f'{os.environ.get("FD_DIR")}/sublist.txt'
with open(sub_list_file) as f:
lines = f.readlines()
for line in lines:
sub_list.append(line.strip('\n').replace('_', '-'))
low_motion_sub_list = []
low_motion_sub_list_file = f'{os.environ.get("FD_DIR")}/good_30.txt'
with open(low_motion_sub_list_file) as f:
lines = f.readlines()
for line in lines:
low_motion_sub_list.append(line.strip('\n').replace('_', '-'))
high_motion_sub_list = []
for sub in sub_list:
if sub not in low_motion_sub_list:
high_motion_sub_list.append(sub)
print('subject list length: ' + str(len(sub_list)))
print('low motion subject list length: ' + str(len(low_motion_sub_list)))
print('high motion subject list length: ' + str(len(high_motion_sub_list)))
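# Illustrative only (hypothetical variables, not part of this script): once per-subject FD
# series are loaded inside get_fd_corr below, scipy.stats.pearsonr can compare two tools, e.g.
#   r, p = pearsonr([fd.mean() for fd in afni_mean], [fd.mean() for fd in fsl_mean])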
def get_fd_corr(sub_list, type, name):
run_path = f'{os.environ.get("PH_SERVER_ROOT")}/sgia/XinMotion/runs/'
afni = 'v181-MotionVariation'
afni_last_folder = 'v181-MotionVariation-last'
fsl = 'v181-MotionVariation-MC'
fsl_last_folder = 'v181-MotionVariation-last-MC'
afni_mean, afni_median, afni_first, afni_last, fsl_mean, fsl_median, fsl_first, fsl_last = [], [], [], [], [], [], [], []
for sub in sub_list:
print(sub)
# mean volume 1
afni_mean_file = glob.glob(run_path+afni+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_desc-1_framewise-displacement-'+type+'.1D')[0]
afni_mean.append(np.loadtxt(afni_mean_file))
fsl_mean_file = glob.glob(run_path+fsl+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_desc-1_framewise-displacement-'+type+'.1D')[0]
fsl_mean.append(np.loadtxt(fsl_mean_file))
# median volume 2
afni_median_file = glob.glob(run_path+afni+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_desc-2_framewise-displacement-'+type+'.1D')[0]
afni_median.append(np.loadtxt(afni_median_file))
fsl_median_file = glob.glob(run_path+fsl+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_desc-2_framewise-displacement-'+type+'.1D')[0]
fsl_median.append(np.loadtxt(fsl_median_file))
# first volume 3
afni_first_file = glob.glob(run_path+afni+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_desc-3_framewise-displacement-'+type+'.1D')[0]
afni_first.append(np.loadtxt(afni_first_file))
fsl_first_file = glob.glob(run_path+fsl+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_desc-3_framewise-displacement-'+type+'.1D')[0]
fsl_first.append(np.loadtxt(fsl_first_file))
# last volume
afni_last_file = glob.glob(run_path+afni_last_folder+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_framewise-displacement-'+type+'.1D')[0]
afni_last.append(np.loadtxt(afni_last_file))
fsl_last_file = glob.glob(run_path+fsl_last_folder+'/output/cpac_cpac-default-pipeline/'+sub+'_ses-1/func/'+sub+'_ses-1_task-movieTP_framewise-displacement-'+type+'.1D')[0]
fsl_last.append(np.loadtxt(fsl_last_file))
print('AFNI mean')
print( np.mean(afni_mean), np.std(afni_mean) )
print('FSL mean')
print( np.mean(fsl_mean), np.std(fsl_mean) )
print('AFNI median')
print( np.mean(afni_median), np.std(afni_median) )
print('FSL median')
print( np.mean(fsl_median), np.std(fsl_median) )
print('AFNI first')
print( np.mean(afni_first), np.std(afni_first) )
print('FSL first')
print( np.mean(fsl_first), np.std(fsl_first) )
print('AFNI last')
print( np.mean(afni_last), np.std(afni_last) )
print('FSL last')
print( | np.mean(fsl_last) | numpy.mean |
"""
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools
import math
import warnings
from collections import namedtuple
import numpy as np
from ..adapters import Adapter
from ..config import ConfigValidator, NumberField, StringField, ListField, ConfigError
from ..postprocessor.nms import NMS
from ..representation import DetectionPrediction
FaceDetectionLayerOutput = namedtuple('FaceDetectionLayerOutput', [
'prob_name',
'reg_name',
'anchor_index',
'anchor_size',
'win_scale',
'win_length',
'win_trans_x',
'win_trans_y'
])
class TFObjectDetectionAPIAdapter(Adapter):
"""
Class for converting output of SSD model to DetectionPrediction representation
"""
__provider__ = 'tf_object_detection'
def validate_config(self):
super().validate_config(on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'classes_out': StringField(description="Classes output layer name."),
'boxes_out': StringField(description="Boxes output layer name."),
'scores_out': StringField(description="Scores output layer name."),
'num_detections_out': StringField(description="Number of detections output layer name.")
})
return parameters
def configure(self):
self.classes_out = self.get_value_from_config('classes_out')
self.boxes_out = self.get_value_from_config('boxes_out')
self.scores_out = self.get_value_from_config('scores_out')
self.num_detections_out = self.get_value_from_config('num_detections_out')
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of DetectionPrediction objects
"""
prediction_batch = self._extract_predictions(raw, frame_meta)
classes_batch = prediction_batch[self.classes_out]
scores_batch = prediction_batch[self.scores_out]
boxes_batch = prediction_batch[self.boxes_out]
num_detections_batch = prediction_batch[self.num_detections_out].astype(int)
result = []
for identifier, classes, scores, boxes, num_detections in zip(
identifiers, classes_batch, scores_batch, boxes_batch, num_detections_batch
):
valid_classes = classes[:num_detections]
valid_scores = scores[:num_detections]
valid_boxes = boxes[:num_detections]
y_mins, x_mins, y_maxs, x_maxs = valid_boxes.T
result.append(DetectionPrediction(identifier, valid_classes, valid_scores, x_mins, y_mins, x_maxs, y_maxs))
return result
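# Commented sketch of the raw layout this adapter expects (the dict keys below are assumed
# example names, chosen when configuring 'classes_out'/'boxes_out'/'scores_out'/'num_detections_out'):
#   raw = {'detection_classes': ..., 'detection_boxes': ..., 'detection_scores': ..., 'num_detections': ...}
# Each box row is unpacked above as [y_min, x_min, y_max, x_max], typically in normalized coordinates.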
class MTCNNPAdapter(Adapter):
__provider__ = 'mtcnn_p'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update(
{
'probability_out': StringField(description='Name of Output layer with detection boxes probabilities'),
'region_out': StringField(description='Name of output layer with detected regions'),
'regions_format': StringField(
optional=True, choices=['hw', 'wh'], default='wh',
description='determination of coordinates order in regions, wh uses order x1y1x2y2, hw - y1x1y2x2'
)
}
)
return parameters
def configure(self):
self.probability_out = self.get_value_from_config('probability_out')
self.region_out = self.get_value_from_config('region_out')
self.regions_format = self.get_value_from_config('regions_format')
@staticmethod
def nms(boxes, threshold, overlap_type):
"""
Args:
boxes: [N, 5] array with columns [x1, y1, x2, y2, score]
threshold: overlap threshold, e.g. 0.5
overlap_type: 'Min' or 'Union'
Returns:
indices of the boxes kept after suppression (see the commented example after this method)
"""
if boxes.shape[0] == 0:
return np.array([])
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
scores = boxes[:, 4]
area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
inds = np.array(scores.argsort())
pick = []
while np.size(inds) > 0:
xx1 = np.maximum(x1[inds[-1]], x1[inds[0:-1]])
yy1 = np.maximum(y1[inds[-1]], y1[inds[0:-1]])
xx2 = np.minimum(x2[inds[-1]], x2[inds[0:-1]])
yy2 = np.minimum(y2[inds[-1]], y2[inds[0:-1]])
width = np.maximum(0.0, xx2 - xx1 + 1)
height = np.maximum(0.0, yy2 - yy1 + 1)
inter = width * height
if overlap_type == 'Min':
overlap = inter / np.minimum(area[inds[-1]], area[inds[0:-1]])
else:
overlap = inter / (area[inds[-1]] + area[inds[0:-1]] - inter)
pick.append(inds[-1])
inds = inds[np.where(overlap <= threshold)[0]]
return pick
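# Commented worked example (illustration only, not part of the original adapter):
#   boxes = np.array([[0, 0, 10, 10, 0.9],
#                     [1, 1, 10, 10, 0.8]])
#   MTCNNPAdapter.nms(boxes, 0.5, 'Union')   # -> [0]
# The two boxes overlap with IoU ~= 100 / 121 > 0.5, so only the higher-scoring box survives.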
def process(self, raw, identifiers=None, frame_meta=None):
total_boxes_batch = self._extract_predictions(raw, frame_meta)
results = []
for total_boxes, identifier in zip(total_boxes_batch, identifiers):
if np.size(total_boxes) == 0:
results.append(DetectionPrediction(identifier, [], [], [], [], [], []))
continue
pick = self.nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick]
regh = total_boxes[:, 3] - total_boxes[:, 1]
regw = total_boxes[:, 2] - total_boxes[:, 0]
x_mins = total_boxes[:, 0] + total_boxes[:, 5] * regw
y_mins = total_boxes[:, 1] + total_boxes[:, 6] * regh
x_maxs = total_boxes[:, 2] + total_boxes[:, 7] * regw
y_maxs = total_boxes[:, 3] + total_boxes[:, 8] * regh
scores = total_boxes[:, 4]
results.append(
DetectionPrediction(identifier, np.full_like(scores, 1), scores, x_mins, y_mins, x_maxs, y_maxs)
)
return results
@staticmethod
def generate_bounding_box(mapping, reg, scale, t, r_format):
stride = 2
cellsize = 12
mapping = mapping.T
indexes = [0, 1, 2, 3] if r_format == 'wh' else [1, 0, 3, 2]
dx1 = reg[indexes[0], :, :].T
dy1 = reg[indexes[1], :, :].T
dx2 = reg[indexes[2], :, :].T
dy2 = reg[indexes[3], :, :].T
(x, y) = np.where(mapping >= t)
yy = y
xx = x
score = mapping[x, y]
reg = np.array([dx1[x, y], dy1[x, y], dx2[x, y], dy2[x, y]])
if reg.shape[0] == 0:
pass
bounding_box = np.array([yy, xx]).T
bb1 = np.fix((stride * bounding_box + 1) / scale).T # matlab index from 1, so with "boundingbox-1"
bb2 = np.fix((stride * bounding_box + cellsize - 1 + 1) / scale).T # while python don't have to
score = np.array([score])
bounding_box_out = np.concatenate((bb1, bb2, score, reg), axis=0)
return bounding_box_out.T
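# Worked form of the coordinate mapping above (illustration only): with stride=2 and cellsize=12,
# a detection at feature-map cell [row, col] maps back to an image window with
#   top-left     = fix((2*[row, col] + 1) / scale)
#   bottom-right = fix((2*[row, col] + 12) / scale)
# i.e. a 12x12 window strided by 2 in the scaled image, rescaled to original image coordinates.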
def _extract_predictions(self, outputs_list, meta):
scales = [1] if not meta[0] or 'scales' not in meta[0] else meta[0]['scales']
total_boxes = np.zeros((0, 9), np.float)
for idx, outputs in enumerate(outputs_list):
scale = scales[idx]
mapping = outputs[self.probability_out][0, 1, :, :]
regions = outputs[self.region_out][0]
boxes = self.generate_bounding_box(mapping, regions, scale, 0.6, self.regions_format)
if boxes.shape[0] != 0:
pick = self.nms(boxes, 0.5, 'Union')
if | np.size(pick) | numpy.size |
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: base_tomophantom_loader
:platform: Unix
:synopsis: A loader that generates synthetic 3D projection full-field tomo data\
as an hdf5 dataset of any size.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import os
import h5py
import logging
import numpy as np
from mpi4py import MPI
from savu.data.chunking import Chunking
from savu.plugins.utils import register_plugin
from savu.plugins.loaders.base_loader import BaseLoader
from savu.plugins.savers.utils.hdf5_utils import Hdf5Utils
from savu.data.plugin_list import PluginList
import tomophantom
from tomophantom import TomoP2D, TomoP3D
@register_plugin
class BaseTomophantomLoader(BaseLoader):
def __init__(self, name='BaseTomophantomLoader'):
super(BaseTomophantomLoader, self).__init__(name)
self.cor = None
self.n_entries = None
def setup(self):
exp = self.exp
data_obj = exp.create_data_object('in_data', 'synth_proj_data')
data_obj.set_axis_labels(*self.parameters['axis_labels'])
self.__convert_patterns(data_obj,'synth_proj_data')
self.__parameter_checks(data_obj)
self.tomo_model = self.parameters['tomo_model']
# setting angles for parallel beam geometry
self.angles = np.linspace(0.0, 180.0-(1e-14), self.parameters['proj_data_dims'][0], dtype='float32')
path = os.path.dirname(tomophantom.__file__)
self.path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
data_obj.backing_file = self.__get_backing_file(data_obj, 'synth_proj_data')
data_obj.data = data_obj.backing_file['/']['entry1']['tomo_entry']['data']['data']
# create a phantom file
data_obj2 = exp.create_data_object('in_data', 'phantom')
data_obj2.set_axis_labels(*['voxel_x.voxel', 'voxel_y.voxel', 'voxel_z.voxel'])
self.__convert_patterns(data_obj2, 'phantom')
self.__parameter_checks(data_obj2)
data_obj2.backing_file = self.__get_backing_file(data_obj2, 'phantom')
data_obj2.data = data_obj2.backing_file['/']['phantom']['data']
data_obj.set_shape(data_obj.data.shape)
group_name = '1-TomoPhantomLoader-phantom'
self.n_entries = data_obj.get_shape()[0]
cor_val = 0.5*(self.parameters['proj_data_dims'][2])
self.cor = np.linspace(cor_val, cor_val, self.parameters['proj_data_dims'][1], dtype='float32')
self._set_metadata(data_obj, self._get_n_entries())
return data_obj, data_obj2
def __get_backing_file(self, data_obj, file_name):
fname = '%s/%s.h5' % \
(self.exp.get('out_path'), file_name)
if os.path.exists(fname):
return h5py.File(fname, 'r')
self.hdf5 = Hdf5Utils(self.exp)
dims_temp = self.parameters['proj_data_dims'].copy()
proj_data_dims = tuple(dims_temp)
if (file_name == 'phantom'):
dims_temp[0] = dims_temp[1]
dims_temp[2] = dims_temp[1]
proj_data_dims = tuple(dims_temp)
patterns = data_obj.get_data_patterns()
p_name = list(patterns.keys())[0]
p_dict = patterns[p_name]
p_dict['max_frames_transfer'] = 1
nnext = {p_name: p_dict}
pattern_idx = {'current': nnext, 'next': nnext}
chunking = Chunking(self.exp, pattern_idx)
chunks = chunking._calculate_chunking(proj_data_dims, np.int16)
h5file = self.hdf5._open_backing_h5(fname, 'w')
if file_name == 'phantom':
group = h5file.create_group('/phantom', track_order=None)
else:
group = h5file.create_group('/entry1/tomo_entry/data', track_order=None)
data_obj.dtype = np.dtype('<f4')
dset = self.hdf5.create_dataset_nofill(group, "data", proj_data_dims, data_obj.dtype, chunks=chunks)
self.exp._barrier()
slice_dirs = list(nnext.values())[0]['slice_dims']
nDims = len(dset.shape)
total_frames = np.prod([dset.shape[i] for i in slice_dirs])
sub_size = \
[1 if i in slice_dirs else dset.shape[i] for i in range(nDims)]
# need an mpi barrier after creating the file before populating it
idx = 0
sl, total_frames = \
self.__get_start_slice_list(slice_dirs, dset.shape, total_frames)
# calculate the first slice
for i in range(total_frames):
if sl[slice_dirs[idx]].stop == dset.shape[slice_dirs[idx]]:
idx += 1
if idx == len(slice_dirs):
break
tmp = sl[slice_dirs[idx]]
if (file_name == 'synth_proj_data'):
#generate projection data
gen_data = TomoP3D.ModelSinoSub(self.tomo_model, proj_data_dims[1], proj_data_dims[2],
proj_data_dims[1], (tmp.start, tmp.start + 1), -self.angles,
self.path_library3D)
else:
#generate phantom data
gen_data = TomoP3D.ModelSub(self.tomo_model, proj_data_dims[1], (tmp.start, tmp.start + 1),
self.path_library3D)
dset[tuple(sl)] = np.swapaxes(gen_data,0,1)
sl[slice_dirs[idx]] = slice(tmp.start+1, tmp.stop+1)
self.exp._barrier()
try:
#nxsfile = NXdata(h5file)
#nxsfile.save(file_name + ".nxs")
h5file.close()
except IOError as exc:
logging.debug('There was a problem trying to close the file in random_hdf5_loader')
return self.hdf5._open_backing_h5(fname, 'r')
def __get_start_slice_list(self, slice_dirs, shape, n_frames):
n_processes = len(self.exp.get('processes'))
rank = self.exp.get('process')
frames = np.array_split( | np.arange(n_frames) | numpy.arange |
import numpy as np
import pytest
from scipy.stats import (bootstrap, BootstrapDegenerateDistributionWarning,
monte_carlo_test, permutation_test)
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from .. import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
message = "`data` must be a sequence of samples."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean)
message = "`data` must contain at least one sample."
with pytest.raises(ValueError, match=message):
bootstrap(tuple(), np.mean)
message = "each sample in `data` must contain two or more observations..."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1]), np.mean)
message = ("When `paired is True`, all samples must have the same length ")
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean, vectorized='ekki')
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, axis=1.5)
message = "could not convert string to float"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=1000.5)
message = "`method` must be in"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, method='ekki')
message = "`method = 'BCa' is only available for one-sample statistics"
def statistic(x, y, axis):
mean1 = np.mean(x, axis)
mean2 = np.mean(y, axis)
return mean1 - mean2
with pytest.raises(ValueError, match=message):
bootstrap(([.1, .2, .3], [.1, .2, .3]), statistic, method='BCa')
message = "'herring' cannot be used to seed a"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, random_state='herring')
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
# for one-sample statistics, batch size shouldn't affect the result
np.random.seed(0)
x = np.random.rand(10, 11, 12)
res1 = bootstrap((x,), np.mean, batch=None, method=method,
random_state=0, axis=axis, n_resamples=100)
res2 = bootstrap((x,), np.mean, batch=10, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
assert_equal(res2.standard_error, res1.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_paired(method):
# test that `paired` works as expected
np.random.seed(0)
n = 100
x = np.random.rand(n)
y = np.random.rand(n)
def my_statistic(x, y, axis=-1):
return ((x-y)**2).mean(axis=axis)
def my_paired_statistic(i, axis=-1):
a = x[i]
b = y[i]
res = my_statistic(a, b)
return res
i = np.arange(len(x))
res1 = bootstrap((i,), my_paired_statistic, random_state=0)
res2 = bootstrap((x, y), my_statistic, paired=True, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("paired", [True, False])
def test_bootstrap_vectorized(method, axis, paired):
# test that paired is vectorized as expected: when samples are tiled,
# CI and standard_error of each axis-slice is the same as those of the
# original 1d sample
if not paired and method == 'BCa':
# should re-assess when BCa is extended
pytest.xfail(reason="BCa currently for 1-sample statistics only")
np.random.seed(0)
def my_statistic(x, y, z, axis=-1):
return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)
shape = 10, 11, 12
n_samples = shape[axis]
x = np.random.rand(n_samples)
y = np.random.rand(n_samples)
z = np.random.rand(n_samples)
res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=0, n_resamples=100)
reshape = [1, 1, 1]
reshape[axis] = n_samples
x = np.broadcast_to(x.reshape(reshape), shape)
y = np.broadcast_to(y.reshape(reshape), shape)
z = np.broadcast_to(z.reshape(reshape), shape)
res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_allclose(res2.confidence_interval.low,
res1.confidence_interval.low)
assert_allclose(res2.confidence_interval.high,
res1.confidence_interval.high)
assert_allclose(res2.standard_error, res1.standard_error)
result_shape = list(shape)
result_shape.pop(axis)
assert_equal(res2.confidence_interval.low.shape, result_shape)
assert_equal(res2.confidence_interval.high.shape, result_shape)
assert_equal(res2.standard_error.shape, result_shape)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_against_theory(method):
# based on https://www.statology.org/confidence-intervals-python/
data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=0)
alpha = 0.95
dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
expected_interval = dist.interval(confidence=alpha)
expected_se = dist.std()
res = bootstrap((data,), np.mean, n_resamples=5000,
confidence_level=alpha, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
assert_allclose(res.standard_error, expected_se, atol=3e-4)
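# The reference interval above is the classical t interval (worked equation for context):
#   mean(data) +/- t_{0.975, n-1} * sem(data),   with sem = std(data, ddof=1) / sqrt(n)
# For n = 5000 draws the bootstrap CI and standard error should land very close to it.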
tests_R = {"basic": (23.77, 79.12),
"percentile": (28.86, 84.21),
"BCa": (32.31, 91.43)}
@pytest.mark.parametrize("method, expected", tests_R.items())
def test_bootstrap_against_R(method, expected):
# Compare against R's "boot" library
# library(boot)
# stat <- function (x, a) {
# mean(x[a])
# }
# x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
# 23, 34, 50, 81, 89, 121, 134, 213)
# # Use a large value so we get a few significant digits for the CI.
# n = 1000000
# bootresult = boot(x, stat, n)
# result <- boot.ci(bootresult)
# print(result)
x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
23, 34, 50, 81, 89, 121, 134, 213])
res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected, rtol=0.005)
tests_against_itself_1samp = {"basic": 1780,
"percentile": 1784,
"BCa": 1784}
@pytest.mark.parametrize("method, expected",
tests_against_itself_1samp.items())
def test_bootstrap_against_itself_1samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n = 100 # size of sample
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The true mean is 5
dist = stats.norm(loc=5, scale=1)
stat_true = dist.mean()
# Do the same thing 2000 times. (The code is fully vectorized.)
n_replications = 2000
data = dist.rvs(size=(n_replications, n))
res = bootstrap((data,),
statistic=np.mean,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
tests_against_itself_2samp = {"basic": 892,
"percentile": 890}
@pytest.mark.parametrize("method, expected",
tests_against_itself_2samp.items())
def test_bootstrap_against_itself_2samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n1 = 100 # size of sample 1
n2 = 120 # size of sample 2
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The statistic we're interested in is the difference in means
def my_stat(data1, data2, axis=-1):
mean1 = np.mean(data1, axis=axis)
mean2 = np.mean(data2, axis=axis)
return mean1 - mean2
# The true difference in the means is -0.1
dist1 = stats.norm(loc=0, scale=1)
dist2 = stats.norm(loc=0.1, scale=1)
stat_true = dist1.mean() - dist2.mean()
# Do the same thing 1000 times. (The code is fully vectorized.)
n_replications = 1000
data1 = dist1.rvs(size=(n_replications, n1))
data2 = dist2.rvs(size=(n_replications, n2))
res = bootstrap((data1, data2),
statistic=my_stat,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
@pytest.mark.parametrize("method", ["basic", "percentile"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_3samp(method, axis):
def statistic(*data, axis=0):
# an arbitrary, vectorized statistic
return sum((sample.mean(axis) for sample in data))
def statistic_1d(*data):
# the same statistic, not vectorized
for sample in data:
assert sample.ndim == 1
return statistic(*data, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
y = np.random.rand(4, 5)
z = np.random.rand(4, 5)
res1 = bootstrap((x, y, z), statistic, vectorized=True,
axis=axis, n_resamples=100, method=method, random_state=0)
res2 = bootstrap((x, y, z), statistic_1d, vectorized=False,
axis=axis, n_resamples=100, method=method, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_1samp(method, axis):
def statistic(x, axis=0):
# an arbitrary, vectorized statistic
return x.mean(axis=axis)
def statistic_1d(x):
# the same statistic, not vectorized
assert x.ndim == 1
return statistic(x, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
n_resamples=100, batch=None, method=method,
random_state=0)
res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
n_resamples=100, batch=10, method=method,
random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_degenerate(method):
data = 35 * [10000.]
if method == "BCa":
with np.errstate(invalid='ignore'):
with pytest.warns(BootstrapDegenerateDistributionWarning):
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (np.nan, np.nan))
else:
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (10000., 10000.))
assert_equal(res.standard_error, 0)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_gh15678(method):
# Check that gh-15678 is fixed: when statistic function returned a Python
# float, method="BCa" failed when trying to add a dimension to the float
rng = np.random.default_rng(354645618886684)
dist = stats.norm(loc=2, scale=4)
data = dist.rvs(size=100, random_state=rng)
data = (data,)
res = bootstrap(data, stats.skew, method=method, n_resamples=100,
random_state=np.random.default_rng(9563))
# this always worked because np.apply_along_axis returns NumPy data type
ref = bootstrap(data, stats.skew, method=method, n_resamples=100,
random_state=np.random.default_rng(9563), vectorized=False)
| assert_allclose(res.confidence_interval, ref.confidence_interval) | numpy.testing.assert_allclose |
import torch.utils.data
import torch
import os
import numpy as np
from model import Model
import omegaconf
from train import load_test_set
from dataset import ShapeNetPartDataset
from tqdm import tqdm
from multiprocessing import cpu_count
N_PARTS = 50
N_CATS = 16
seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
               'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
               'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
               'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
               'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
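# For example, after this loop seg_label_to_cat[13] == 'Chair' and seg_label_to_cat[0] == 'Airplane',
# so a per-point part label can be mapped straight back to its object category.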
color_map = {
0: (0.65, 0.95, 0.05),
1: (0.35, 0.05, 0.35),
2: (0.65, 0.35, 0.65),
3: (0.95, 0.95, 0.65),
4: (0.95, 0.65, 0.05),
5: (0.35, 0.05, 0.05),
8: (0.05, 0.05, 0.65),
9: (0.65, 0.05, 0.35),
10: (0.05, 0.35, 0.35),
11: (0.65, 0.65, 0.35),
12: (0.35, 0.95, 0.05),
13: (0.05, 0.35, 0.65),
14: (0.95, 0.95, 0.35),
15: (0.65, 0.65, 0.65),
16: (0.95, 0.95, 0.05),
17: (0.65, 0.35, 0.05),
18: (0.35, 0.65, 0.05),
19: (0.95, 0.65, 0.95),
20: (0.95, 0.35, 0.65),
21: (0.05, 0.65, 0.95),
36: (0.05, 0.95, 0.05),
37: (0.95, 0.65, 0.65),
38: (0.35, 0.95, 0.95),
39: (0.05, 0.95, 0.35),
40: (0.95, 0.35, 0.05),
47: (0.35, 0.05, 0.95),
48: (0.35, 0.65, 0.95),
49: (0.35, 0.05, 0.65)
}
import hydra
@hydra.main(config_path='config', config_name='shapenet')
def main(cfg):
weight_path = hydra.utils.to_absolute_path('prin/state79.pkl')
torch.backends.cudnn.benchmark = True
model = Model(N_PARTS, cfg)
model.cuda()
model.load_state_dict(torch.load(weight_path))
print("{} paramerters in total".format(sum(x.numel() for x in model.parameters())))
test_set = load_test_set(rand_rot=True, aug=False, bw=cfg.bw)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=cfg.batch_size, shuffle=False, num_workers=cpu_count() // 2, drop_last=False)
model.eval()
# -------------------------------------------------------------------------------- #
total_correct = 0
shape_ious = {cat: [] for cat in seg_classes.keys()}
for batch_idx, (data, target_index, target, pt_cloud, category) in enumerate(test_loader):
# Transform category labels to one_hot.
category_labels = torch.LongTensor(category)
one_hot_labels = torch.zeros(category.size(0), 16).scatter_(1, category_labels, 1).cuda()
data, target_index, target = data.cuda(), target_index.cuda(), target.cuda()
# print (data.shape)
with torch.no_grad():
_, prediction = model(data, target_index, one_hot_labels)
prediction = prediction.view(-1, 2048, 50)
target = target.view(-1, 2048)
for j in range(target.size(0)):
cat = seg_label_to_cat[target.cpu().numpy()[j][0]]
prediction_np = prediction.cpu().numpy()[j][:, seg_classes[cat]].argmax(1) + seg_classes[cat][0]
target_np = target.cpu().numpy()[j]
correct = np.mean((prediction_np == target_np).astype(np.float32))
total_correct += correct
segp = prediction_np
segl = target_np
part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
for l in seg_classes[cat]:
if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0): # part is not present, no prediction as well
part_ious[l - seg_classes[cat][0]] = 1.0
else:
part_ious[l - seg_classes[cat][0]] = | np.sum((segl == l) & (segp == l)) | numpy.sum |
# coding: utf-8
import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def step_function(x):
return | np.array(x > 0, dtype=np.int) | numpy.array |
from re import L
import numpy as np
from pyrsistent import discard
from sympy import Q
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn.functional as F
import math
from scipy.special import softmax
from scipy.ndimage.filters import gaussian_filter, uniform_filter
from sklearn.preprocessing import normalize
from matplotlib.colors import LinearSegmentedColormap
from numpy import linalg as LA
from SpikeVidUtils import get_frame_idx
from utils import top_k_top_p_filtering
from scipy import signal
def convolve_atts_3D(stim_atts):
'''
input: (ID, T, Y, X)
'''
sigma = 2.0 # width of kernel
x = np.arange(-3,4,1) # coordinate arrays -- make sure they contain 0!
y = np.arange(-3,4,1)
z = np.arange(-3,4,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
for n_id in range(stim_atts.shape[0]):
stim_atts[n_id] = signal.convolve(stim_atts[n_id], kernel, mode="same")
return stim_atts
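# Commented usage sketch (assumed array shape, for illustration only):
#   atts = np.random.rand(5, 16, 8, 14)          # (ID, T, Y, X)
#   atts_smooth = convolve_atts_3D(atts.copy())  # smooths jointly over time and space per neuron
# Note the 7x7x7 Gaussian kernel above is not normalised, so absolute magnitudes are rescaled.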
def rollout_attentions(att):
''' Rollout attentions
Input: (L, H, ID, F)
'''
rollout_att = torch.eye(att.shape[-2], att.shape[-1])
for i in range(att.shape[0]):
if i==0:
continue
I = torch.eye(att.shape[-2], att.shape[-1])
a = att[i]
a = a.max(axis=0)[0]
a = (a + 1.0*I) / 2
a = a / a.sum(axis=-1, keepdims=True)
rollout_att = a @ rollout_att
return rollout_att
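# In equation form (matching the loop above, which skips the first layer): for l = 2..L,
#   A_hat_l = normalise_rows((max_head(A_l) + I) / 2),   R = A_hat_L @ ... @ A_hat_2 @ I
# i.e. head-maximised attention mixed with the identity and accumulated multiplicatively.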
def grad_rollout(attentions, gradients, discard_ratio=0.8, idx=None, n_layers=0):
result = None
# attentions = [rollout_attentions(torch.cat(attentions))]
# if len(attentions) > 1:
# attentions = [torch.cat(attentions).sum(0)[None, ...]]
n_layers = len(attentions) if n_layers is None else n_layers
with torch.no_grad():
for i, (attention, grad) in enumerate(zip(attentions, gradients)):
if i <= n_layers:
continue
# attention = attention if idx is None else attention[:, :, idx]
# grad = grad if idx is None else grad[:, :, idx]
attention_heads_fused = (grad*attention).mean(axis=1)
# attention_heads_fused[attenti,on_heads_fused < discard_ratio] = 0
# Drop the lowest attentions, but
# don't drop the class token
# flat = attention_heads_fused.view(attention_heads_fused.size(0), -1)
# _, indices = flat.topk(int(flat.size(-1)*discard_ratio), -1, False)
# #indices = indices[indices != 0]
# flat[0, indices] = 0
I = torch.eye(attention_heads_fused.size(-2), attention_heads_fused.size(-1))
# a = (attention_heads_fused + 1.0*I)/2
a = attention_heads_fused
# a = a.clamp(min=0)
# a = a[:, pos_index]
if result == None:
result = a
else:
# print(result.shape, a.shape)
result = result + a * result
# print(result.shape)
# # Look at the total attention between the class token,
# # and the image patches
# mask = result[0, 0 ,pos_index]
# # In case of 224x224 image, this brings us from 196 to 14
# width = int(mask.size(-1)**0.5)
# mask = mask.reshape(width, width).numpy()
# mask = mask / np.max(mask)
return result
def grad_att(attentions, gradients, discard_ratio=0.8):
with torch.no_grad():
# atts = attentions * gradients
# return atts
return attentions
def interpret(x, y, model, idx=None, n_layer=0):
def get_attention(module, n_blocks, block_size, pad=0, rollout=False):
# aggregate attention from n_Blocks
atts = []
T = block_size
for n in range(n_blocks):
att = module[n].attn.att
# n_heads = att.size()[1]
if pad != 0:
att = att[:, :, T - pad, :,]
atts.append(att)
return atts
model.zero_grad(set_to_none=True)
mconf = model.config
preds, _, _ = model(x)
logits_id = preds['id']
category_mask = torch.zeros(logits_id.size()).detach().cpu().numpy()
y_id = x['id'].flatten()
y_idx = y_id if idx == None else y_id[idx]
category_mask[:, torch.arange(len(y_id)), y_idx] = 1
category_mask = torch.from_numpy(category_mask).requires_grad_(True)
loss = torch.sum(logits_id * category_mask)
model.zero_grad()
id_id_att = get_attention(model.neural_visual_transformer.neural_state_blocks, mconf.n_state_layers, mconf.id_block_size)
id_vis_att = get_attention(model.neural_visual_transformer.neural_state_stimulus_blocks, mconf.n_stimulus_layers, mconf.id_block_size)
R_id = torch.eye(id_id_att[0].shape[-2], id_id_att[0].shape[-1])
for blk_att in id_id_att:
grad = torch.autograd.grad(loss, blk_att, retain_graph=True)[0].detach()
blk_att = blk_att.detach()
blk_att = grad * blk_att
blk_att = blk_att.clamp(min=0).mean(dim=1)
R_id = R_id + torch.matmul(blk_att, R_id)
del grad
# R_id_vis = torch.eye(id_vis_att[0].shape[-2], id_vis_att[0].shape[-1])
R_id_vis = None
R_vis = torch.eye(id_vis_att[0].shape[-1], id_vis_att[0].shape[-1])
for i, blk_att in enumerate(id_vis_att):
if i < n_layer:
continue
grad = torch.autograd.grad(loss, blk_att, retain_graph=True)[0].detach()
blk_att = blk_att.detach()
blk_att = grad.clamp(min=0) * blk_att
blk_att = blk_att.clamp(min=0).mean(dim=1)
# blk_att[blk_att < 0.75] = 0
R_id_vis = blk_att if R_id_vis is None else R_id_vis + blk_att
# R_id_vis = R_id_vis + torch.transpose(R_id, -1, -2) @ blk_att @ R_vis
del grad
if idx is not None:
R_id_vis = R_id_vis[:, idx, :,]
else:
R_id_vis = R_id_vis
model.zero_grad(set_to_none=True)
del loss
del category_mask
return R_id, R_id_vis
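# Relevance update used above, written out: per block, A_bar = mean_heads(relu(grad * A)) and
#   R_id     <- R_id + A_bar_state @ R_id       (state-state blocks)
#   R_id_vis <- R_id_vis + A_bar_stimulus       (state-stimulus blocks, summed from n_layer on)
# so the returned maps are gradient-weighted attentions for the selected spike index `idx`.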
class VITAttentionGradRollout:
"""
This class is an adaptation of Jacob Gildenblat's implementation:
https://github.com/jacobgil/vit-explain
We calculate Attention Rollout (<NAME>, 2020)
for stimulus-state attention, and condition
it on the gradient of a specific target neuron.
This way we can get neuron-specific attentions.
"""
def __init__(self, model, module, attn_layer_name='attn_drop', discard_ratio=0.5, idx=None):
self.model = model
self.module = module
self.idx = idx
self.discard_ratio = discard_ratio
for name, module in self.module.named_modules():
if attn_layer_name in name:
module.register_forward_hook(self.get_attention)
module.register_full_backward_hook(self.get_attention_gradient)
self.attentions = []
self.attention_gradients = []
def get_attention(self, module, input, output):
# output = output if self.idx is None else output[:, :, self.idx]
self.attentions.append(output.cpu())
# print(output.shape)
def get_attention_gradient(self, module, grad_input, grad_output):
grad = grad_input[0]
# grad = grad if self.idx is None else grad[:, :, self.idx]
self.attention_gradients.append(grad_input[0].cpu())
# print(grad_input[0].shape)
def __call__(self, x, y):
self.model.zero_grad()
preds, _, _ = self.model(x)
logits_id = preds['id'] # if self.idx==None else preds['id'][:, self.idx]
# return preds['id']
category_mask = torch.zeros(logits_id.size()).detach().cpu().numpy()
y_id = y['id'].flatten()
y_idx = y_id if self.idx==None else y_id[self.idx]
# y_idx = self.idx
category_mask[:, :, y_idx] = 1
category_mask = torch.from_numpy(category_mask).requires_grad_()
loss = (logits_id*category_mask).sum()
# loss = loss['id']
loss.backward()
# print(len(self.attention_gradients))
return grad_rollout(self.attentions, self.attention_gradients, self.discard_ratio, self.idx)
# return grad_att(torch.cat(self.attentions), torch.cat(self.attention_gradients)) # grad_rollout(self.attentions, self.attention_gradients, self.discard_ratio)
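# Commented usage sketch (assumed module path, mirroring interpret() above; illustration only):
#   rollout = VITAttentionGradRollout(model, model.neural_visual_transformer.neural_state_stimulus_blocks,
#                                     discard_ratio=0.8, idx=0)
#   att_map = rollout(x, y)    # gradient-weighted rollout for the neuron at position idx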
@torch.no_grad()
def get_attention(module, n_blocks, block_size, pad=0, rollout=False):
# aggregate attention from n_Blocks
atts = None
T = block_size
# TODO: get index of 166, get attentions up until that stage
for n in range(n_blocks):
att = module[n].attn.att
# n_heads = att.size()[1]
if pad != 0:
att = att[:, :, T - pad, :,]
att = att.detach().squeeze(0).to('cpu').numpy()
atts = att[None, ...] if atts is None else np.concatenate((atts, att[None, ...]))
return atts
class AttentionVis:
'''attention Visualizer'''
# def getAttention(self, spikes, n_Blocks):
# spikes = spikes.unsqueeze(0)
# b, t = spikes.size()
# token_embeddings = self.model.tok_emb(spikes)
# position_embeddings = self.model.pos_emb(spikes)
# # position_embeddings = self.model.pos_emb(spikes)
# x = token_embeddings + position_embeddings
# # aggregate attention from n_Blocks
# atts = None
# for n in n_Blocks:
# attBlock = self.model.blocks[n].attn
# attBlock(x).detach().numpy() # forward model
# att = attBlock.att.detach().numpy()
# att = att[:, 1, :, :,].squeeze(0)
# atts = att if atts is None else np.add(atts, att)
# # normalize
# atts = atts/len(n_Blocks)
# return atts
def visAttention(att):
plt.matshow(att)
att_range = att.max()
cb = plt.colorbar()
cb.ax.tick_params()
plt.show()
def grad_attentions(self, model, x, y, stoi, n_layer=0):
grad_attentions = None
for idx, id_ in enumerate(y['id'].flatten()):
y_id = y['id'].flatten()
T = len(y_id)
y_id = y_id[: T - int(x['pad'])]
# idx = np.arange(len(y_id))
_, att = interpret(x, y, model, idx=idx, n_layer=n_layer)
# grad_attentions = att[None, ...] if grad_attentions is None else torch.cat((grad_attentions, att[None, ...]))
grad_attentions = att if grad_attentions is None else torch.cat((grad_attentions, att))
model.zero_grad()
if id_ >= stoi['SOS']:
break
return grad_attentions
# def grad_attentions(self, model, x, y, stoi, n_layer=0):
# grad_attentions = None
# y_id = y['id'].flatten()
# T = len(y_id)
# y_id = y_id[: T - int(x['pad'])]
# # idx = np.arange(len(y_id))
# _, att = interpret(x, y, model, n_layer=n_layer)
# # grad_attentions = att[None, ...] if grad_attentions is None else torch.cat((grad_attentions, att[None, ...]))
# grad_attentions = att if grad_attentions is None else torch.cat((grad_attentions, att))
# grad_attentions = grad_attentions[0][:T - int(x['pad'])]
# model.zero_grad()
# return grad_attentions
# @torch.no_grad()
def att_interval_frames(self, model, module, loader, n_blocks, block_size,
rollout=False, pad_key=None, agg=False, stoi=None, max_it=None, n_layer=0):
device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
device = 'cpu'
model.to(device)
mconf = model.config
model = model.eval()
T = block_size
attention_scores = None
len_loader = len(loader) if max_it is None else max_it
pbar = tqdm(enumerate(loader), total=len_loader)
if rollout: grad_rollout = VITAttentionGradRollout(model, module)
for it, (x, y) in pbar:
pad = x[pad_key] if pad_key is not None else 0
# place data on the correct device
for key, value in x.items():
x[key] = x[key].to(device)
for key, value in y.items():
y[key] = y[key].to(device)
# att = np.swapaxes(att, -1, -2)
if rollout:
# preds, features, loss, = model(x, y)
# att = AttentionVis.get_attention(module, n_blocks, T)
# att = self.rollout_attentions(att)
# grad_rollout = VITAttentionGradRollout(model, module)
# att = grad_rollout(x, y)[0]
att = self.grad_attentions(model, x, y, stoi, n_layer=n_layer)
if att is None:
continue
if not rollout:
with torch.no_grad():
preds, features, loss, = model(x, y)
# preds_id = F.softmax(preds['id'] / 0.8, dim=-1).squeeze(0)
# ix = torch.multinomial(preds_id, num_samples=1).flatten()
att = get_attention(module, n_blocks, T)
## predict iteratively
# ix, att = self.predict_iteratively(model, mconf, x, stoi, top_k=0, top_p=0.5, temp=0.5, sample=True, pred_dt=False)
with torch.no_grad():
if agg:
t_seq = int(T - x['pad'])
# att = att - att.mean(axis=-2, keepdims=True)
# att = att - att.mean(axis=(0, 1, 2), keepdims=True)
if not rollout:
att = np.max(att, axis=1)
att = np.mean(att, axis=0)
# att = np.sum(att, axis=0)
# att = np.max(att, axis=(0, 1))
score = np.zeros((mconf.id_vocab_size, mconf.frame_block_size))
# score = score.reshape(-1, 20, 8, 14).min(axis=1)
xid = x['id'].cpu().flatten().tolist()[:t_seq]
yid = y['id'].cpu().flatten().tolist()[:t_seq]
# score[ix] = att
score[xid] = att[:t_seq]
# score[t_seq:] == 0
else:
score = att
if attention_scores is None:
attention_scores = score[None, ...]
else:
attention_scores = np.concatenate((attention_scores, score[None, ...]))
if max_it is not None and it == max_it:
break
# att_dict[int(y['id'][:, n])] = step
# atts[tuple(x['interval'].cpu().numpy().flatten())] = att_dict
return attention_scores
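# Illustrative call (hedged): the objects below are placeholders for whatever the surrounding
# notebook or script provides; only the argument pattern of att_interval_frames is shown.
#   vis = AttentionVis()
#   scores = vis.att_interval_frames(model, model.blocks, loader, n_blocks=4, block_size=T,
#                                    rollout=True, pad_key='pad', agg=True, stoi=stoi, max_it=50)
#   # `scores` stacks one per-batch score map along axis 0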
# take attentions from last step
@torch.no_grad()
def att_models(model, module, loader, n_blocks, block_size, pad_key=None):
device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
device = 'cpu'
model.to(device)
model = model.eval()
mconf = model.config
T = block_size
attention_scores = np.zeros(mconf.id_vocab_size)
pbar = tqdm(enumerate(loader), total=len(loader))
for it, (x, y) in pbar:
pad = x[pad_key] if pad_key is not None else 0
# place data on the correct device
for key, value in x.items():
x[key] = x[key].to(device)
for key, value in y.items():
y[key] = y[key].to(device)
# forward model to calculate attentions
_, _, _ = model(x)
# scores = np.array(np.zeros(len(neurons)))
att = np.zeros(mconf.id_vocab_size)
score = get_attention(module, n_blocks, T, pad)
score = np.sum(score, axis=0)  # sum over blocks
score = np.sum(score, axis=0)  # sum over heads
import numpy as np
# import matplotlib.pylab as plt
import proper
# from medis.Utils.plot_tools import quicklook_im, quicklook_wf, loop_frames,quicklook_IQ
from medis.params import tp, cp, mp, ap,iop#, fp
# from medis.Utils.misc import dprint
import medis.Atmosphere.atmos as atmos
def offset_companion(wf_array, it):
cont_scaling = np.linspace(1./ap.C_spec, 1, ap.nwsamp)
from collections.abc import Iterable
import numpy as np
def _chunks_(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
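# Illustrative: list(_chunks_([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]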
def __get_lambdas__(data):
if len(data.shape) == 4: # RGB mode (4D volume)
lambdas = {'x': lambda x: data[:, :, x, :],
'y': lambda x: data[:, x, :, :],
'z': lambda x: data[x, :, :, :]}
else: # standard 3D label volume
lambdas = {'x': lambda x: data[:, :, x],
'y': lambda x: data[:, x, :],
'z': lambda x: data[x, :, :]}
return lambdas
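# Illustrative: for a 3D label volume, __get_lambdas__(data)['z'](k) returns the k-th slice
# data[k, :, :]; the 4D branch does the same while keeping the trailing RGB channel intact.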
def __get_abs_minmax(data, axis, slices, margin=5):
bb = {}
lambdas = __get_lambdas__(data)
for a, chunk in enumerate(slices):
bb[a] = []
for i, slice_index in enumerate(chunk):
test = np.flip(np.swapaxes(np.abs(lambdas[axis](int(slice_index))),
0, 1), 0)
xs, ys = np.where(test != 0)
bb[a].append((xs, ys))
min_xs, max_xs = 9999, 0
min_ys, max_ys = 9999, 0
for a, bba in bb.items():
for xs, ys in bba:
min_xs = min(min_xs, min(xs))
max_xs = max(max_xs, max(xs))
min_ys = min(min_ys, min(ys))
max_ys = max(max_ys, max(ys))
# Create mock bounding-box
res = {}
for a, bba in bb.items():
res[a] = []
for each in bba:
i = [max(int(min_xs - margin), 0), int(max_xs + margin)],\
[max(int(min_ys - margin), 0), int(max_ys + margin)]
res[a].append(i)
return res
def __maxsize__(data):
d = []
lambdas = __get_lambdas__(data)
maxsize = 0
slice_index = 0
for slice_index in range(0, data.shape[2]):
test = np.flip(np.swapaxes(np.abs(lambdas['x'](int(slice_index))),
0, 1), 0)
if len(data.shape) == 4:
black_pixels_mask = np.all(test == [0, 0, 0], axis=-1)
else:
black_pixels_mask = np.all(test == 0, axis=-1)
import numpy as np
from env.flatland.Flatland import get_new_position
class RailGraph():
def __init__(self):
pass
def reset(self, env):
self.env = env
# self._build()
# self._recalc_weights()
# self.any_deadlocked = False
self.env.distance_map.reset(self.env.agents, self.env.rail)
def deadlock_agent(self, handle):
return
h, w = self.env.agents[handle].position
for d in range(4):
if (h, w, d) in self.nodes:
node_i = self.nodes_dict[(h, w, d)]
if min(np.min(self.amatrix[node_i]), np.min(self.amatrix[:, node_i])) != np.inf:
self.any_deadlocked = True
self.amatrix[node_i, :] = np.inf
self.amatrix[:, node_i] = np.inf
for edge in self.cell_to_edge[h][w]:
if self.amatrix[edge] != 0:
self.any_deadlocked = True
self.amatrix[edge] = np.inf
def update(self):
return
if self.any_deadlocked:
self._recalc_weights()
self.any_deadlocked = False
def dist_to_target(self, handle, h, w, d):
return self.env.distance_map.get()[handle, h, w, d]
i = self.target_i[self.env.agents[handle].target]
return self.dtarget[i, h, w, d]
def _build(self):
self.nodes = set(agent.target for agent in self.env.agents)
height, width = self.env.height, self.env.width
self.valid_pos = list()
for h in range(height):
for w in range(width):
pos = (h, w)
transition_bit = bin(self.env.rail.get_full_transitions(*pos))
total_transitions = transition_bit.count("1")
if total_transitions > 2:
self.nodes.add(pos)
if total_transitions > 0:
self.valid_pos.append((h, w))
n_nodes = set()
for h, w in self.nodes:
for d in range(4):
cell_transitions = self.env.rail.get_transitions(h, w, d)
if np.any(cell_transitions):
n_nodes.add((h, w, d))
self.nodes = n_nodes
self.dist_to_node = -np.ones((height, width, 4, 4))
self.next_node = [[[[None for _ in range(4)] for _ in range(4)] for _ in range(width)] for _ in range(height)]
self.dfs_used = np.zeros((height, width, 4))
for h in range(height):
for w in range(width):
for d in range(4):
if not self.dfs_used[h, w, d]:
self.dfs(h, w, d)
self.n_nodes = len(self.nodes)
self.nodes_dict = np.empty((height, width, 4), dtype=int)  # plain int: np.int is removed in recent NumPy
"""
Tests for periodogram module.
"""
import numpy as np
import utide.periodogram as pgram
def random_ts(ndays, dt_hours, is_complex=True):
"""Returns t (time in days) and x (random series)."""
np.random.seed(1)
npts = int(ndays * 24 / dt_hours)
if npts % 2:
npts -= 1
t = np.arange(npts, dtype=float) * dt_hours / 24
if is_complex:
x = np.random.randn(npts) + 1j * np.random.randn(npts)
else:
x = np.random.randn(npts)
return t, x
def test_fft_ls_consistency():
"""
With the exception of the frequency band including the
Nyquist, band_psd should yield identical results with
equi=True (fft method) and False (Lomb-Scargle method).
"""
t, x = random_ts(20, 0.5)
y_fft = pgram.band_psd(t, x, [1 / 12.42], equi=True)
y_ls = pgram.band_psd(t, x, [1 / 12.42], equi=False)
# skip the last frequency bin because y_fft includes nyquist
sl = slice(0, -1)
for key in ["Puu", "Pvv", "Puv"]:
np.testing.assert_array_almost_equal(y_fft[key][sl], y_ls[key][sl])
def test_uv_consistency():
t, x = random_ts(20, 0.5, is_complex=False)
xx = x + 1j * x
y_fft = pgram.band_psd(t, xx, [1 / 12.42], equi=True)
y_ls = pgram.band_psd(t, xx, [1 / 12.42], equi=False)
for y in y_fft, y_ls:
np.testing.assert_array_almost_equal(y.Puu, y.Pvv)
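# Illustrative (hedged, assuming this file is collected by pytest under a test_* filename):
#   pytest -q -k "fft_ls or uv"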
"""
Primal and dual cuts for two stage bidding strategies of energy hubs
"""
from energy_hub.bidding_strategy.bidding_strategy import EnergyHubManagement # import the energy hub management class
from numpy import zeros, ones, array, eye, hstack, vstack, inf, transpose
import numpy as np
from solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as lp
from solvers.benders_decomposition import BendersDecomposition
class TwoStageBidding():
def __init__(self):
self.name = "two_stage_bidding_strategy"
def problem_formualtion(self, ELEC_DA=None, ELEC_RT=None, BIC=None, ESS=None, CCHP=None, HVAC=None, THERMAL=None,
CHIL=None, BOIL=None, T=None, N=None):
"""
:param ELEC_DA: Day-ahead electricity data for the first-stage decision
:param ELEC_RT: Real-time electricity scenarios for the second-stage scheduling
:param BIC:
:param ESS:
:param CCHP:
:param HVAC:
:param THERMAL:
:param CHIL:
:param BOIL:
:param T:
:param N: The number of scenarios in the second stage operation.
:return:
"""
energy_hub_management = EnergyHubManagement()  # initialize the energy hub management helper
# The second-stage decisions are formulated below, without decomposition
model = energy_hub_management.problem_formulation(ELEC=ELEC_DA, CCHP=CCHP, THERMAL=THERMAL, BIC=BIC, ESS=ESS,
HVAC=HVAC, BOIL=BOIL, CHIL=CHIL, T=T)
neq = model["Aeq"].shape[0]
nx = model["Aeq"].shape[1]
if model["A"] is None:
nineq = 0
else:
nineq = model["A"].shape[0]
self.nx = nx
self.T = T
self.N = N
Aeq_second_stage = zeros((neq * N, nx * N))
beq_second_stage = zeros((neq * N, 1))
lb_second_stage = zeros((nx * N, 1))
ub_second_stage = zeros((nx * N, 1))
c_second_stage = zeros((nx * N, 1))
elec = [0] * N # using the list to store the data set
model_second_stage = [0] * N
for i in range(N):
elec[i] = {"UG_MAX": ELEC_DA["UG_MAX"],
"UG_MIN": ELEC_DA["UG_MIN"],
"UG_PRICE": ELEC_RT["UG_PRICE"][:, i],
"AC_PD": ELEC_RT["AC_PD"][:, i],
"DC_PD": ELEC_RT["DC_PD"][:, i],
"PV_PG": ELEC_RT["PV_PG"][:, i], }
model_second_stage[i] = energy_hub_management.problem_formulation(ELEC=elec[i], CCHP=CCHP, THERMAL=THERMAL,
BIC=BIC, ESS=ESS,
HVAC=HVAC, BOIL=BOIL, CHIL=CHIL, T=T)
lb_second_stage[i * nx:(i + 1) * nx] = model_second_stage[i]["lb"]
Aeq_second_stage[i * neq:(i + 1) * neq, i * nx:(i + 1) * nx] = model_second_stage[i]["Aeq"]
beq_second_stage[i * neq:(i + 1) * neq] = model_second_stage[i]["beq"]
lb_second_stage[i * nx:(i + 1) * nx] = model_second_stage[i]["lb"]
ub_second_stage[i * nx:(i + 1) * nx] = model_second_stage[i]["ub"]
c_second_stage[i * nx:(i + 1) * nx] = model_second_stage[i]["c"] / N
lb_first_stage = zeros((T, 1))
ub_first_stage = zeros((T, 1))
c_first_stage = zeros((T, 1))
Aeq_first_stage = zeros((neq * N, T))
A_first_stage = zeros((2 * T * N, T))
A_second_stage = zeros((2 * T * N, nx * N))
b_second_stage = zeros((2 * T * N, 1))
b_second_stage[0:T * N] = ELEC_DA["UG_MAX"]
b_second_stage[T * N:2 * T * N] = -ELEC_DA["UG_MIN"]
for i in range(N):
A_first_stage[i * T:(i + 1) * T, :] = eye(T) # The upper limit
A_first_stage[N * T + i * T:N * T + (i + 1) * T, :] = -eye(T) # The lower limit
for j in range(T):
A_second_stage[i * T + j, i * nx + j * model["nx"] + model["pug"]] = 1 # The upper limit
A_second_stage[N * T + i * T + j, i * nx + j * model["nx"] + model["pug"]] = -1 # The lower limit
for i in range(T):
lb_first_stage[i] = ELEC_DA["UG_MIN"]
ub_first_stage[i] = ELEC_DA["UG_MAX"]
c_first_stage[i] = ELEC_DA["UG_PRICE"][i]
for i in range(N):
Aeq_first_stage[i * neq + model["ac_eq"][0]:i * neq + model["ac_eq"][1], 0:T] = eye(T)
model["Aeq"] = hstack([Aeq_first_stage, Aeq_second_stage])
model["beq"] = beq_second_stage
if model["A"] is None:
model["A"] = hstack([A_first_stage, A_second_stage])
model["b"] = b_second_stage
else:
model["A"] = vstack([model["A"], hstack([A_first_stage, A_second_stage])])
model["b"] = vstack([model["A"], b_second_stage])
model["lb"] = vstack([lb_first_stage, lb_second_stage])
model["ub"] = vstack([ub_first_stage, ub_second_stage])
model["c"] = vstack([c_first_stage, c_second_stage])
# Test model for the boundary information
n = N - 1
nslack = 2 * T
nx_first_stage = T + nslack
neq_extended = neq + nslack
# The decision of the first stage optimization
c_first_stage = vstack([ELEC_DA["UG_PRICE"], zeros((nslack, 1)), model_second_stage[0]["c"]])
lb_first_stage = vstack([ones((T, 1)) * ELEC_DA["UG_MIN"], zeros((nslack, 1)), model_second_stage[0]["lb"]])
ub_first_stage = vstack(
[ones((T, 1)) * ELEC_DA["UG_MAX"], inf * ones((nslack, 1)), model_second_stage[0]["ub"]])
nx_extended_first_stage = nx_first_stage + nx
Aeq = zeros((neq_extended, nx_extended_first_stage))
Aeq[model["ac_eq"][0]:model["ac_eq"][1], 0:T] = eye(T)
Aeq[0:neq, nx_first_stage:nx_extended_first_stage] = model_second_stage[0]["Aeq"]
Aeq[neq:neq + T, 0:T] = eye(T)
Aeq[neq:neq + T, T:2 * T] = eye(T)
Aeq[neq + T:neq + 2 * T, 0:T] = -eye(T)
Aeq[neq + T:neq + 2 * T, 2 * T:3 * T] = eye(T)
beq = vstack(
[model_second_stage[0]["beq"], ones((T, 1)) * ELEC_DA["UG_MAX"], -ones((T, 1)) * ELEC_DA["UG_MIN"]])
Aeq_first_stage = zeros((neq_extended * n, nx_extended_first_stage))
Aeq_second_stage = zeros((neq_extended * n, nx * n))
#!/usr/bin/env python
import os
import h5py
import numpy as np
import matplotlib.pyplot as plt
import fluidsim as fls
from base import (
_k_f,
set_figsize,
matplotlib_rc,
_index_where,
load_sim as load_sim_from_path,
)
from paths import paths_lap as paths_sim, exit_if_figure_exists
from make_fig_phys_fields_wave import fig_phys_subplot
load_sim = lambda short_name: load_sim_from_path(
paths_sim.get(short_name), coarse=False
)
if __name__ == "__main__":
matplotlib_rc(fontsize=10)
path_fig = exit_if_figure_exists(__file__)
set_figsize(6.65, 5.8)
fig, axes = plt.subplots(2, 2)
short_names = [
"noise_c10nh960Buinf", # WL1
"noise_c10nh2880Buinf", # WL3
"noise_c200nh960Buinf", # WL17
"noise_c200nh2880Buinf", # WL18
]
# 'noise_c20nh2880Buinf', # WL7
# 'noise_c200nh2880Buinf' # WL13
# ]
# sim0 = load_sim(short_names[0])
# sim1 = sim0
# sim1 = load_sim(short_names[1])
# keys = ['<KEY>', '<KEY>']
# for ax, sim, key_field in zip(axes.ravel(), [sim0, sim0, sim1, sim1], keys):
keys = ["div"] * 4
for ax, sim, key_field in zip(axes.ravel(), map(load_sim, short_names), keys):
vmax = 10 if key_field == "div" else 3.5
vmin = -50 if key_field == "div" else -3.5
fig_phys_subplot(
sim,
fig,
ax,
key_field,
x_slice=[0, 3.01],
y_slice=[0, 3.01],
vmax=vmax,
vmin=vmin,
)
sim.output.close_files()
ax.set_xticks(np.arange(0, 4.0))
from __future__ import unicode_literals
from __future__ import absolute_import
# system modules
import math
import os
import tempfile
import pandas as pd
from numpy import random
from random import sample
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
import scipy.linalg
# QGIS modules
from qgis.core import *
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import *
from qgis.PyQt import uic
from PyQt5.QtCore import *
from .environment import get_ui_path
UI_PATH = get_ui_path('ui_rain_generator.ui')
class PluginDialog(QDialog):
def __init__(self, iface, parent=None, flags=Qt.WindowFlags()):
QDialog.__init__(self, parent, flags)
uic.loadUi(UI_PATH, self)
self.iface = iface
self.input_layer = None
self.RainGaugeLayer.setFilters(QgsMapLayerProxyModel.PointLayer)
self.GenerationAreaLayer.setFilters(QgsMapLayerProxyModel.PolygonLayer)
self.DataAddressField.setFilters(QgsFieldProxyModel.String)
self.RainGaugeLayer.layerChanged.connect(self.UpdateFields)
self.AnalyzeAllDataBox.stateChanged.connect(self.UpdateUntilFromBoxes)
self.SpatialInterpolationMethodBox.activated.connect(self.UpdateExponentFactorField)
self.SaveSpatialInterpolationBox.stateChanged.connect(self.UpdateOutputLocation)
self.SaveStormStatisticsBox.stateChanged.connect(self.UpdateOutputLocation)
self.RainGaugeLayer.setLayer(None)
self.GenerationAreaLayer.setLayer(None)
self.SpatialInterpolationMethodBox.addItem("Inversed Distance Weighting")
self.SpatialInterpolationMethodBox.addItem("Trend Surface Analysis (Polynomial 1st Order)")
self.SpatialInterpolationMethodBox.addItem("Trend Surface Analysis (Polynomial 2nd Order)")
# self.SpatialInterpolationMethodBox.setCurrentIndex(-1)
self.DelimiterBox.addItem("space")
self.DelimiterBox.addItem(",")
self.DelimiterBox.addItem("-")
self.dxBox.setValue(5000)
self.dyBox.setValue(5000)
self.browseButton.clicked.connect(self.onBrowseButtonClicked)
self.browseButton_dataanalysis.clicked.connect(self.onBrowseButtonClicked_dataanalysis)
self.browseButton.setAutoDefault(False)
self.browseButton_dataanalysis.setAutoDefault(False)
self.FromBox.setEnabled(False)
self.UntilBox.setEnabled(False)
self.CheckButton2.setEnabled(False)
self.label_30.setEnabled(False)
self.label_31.setEnabled(False)
self.folderEdit_dataanalysis.setEnabled(False)
self.browseButton_dataanalysis.setEnabled(False)
self.ProcessButton.setEnabled(False)
self.CheckButton.setEnabled(False)
self.ExponentFactorBox.setEnabled(False)
self.label_32.setEnabled(False)
self.groupBox_2.setEnabled(False)
self.groupBox_3.setEnabled(False)
self.groupBox_5.setEnabled(False)
def UpdateFields(self, layer):
self.DataAddressField.setLayer(self.RainGaugeLayer.currentLayer())
self.FromBox.clear()
self.UntilBox.clear()
self.groupBox_2.setEnabled(False)
self.groupBox_3.setEnabled(False)
self.groupBox_5.setEnabled(False)
self.ProcessButton.setEnabled(False)
def UpdateOutputLocation(self):
if self.SaveSpatialInterpolationBox.isChecked() or self.SaveStormStatisticsBox.isChecked():
self.folderEdit_dataanalysis.setEnabled(True)
self.browseButton_dataanalysis.setEnabled(True)
else:
self.folderEdit_dataanalysis.setEnabled(False)
self.browseButton_dataanalysis.setEnabled(False)
def UpdateExponentFactorField(self):
if self.SpatialInterpolationMethodBox.currentText() == "Inversed Distance Weighting":
self.ExponentFactorBox.setEnabled(True)
self.label_32.setEnabled(True)
else:
self.ExponentFactorBox.setEnabled(False)
self.label_32.setEnabled(False)
def UpdateUntilFromBoxes(self):
if self.AnalyzeAllDataBox.isChecked():
self.FromBox.setEnabled(False)
self.UntilBox.setEnabled(False)
self.CheckButton2.setEnabled(False)
self.label_30.setEnabled(False)
self.label_31.setEnabled(False)
self.groupBox_2.setEnabled(True)
else:
self.FromBox.setEnabled(True)
self.UntilBox.setEnabled(True)
self.CheckButton2.setEnabled(True)
self.label_30.setEnabled(True)
self.label_31.setEnabled(True)
self.groupBox_2.setEnabled(False)
self.groupBox_3.setEnabled(False)
def onBrowseButtonClicked(self):
currentFolder = self.folderEdit.text()
folder = QFileDialog.getExistingDirectory(self.iface.mainWindow(), 'Rain Generator', currentFolder)
if folder != '':
self.folderEdit.setText(folder)
self.folderEdit.editingFinished.emit()
def onBrowseButtonClicked_dataanalysis(self):
currentFolder = self.folderEdit_dataanalysis.text()
folder = QFileDialog.getExistingDirectory(self.iface.mainWindow(), 'Rain Generator', currentFolder)
if folder != '':
self.folderEdit_dataanalysis.setText(folder)
self.folderEdit_dataanalysis.editingFinished.emit()
class RainGenerator(object):
def __init__(self, iface):
self.iface = iface
self.dialog = None
self.cancel = False
self.act = QAction('Rain Generator', iface.mainWindow())
self.act.triggered.connect(self.execDialog)
def initGui(self, menu=None):
if menu is not None:
menu.addAction(self.act)
else:
self.iface.addToolBarIcon(self.act)
def unload(self, menu=None):
if menu is None:
menu.removeAction(self.act)
else:
self.iface.removeToolBarIcon(self.act)
def execDialog(self):
"""
"""
self.dialog = PluginDialog(self.iface, self.iface.mainWindow())
self.dialog.accepted.connect(self.execTool)
self.dialog.rejected.connect(self.quitDialog)
self.dialog.setModal(False)
self.act.setEnabled(False)
self.dialog.show()
self.dialog.ProcessAreaButton.clicked.connect(self.CreateGenerationArea)
self.dialog.CheckButton.clicked.connect(self.CheckFiles)
self.dialog.ProcessButton.clicked.connect(self.PreSpatialInterpolation)
self.dialog.CheckButton2.clicked.connect(self.AnalyzeFromUntil)
self.dialog.UpdateButton.clicked.connect(self.PreCheckFiles)
def scheduleAbort(self):
self.cancel = True
def quitDialog(self):
self.dialog = None
self.act.setEnabled(True)
self.cancel = False
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
# checking files
data = []
ngauges = 0
ntimes = 0
nrains = 0
############################################################
# updates the time and rain column values
def PreCheckFiles(self):
if type(self.dialog.RainGaugeLayer.currentLayer()) == type(None):
self.dialog.iface.messageBar().pushCritical(
'Rain Generator',
'No Layer Selected !'
)
return
files, ok = QgsVectorLayerUtils.getValues(self.dialog.RainGaugeLayer.currentLayer(),
self.dialog.DataAddressField.expression(), False)
if not ok:
return
for i, locations in enumerate(files):
address = locations.replace("\\", "/")
self.dialog.TimeColumnBox.clear()
self.dialog.RainColumnBox.clear()
try:
if self.dialog.DelimiterBox.currentText() == "space":
df = pd.read_csv(address.strip("\u202a"), delimiter=" ")
else:
df = pd.read_csv(address.strip("\u202a"), delimiter=self.dialog.DelimiterBox.currentText())
for c in df.columns:
self.dialog.TimeColumnBox.addItem(c)
self.dialog.RainColumnBox.addItem(c)
except:
return
self.dialog.CheckButton.setEnabled(True)
self.dialog.FromBox.clear()
self.dialog.UntilBox.clear()
self.dialog.groupBox_2.setEnabled(False)
self.dialog.groupBox_3.setEnabled(False)
self.dialog.groupBox_5.setEnabled(False)
self.dialog.ProcessButton.setEnabled(False)
self.data = []
def CheckFiles(self):
self.data = []
files, ok = QgsVectorLayerUtils.getValues(self.dialog.RainGaugeLayer.currentLayer(),
self.dialog.DataAddressField.expression(), False)
if not ok:
self.iface.messageBar().pushCritical(
'Rain Generator',
'Invalid File Locations!'
)
return
numberoftimes = 0
numberofrains = 0
for i, locations in enumerate(files):
address = locations.replace("\\", "/")
if not os.path.isfile(address.strip("\u202a")):
self.iface.messageBar().pushCritical(
'Rain Generator',
'File Does Not Exist!'
)
return
###################################
# f = open(address.strip("\u202a"), "r")
# if self.dialog.HeaderBox.isChecked():
# lines = f.readlines()[1:]
# else:
# lines = f.readlines()
# times = []
# rains = []
# for x in lines:
# times.append(x.split(' ')[0])
# rains.append(x.split(' ')[1])
# f.close()
# if len(times) >= numberoftimes:
# numberoftimes = len(times)
# if len(rains) >= numberofrains:
# numberofrains = len(rains)
#######################################
try:
if self.dialog.DelimiterBox.currentText() == "space":
df = pd.read_csv(address.strip("\u202a"), delimiter=" ")
else:
df = pd.read_csv(address.strip("\u202a"), delimiter=self.dialog.DelimiterBox.currentText())
times = df[self.dialog.TimeColumnBox.currentText()].tolist()
rains = df[self.dialog.RainColumnBox.currentText()].tolist()
if len(times) >= numberoftimes:
numberoftimes = len(times)
if len(rains) >= numberofrains:
numberofrains = len(rains)
except:
self.iface.messageBar().pushCritical(
'Rain Generator',
'Could not read Files!'
)
return
#######################################
# putting data in an array
self.ngauges = len(files)
self.ntimes = numberoftimes
self.nrains = numberofrains
for x in range(self.ngauges):
self.data.append([])
for y in range(2):
self.data[x].append([])
# for z in range(nrains):
# data[x][y].append(0)
for i, locations in enumerate(files):
address = locations.replace("\\", "/")
if self.dialog.DelimiterBox.currentText() == "space":
df = pd.read_csv(address.strip("\u202a"), delimiter=" ")
else:
df = pd.read_csv(address.strip("\u202a"), delimiter=self.dialog.DelimiterBox.currentText())
times = df[self.dialog.TimeColumnBox.currentText()].tolist()
rains = df[self.dialog.RainColumnBox.currentText()].tolist()
for j in range(len(times)):
self.data[i][0].append(times[j])
self.data[i][1].append(rains[j])
print(self.data)
# filling the for and until boxes
self.dialog.FromBox.clear()
self.dialog.UntilBox.clear()
lengths = []
for j in range(len(self.data)):
lengths.append(len(self.data[j][0]))
for k in self.data[lengths.index(max(lengths))][0]: # adds the time values from the longest time series
self.dialog.FromBox.addItem(str(k))
self.dialog.UntilBox.addItem(str(k))
# self.dialog.FromBox.currentIndex(0)
# self.dialog.UntilBoxBox.currentIndex(min(lengths)-1)
if self.dialog.AnalyzeAllDataBox.isChecked():
self.dialog.groupBox_2.setEnabled(True)
self.iface.messageBar().pushSuccess(
'Rain Generator',
'Files seem ok !'
)
##################################################################################
def AnalyzeFromUntil(self):
# checks if the values in the from and until boxes are correct and puts them in self.data
tempdata = []
for x in range(len(self.data)):
tempdata.append([])
for y in range(2):
tempdata[x].append([])
fromindex = 0
untilindex = 0
for i in range(len(self.data)):
if self.dialog.FromBox.currentText() not in str(
self.data[i][0]) or self.dialog.UntilBox.currentText() not in str(self.data[i][0]):
self.iface.messageBar().pushCritical(
'Rain Generator',
'Entered Values Dont Exist in At least One of the Input Files !'
)
return
for j in range(len(self.data[i][0])):
if str(self.data[i][0][j]) == self.dialog.FromBox.currentText():
fromindex = j
if str(self.data[i][0][j]) == self.dialog.UntilBox.currentText():
untilindex = j
if fromindex > untilindex:
self.iface.messageBar().pushCritical(
'Rain Generator',
'The Values Entered Are Not Valid !'
)
return
for k in range(fromindex, untilindex + 1):
tempdata[i][0].append(self.data[i][0][k])
tempdata[i][1].append(self.data[i][1][k])
self.data = tempdata
self.dialog.groupBox_2.setEnabled(True)
print(self.data)
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
# spatial interpolation
##########################################################################
# layer2 = spatial interpolation layer
layer2 = QgsVectorLayer("Polygon", 'Generation_Area', 'memory')
nx = 0
ny = 0
def CreateGenerationArea(self):
if type(self.dialog.GenerationAreaLayer.currentLayer()) == type(None):
self.dialog.iface.messageBar().pushCritical(
'Rain Generator',
'No Layer Selected !'
)
return
self.layer2 = QgsVectorLayer("Polygon", 'Generation_Area', 'memory')
layer = self.dialog.GenerationAreaLayer.currentLayer()
ex = layer.extent()
xmax = ex.xMaximum()
ymax = ex.yMaximum()
xmin = ex.xMinimum()
ymin = ex.yMinimum()
prov = self.layer2.dataProvider()
fields = QgsFields()
fields.append(QgsField('ID', QVariant.Int, '', 10, 0))
fields.append(QgsField('XMIN', QVariant.Double, '', 24, 6))
fields.append(QgsField('XMAX', QVariant.Double, '', 24, 6))
fields.append(QgsField('YMIN', QVariant.Double, '', 24, 6))
fields.append(QgsField('YMAX', QVariant.Double, '', 24, 6))
prov.addAttributes(fields)
self.layer2.updateExtents()
self.layer2.updateFields()
if self.dialog.dxBox.value() <= 0 or self.dialog.dyBox.value() <= 0:
self.dialog.iface.messageBar().pushCritical(
'Rain Generator',
'Invalid Values for dx or dy !'
)
return
else:
hspacing = self.dialog.dxBox.value()
vspacing = self.dialog.dyBox.value()
self.nx = math.ceil((xmax - xmin) / hspacing)
self.ny = math.ceil((ymax - ymin) / vspacing)
id = 0
y = ymax
while y >= ymin:
x = xmin
while x <= xmax:
point1 = QgsPointXY(x, y)
point2 = QgsPointXY(x + hspacing, y)
point3 = QgsPointXY(x + hspacing, y - vspacing)
point4 = QgsPointXY(x, y - vspacing)
vertices = [point1, point2, point3, point4] # Vertices of the polygon for the current id
inAttr = [id, x, x + hspacing, y - vspacing, y]
feat = QgsFeature()
feat.setGeometry(QgsGeometry().fromPolygonXY([vertices])) # Set geometry for the current id
feat.setAttributes(inAttr) # Set attributes for the current id
prov.addFeatures([feat])
x = x + hspacing
id += 1
y = y - vspacing
self.layer2.setCrs(
QgsCoordinateReferenceSystem(self.iface.mapCanvas().mapSettings().destinationCrs().authid()))
self.layer2.updateExtents()
QgsProject.instance().addMapLayer(self.layer2)
self.dialog.groupBox_5.setEnabled(True)
self.dialog.ProcessButton.setEnabled(True)
####################################################################
def PreSpatialInterpolation(self):
self.dialog.StatusIndicator.setText("Performing Spatial Interpolation...")
QTimer.singleShot(50, self.SpatialInterpolation) # waits half a second for the message to be displayed
#############################################################################################
def SpatialInterpolation(self):
foldername = self.dialog.folderEdit_dataanalysis.text()
if self.dialog.SaveSpatialInterpolationBox.isChecked() or self.dialog.SaveStormStatisticsBox.isChecked():
if not foldername:
self.iface.messageBar().pushCritical(
'Rain Generator',
'No output folder given!'
)
return
filepath = os.path.join(tempfile.gettempdir(), "RainfallSpatialInterpolation" + '.txt')
try: # deletes previous files
if os.path.isfile(filepath):
os.remove(filepath)
except:
pass
try:
file = open(filepath, 'w')
file.close()
except:
pass
with open(filepath, 'a') as SpatialInterpolation:
raingaugelocations = []
generationlocations = []
# getting the locations of raingauges
point_layer = self.dialog.RainGaugeLayer.currentLayer()
features = point_layer.getFeatures()
for feature in features:
buff = feature.geometry()
raingaugelocations.append(buff.asPoint())
# getting the generation locations
area_layer = self.layer2
features = area_layer.getFeatures()
for feature in features:
buff = feature.geometry()
generationlocations.append(buff.centroid().asPoint())
# calculate generation duration
rainlengths = []
for j in range(len(self.data)):
rainlengths.append(len(self.data[j][0]))
###############################################################
# time viewer layer
if self.dialog.TImeVieweLayerBox.isChecked():
layer = self.layer2
feats = [feat for feat in layer.getFeatures()]
timeviewerlayer = QgsVectorLayer("Polygon", 'Time_Viewer_Layer', 'memory')
timeviewerlayer_data = timeviewerlayer.dataProvider()
attr = layer.dataProvider().fields().toList()
timeviewerlayer_data.addAttributes(attr)
timeviewerlayer.dataProvider().addAttributes(
[QgsField("Boundary Value", QVariant.Double), QgsField("date_time", QVariant.Double)])
for i in range(min(rainlengths)):
timeviewerlayer_data.addFeatures(feats)
fieldids = []
fields = timeviewerlayer.dataProvider().fields()
# deleting extra fields
fieldstodelete = ["XMIN", "XMAX", "YMIN", "YMAX"]
for field in fields:
if field.name() in fieldstodelete:
fieldids.append(fields.indexFromName(field.name()))
timeviewerlayer.dataProvider().deleteAttributes(fieldids)
timeviewerlayer.setCrs(
QgsCoordinateReferenceSystem(self.iface.mapCanvas().mapSettings().destinationCrs().authid()))
timeviewerlayer.updateFields()
##################################################################
#################################################################################################
# Inversed Distance Weighting
if self.dialog.SpatialInterpolationMethodBox.currentText() == "Inversed Distance Weighting":
# writing the file
for i in range(len(generationlocations)):
SpatialInterpolation.write('BEGIN\n')
SpatialInterpolation.write(
'%s %s area #Length [m²/s], Area [m/s], waterlevel [m], point [m³/s]\n' % (
str(i), str(min(rainlengths))))
counter = 0
n = self.dialog.ExponentFactorBox.value() # exponent factor for the invert distance weighting formula
while counter + 1 <= min(rainlengths):
upperformula = 0
lowerformula = 0
for j in range(len(self.data)):
distance = raingaugelocations[j].distance(generationlocations[i])
upperformula = upperformula + ((1 / (distance ** n)) * float(self.data[j][1][counter]))
lowerformula = lowerformula + (1 / (distance ** n))
rainvalue = round((upperformula / lowerformula), 3)
SpatialInterpolation.write(
'%s %s #%s mm/h\n' % (str(counter), str(rainvalue), str(rainvalue)))
###############################################
# time viewer layer
if self.dialog.TImeVieweLayerBox.isChecked():
fields = timeviewerlayer.dataProvider().fields()
datetimefieldid = fields.indexFromName("date_time")
rainvaluefieldid = fields.indexFromName("Boundary Value")
idfieldid = fields.indexFromName("ID")
featureids = []
for feature in timeviewerlayer.getFeatures():
if float(feature.attributes()[idfieldid]) == float(i):
featureids.append(feature.id())
try:
atts = {
datetimefieldid: float(self.data[rainlengths.index(min(rainlengths))][0][counter]),
rainvaluefieldid: rainvalue}
except:
atts = {datetimefieldid: self.data[rainlengths.index(min(rainlengths))][0][counter],
rainvaluefieldid: rainvalue}
timeviewerlayer.dataProvider().changeAttributeValues({featureids[counter]: atts})
###############################################
if counter + 1 == min(rainlengths):
SpatialInterpolation.write('!END')
SpatialInterpolation.write('\n\n')
counter = counter + 1
######################################################################################################
# Trend Surface Analysis (Polynomial 1st Order)
elif self.dialog.SpatialInterpolationMethodBox.currentText() == "Trend Surface Analysis (Polynomial 1st Order)":
allrainvalues = []
for counter in range(min(rainlengths)):
xs = []
ys = []
zs = []
# putting all x and y and z values in seperate arrays
for r, i in enumerate(raingaugelocations):
xs.append(i.x())
ys.append(i.y())
zs.append(float(self.data[r][1][counter]))
data = np.c_[xs, ys, zs]
# grid covering the domain of the data
# getting the minimum and maximum x and ys of generation area
layer = self.dialog.GenerationAreaLayer.currentLayer()
ex = layer.extent()
xmax = ex.xMaximum()
ymax = ex.yMaximum()
xmin = ex.xMinimum()
ymin = ex.yMinimum()
X, Y = np.meshgrid(np.linspace(xmin, xmax, self.dialog.dxBox.value()),
np.linspace(ymin, ymax, self.dialog.dyBox.value()))
order = 1 # 1: linear, 2: quadratic
if order == 1:
# best-fit linear plane
A = np.c_[data[:, 0], data[:, 1], np.ones(data.shape[0])]
C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2]) # coefficients
# formula
# Z = C[0] * X + C[1] * Y + C[2]
rainvaluesintimestep = []
for i in generationlocations:
value = (C[0] * i.x()) + (C[1] * i.y()) + C[2]
rainvaluesintimestep.append(value)
allrainvalues.append(rainvaluesintimestep)
# writing the file
for i in range(len(generationlocations)):
SpatialInterpolation.write('BEGIN\n')
SpatialInterpolation.write(
'%s %s area #Length [m²/s], Area [m/s], waterlevel [m], point [m³/s]\n' % (
str(i), str(min(rainlengths))))
counter = 0
while counter + 1 <= min(rainlengths):
rainvalue = float(allrainvalues[counter][i])
###############################################
# time viewer layer
if self.dialog.TImeVieweLayerBox.isChecked():
fields = timeviewerlayer.dataProvider().fields()
datetimefieldid = fields.indexFromName("date_time")
rainvaluefieldid = fields.indexFromName("Boundary Value")
idfieldid = fields.indexFromName("ID")
featureids = []
for feature in timeviewerlayer.getFeatures():
if float(feature.attributes()[idfieldid]) == float(i):
featureids.append(feature.id())
try:
atts = {
datetimefieldid: float(self.data[rainlengths.index(min(rainlengths))][0][counter]),
rainvaluefieldid: rainvalue}
except:
atts = {datetimefieldid: self.data[rainlengths.index(min(rainlengths))][0][counter],
rainvaluefieldid: rainvalue}
timeviewerlayer.dataProvider().changeAttributeValues({featureids[counter]: atts})
###############################################
SpatialInterpolation.write(
'%s %s #%s mm/h\n' % (str(counter), str(rainvalue), str(rainvalue)))
if counter + 1 == min(rainlengths):
SpatialInterpolation.write('!END')
SpatialInterpolation.write('\n\n')
counter = counter + 1
######################################################################################
elif self.dialog.SpatialInterpolationMethodBox.currentText() == "Trend Surface Analysis (Polynomial 2nd Order)":
allrainvalues = []
for counter in range(min(rainlengths)):
xs = []
ys = []
zs = []
# putting all x and y and z values in seperate arrays
for r, i in enumerate(raingaugelocations):
xs.append(i.x())
ys.append(i.y())
zs.append(float(self.data[r][1][counter]))
data = np.c_[xs, ys, zs]
# grid covering the domain of the data
# getting the minimum and maximum x and ys of generation area
layer = self.dialog.GenerationAreaLayer.currentLayer()
ex = layer.extent()
xmax = ex.xMaximum()
ymax = ex.yMaximum()
xmin = ex.xMinimum()
ymin = ex.yMinimum()
X, Y = np.meshgrid(np.linspace(xmin, xmax, self.dialog.dxBox.value()),
np.linspace(ymin, ymax, self.dialog.dyBox.value()))
order = 2 # 2: quadratic
if order == 2:
# best-fit quadratic curve
A = np.c_[np.ones(data.shape[0]), data[:, :2], np.prod(data[:, :2], axis=1), data[:, :2] ** 2]
C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2])
# formula
# Z = C[4]*X**2. + C[5]*Y**2. + C[3]*X*Y + C[1]*X + C[2]*Y + C[0]
rainvaluesintimestep = []
for i in generationlocations:
value = C[4] * i.x() ** 2. + C[5] * i.y() ** 2. + C[3] * i.x() * i.y() + C[1] * i.x() + C[
2] * i.y() + C[0]
rainvaluesintimestep.append(value)
allrainvalues.append(rainvaluesintimestep)
# writing the file
for i in range(len(generationlocations)):
SpatialInterpolation.write('BEGIN\n')
SpatialInterpolation.write(
'%s %s area #Length [m²/s], Area [m/s], waterlevel [m], point [m³/s]\n' % (
str(i), str(min(rainlengths))))
counter = 0
while counter + 1 <= min(rainlengths):
rainvalue = float(allrainvalues[counter][i])
###############################################
# time viewer layer
if self.dialog.TImeVieweLayerBox.isChecked():
fields = timeviewerlayer.dataProvider().fields()
datetimefieldid = fields.indexFromName("date_time")
rainvaluefieldid = fields.indexFromName("Boundary Value")
idfieldid = fields.indexFromName("ID")
featureids = []
for feature in timeviewerlayer.getFeatures():
if float(feature.attributes()[idfieldid]) == float(i):
featureids.append(feature.id())
try:
atts = {
datetimefieldid: float(self.data[rainlengths.index(min(rainlengths))][0][counter]),
rainvaluefieldid: rainvalue}
except:
atts = {datetimefieldid: self.data[rainlengths.index(min(rainlengths))][0][counter],
rainvaluefieldid: rainvalue}
timeviewerlayer.dataProvider().changeAttributeValues({featureids[counter]: atts})
###############################################
SpatialInterpolation.write(
'%s %s #%s mm/h\n' % (str(counter), str(rainvalue), str(rainvalue)))
if counter + 1 == min(rainlengths):
SpatialInterpolation.write('!END')
SpatialInterpolation.write('\n\n')
counter = counter + 1
##########################################################
# time viewer layer
if self.dialog.TImeVieweLayerBox.isChecked():
timeviewerlayer.updateFields()
QgsProject.instance().addMapLayer(timeviewerlayer)
##########################################################
if self.dialog.SaveSpatialInterpolationBox.isChecked():
self.dialog.StatusIndicator.setText("Writing Spatial Interpolation Output...")
QTimer.singleShot(50, self.SpatialInterpolationforPromaides)
self.dialog.StatusIndicator.setText("Analyzing Storm Statistics...")
QTimer.singleShot(50, self.StormAnalysis)
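# --- Hedged aside (illustrative only, not part of the plugin) ---
# The IDW branch above evaluates z(p) = sum_i(z_i / d_i**n) / sum_i(1 / d_i**n).
# For two gauges at distances 1 and 2 (same length unit) with intensities 10 mm/h and 4 mm/h and n = 2:
#   z = (10/1 + 4/4) / (1/1 + 1/4) = 11.0 / 1.25 = 8.8 mm/h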
################################################################################################
def SpatialInterpolationforPromaides(self):
filepath = os.path.join(self.dialog.folderEdit_dataanalysis.text(), "RainfallSpatialInterpolation" + '.txt')
try: # deletes previous files
if os.path.isfile(filepath):
os.remove(filepath)
except:
pass
with open(filepath, 'a') as generateddata:
generateddata.write('# comment\n')
generateddata.write('# !BEGIN\n')
generateddata.write('# number begining from 0 ++ number of points\n')
generateddata.write('# hour [h] discharge [m³/s]\n')
generateddata.write('# !END\n\n\n')
raingaugelocations = []
generationlocations = []
# getting the locations of raingauges
point_layer = self.dialog.RainGaugeLayer.currentLayer()
features = point_layer.getFeatures()
for feature in features:
buff = feature.geometry()
raingaugelocations.append(buff.asPoint())
# getting the generation locations
area_layer = self.layer2
features = area_layer.getFeatures()
for feature in features:
buff = feature.geometry()
generationlocations.append(buff.centroid().asPoint())
# calculate generation duration
rainlengths = []
for j in range(len(self.data)):
rainlengths.append(len(self.data[j][0]))
#################################################################################################
# Inversed Distance Weighting
if self.dialog.SpatialInterpolationMethodBox.currentText() == "Inversed Distance Weighting":
# writing the file
for i in range(len(generationlocations)):
generateddata.write('!BEGIN #%s\n' % "raingaugename")
generateddata.write(
'%s %s area #Length [m²/s], Area [m/s], waterlevel [m], point [m³/s]\n' % (
str(i), str(min(rainlengths) * 2)))
counter = 0
n = self.dialog.ExponentFactorBox.value() # exponent factor for the invert distance weighting formula
while counter + 1 <= min(rainlengths):
upperformula = 0
lowerformula = 0
for j in range(len(self.data)):
distance = raingaugelocations[j].distance(generationlocations[i])
upperformula = upperformula + ((1 / (distance ** n)) * float(self.data[j][1][counter]))
lowerformula = lowerformula + (1 / (distance ** n))
rainvalue = round((upperformula / lowerformula), 3)
generateddata.write(
'%s %s #%s mm/h\n' % (str(counter), str(rainvalue / 3600000), str(rainvalue)))
generateddata.write(
'%s.99 %s #%s mm/h\n' % (str(counter), str(rainvalue / 3600000), str(rainvalue)))
if counter + 1 == min(rainlengths):
generateddata.write('!END')
generateddata.write('\n\n')
counter = counter + 1
######################################################################################################
# Trend Surface Analysis (Polynomial 1st Order)
elif self.dialog.SpatialInterpolationMethodBox.currentText() == "Trend Surface Analysis (Polynomial 1st Order)":
allrainvalues = []
for counter in range(min(rainlengths)):
xs = []
ys = []
zs = []
# putting all x and y and z values in seperate arrays
for r, i in enumerate(raingaugelocations):
xs.append(i.x())
ys.append(i.y())
zs.append(float(self.data[r][1][counter]))
data = np.c_[xs, ys, zs]
# grid covering the domain of the data
# getting the minimum and maximum x and ys of generation area
layer = self.dialog.GenerationAreaLayer.currentLayer()
ex = layer.extent()
xmax = ex.xMaximum()
ymax = ex.yMaximum()
xmin = ex.xMinimum()
ymin = ex.yMinimum()
X, Y = np.meshgrid(np.linspace(xmin, xmax, self.dialog.dxBox.value()),
np.linspace(ymin, ymax, self.dialog.dyBox.value()))
order = 1 # 1: linear, 2: quadratic
if order == 1:
# best-fit linear plane
A = np.c_[data[:, 0], data[:, 1], np.ones(data.shape[0])]
C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2]) # coefficients
# formula
# Z = C[0] * X + C[1] * Y + C[2]
rainvaluesintimestep = []
for i in generationlocations:
value = (C[0] * i.x()) + (C[1] * i.y()) + C[2]
rainvaluesintimestep.append(value)
allrainvalues.append(rainvaluesintimestep)
# writing the file
for i in range(len(generationlocations)):
generateddata.write('!BEGIN #%s\n' % "raingaugename")
generateddata.write(
'%s %s area #Length [m²/s], Area [m/s], waterlevel [m], point [m³/s]\n' % (
str(i), str(min(rainlengths) * 2)))
counter = 0
while counter + 1 <= min(rainlengths):
rainvalue = float(allrainvalues[counter][i])
generateddata.write(
'%s %s #%s mm/h\n' % (str(counter), str(rainvalue / 3600000), str(rainvalue)))
generateddata.write(
'%s.99 %s #%s mm/h\n' % (str(counter), str(rainvalue / 3600000), str(rainvalue)))
if counter + 1 == min(rainlengths):
generateddata.write('!END')
generateddata.write('\n\n')
counter = counter + 1
######################################################################################
elif self.dialog.SpatialInterpolationMethodBox.currentText() == "Trend Surface Analysis (Polynomial 2nd Order)":
allrainvalues = []
for counter in range(min(rainlengths)):
xs = []
ys = []
zs = []
# putting all x and y and z values in seperate arrays
for r, i in enumerate(raingaugelocations):
xs.append(i.x())
ys.append(i.y())
zs.append(float(self.data[r][1][counter]))
data = np.c_[xs, ys, zs]
# grid covering the domain of the data
# getting the minimum and maximum x and ys of generation area
layer = self.dialog.GenerationAreaLayer.currentLayer()
ex = layer.extent()
xmax = ex.xMaximum()
ymax = ex.yMaximum()
xmin = ex.xMinimum()
ymin = ex.yMinimum()
X, Y = np.meshgrid(np.linspace(xmin, xmax, self.dialog.dxBox.value()),
np.linspace(ymin, ymax, self.dialog.dyBox.value()))
order = 2 # 2: quadratic
if order == 2:
# best-fit quadratic curve
A = np.c_[
np.ones(data.shape[0]), data[:, :2], np.prod(data[:, :2], axis=1), data[:, :2] ** 2]
C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2])
# formula
# Z = C[4]*X**2. + C[5]*Y**2. + C[3]*X*Y + C[1]*X + C[2]*Y + C[0]
rainvaluesintimestep = []
for i in generationlocations:
value = C[4] * i.x() ** 2. + C[5] * i.y() ** 2. + C[3] * i.x() * i.y() + C[1] * i.x() + C[
2] * i.y() + C[0]
rainvaluesintimestep.append(value)
allrainvalues.append(rainvaluesintimestep)
# writing the file
for i in range(len(generationlocations)):
generateddata.write('!BEGIN #%s\n' % "raingaugename")
generateddata.write(
'%s %s area #Length [m²/s], Area [m/s], waterlevel [m], point [m³/s]\n' % (
str(i), str(min(rainlengths) * 2)))
counter = 0
while counter + 1 <= min(rainlengths):
rainvalue = float(allrainvalues[counter][i])
generateddata.write(
'%s %s #%s mm/h\n' % (str(counter), str(rainvalue / 3600000), str(rainvalue)))
generateddata.write(
'%s.99 %s #%s mm/h\n' % (str(counter), str(rainvalue / 3600000), str(rainvalue)))
if counter + 1 == min(rainlengths):
generateddata.write('!END')
generateddata.write('\n\n')
counter = counter + 1
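# Unit note (illustrative): the division by 3600000 above converts mm/h to m/s, since
# 1 mm/h = 0.001 m / 3600 s ≈ 2.78e-7 m/s, matching the "Area [m/s]" unit named in the header line.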
###########################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
# data analysis
# shared arrays
StormTraveledDistance = []
StormVolume = []
StormDirection = []
StormDuration = []
StormPeakIntensity = []
StormPeakIntensityTimestep = []
StormPeakIntensityLocation = []
StormSize = []
NoStormDuration = []
CellCoordinates = []
StormLocations = []
StormIDs = []
StormCount = 0
MaxNumberofStorms = 100000
def StormAnalysis(self):
# getting the center x y of each square cell
for feature in self.layer2.getFeatures():
self.CellCoordinates.append(feature.geometry().centroid().asPoint())
print(self.nx, "nx")
print(self.ny, "ny")
# returns the direction of travel from p1 to p2 as a bearing in degrees
# east is 0
# north is 90
def angle_between(p1, p2):
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return np.rad2deg(np.arctan2(dy, dx)) % 360.0
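# Illustrative check of the helper above: angle_between((0, 0), (1, 1)) -> 45.0 (towards NE),
# angle_between((0, 0), (0, -1)) -> 270.0 (towards S).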
self.StormCount = 0
nostormcount = 0
# reset
self.StormTraveledDistance = []
self.StormVolume = []
self.StormDirection = []
self.StormDuration = []
self.StormPeakIntensity = []
self.StormSize = []
self.NoStormDuration = []
for i in range(self.MaxNumberofStorms):
self.StormTraveledDistance.append(0)
self.StormVolume.append(0)
self.StormDirection.append([])
self.StormLocations.append([])
self.StormDuration.append(0)
self.StormPeakIntensity.append(0)
self.StormPeakIntensityTimestep.append(0)
self.StormPeakIntensityLocation.append(0)
self.StormSize.append(0)
Storm = []
StormConnectivity = []
PreviousStormConnectivity = []
# reading file
filepath = os.path.join(tempfile.gettempdir(), "RainfallSpatialInterpolation" + '.txt')
f = open(filepath)
lines = f.readlines()
StartingLine = 2
for linecount in range(len(self.data[0][0])):
print(StartingLine, "startingline")
for i in range(StartingLine, StartingLine + ((self.nx * self.ny - 1) * (len(self.data[0][0]) + 4)) + 1,
len(self.data[0][0]) + 3 + 1):
Storm.append(lines[i].split(' ')[1])
# place to put test arrays
for i in range(len(Storm)):
StormConnectivity.append(0)
Storm = [float(i) for i in Storm]
StartingLine = StartingLine + 1
###################################################################################
# storm cluster identification
StormThreshhold = self.dialog.StormThreshholdBox.value()
for i, value in enumerate(Storm):
try:
if Storm[i - 1] > StormThreshhold and value > StormThreshhold and (i - 1) >= 0:
StormConnectivity[i] = StormConnectivity[i - 1]
continue
except:
pass
try:
if Storm[i - self.nx] > StormThreshhold and value > StormThreshhold and (i - self.nx) >= 0:
StormConnectivity[i] = StormConnectivity[i - self.nx]
continue
except:
pass
try:
if Storm[i - self.nx - 1] > StormThreshhold and value > StormThreshhold and (i - self.nx - 1) >= 0:
StormConnectivity[i] = StormConnectivity[i - self.nx - 1]
continue
except:
pass
if value > StormThreshhold:
self.StormCount = self.StormCount + 1
StormConnectivity[i] = self.StormCount
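# Illustrative trace of the labelling above: with self.nx = 3, a threshold of 1 and
# Storm = [0, 5, 6, 0, 0, 7, 0, 0, 0], all three wet cells end up with label 1;
# cell 2 inherits it from its left neighbour and cell 5 from the cell directly above it.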
####################################################################################
print(PreviousStormConnectivity, "previous connectivity1")
print(StormConnectivity, "storm connectivity1")
print(Storm, "storm")
# find overlapping storms
for i, value in enumerate(StormConnectivity):
for j, previousvalue in enumerate(PreviousStormConnectivity):
if i == j and value > 0 and previousvalue > 0:
for k, value2 in enumerate(StormConnectivity):
if value2 == value:
StormConnectivity[k] = previousvalue
######################################################################################
# getting storm statistics
if all(i <= self.dialog.StormThreshholdBox.value() for i in Storm):
nostormcount = nostormcount + 1
else:
self.NoStormDuration.append(nostormcount)
nostormcount = 0
# storm volume
for i, value in enumerate(StormConnectivity):
if value > 0:
self.StormVolume[value] = self.StormVolume[value] + Storm[i]
# saving the storm id
for stormid in list(set(StormConnectivity)):
if stormid != 0 and (stormid not in self.StormIDs):
self.StormIDs.append(stormid)
# saving storm locations
for stormid in list(set(StormConnectivity)):
indexes=[]
if stormid != 0:
for index, element in enumerate(StormConnectivity):
if element == stormid:
indexes.append(index)
self.StormLocations[stormid].append(indexes)
# storm duration
print(StormConnectivity, "storm connectivity2")
for value in list(set(StormConnectivity)):
if value != 0:
self.StormDuration[value] = self.StormDuration[value] + 1
# peak intensity and storm area and velocity and direction
rainintensities = []
currentstormcoordinates = []
previousstormcoordinates = []
stormarea = 0
for i, id in enumerate(StormConnectivity):
if id == value and id != 0:
rainintensities.append(Storm[i])
currentstormcoordinates.append(self.CellCoordinates[i])
stormarea = stormarea + 1
for i, id in enumerate(PreviousStormConnectivity):
if id == value and id != 0:
previousstormcoordinates.append(self.CellCoordinates[i])
if value != 0:
if max(rainintensities) > self.StormPeakIntensity[value]:
self.StormPeakIntensity[value] = max(rainintensities)
self.StormPeakIntensityTimestep[value] = StartingLine
self.StormPeakIntensityLocation[value] = rainintensities.index(max(rainintensities))
self.StormSize[value] = self.StormSize[value] + stormarea
# traveled distance and direction
if value != 0 and (value in PreviousStormConnectivity):
currentstormcenterx = 0
currentstormcentery = 0
for xy in currentstormcoordinates:
currentstormcenterx = currentstormcenterx + xy.x()
currentstormcentery = currentstormcentery + xy.y()
currentstormcenterx = currentstormcenterx / len(currentstormcoordinates)
currentstormcentery = currentstormcentery / len(currentstormcoordinates)
previousstormcenterx = 0
previousstormcentery = 0
for xy in previousstormcoordinates:
previousstormcenterx = previousstormcenterx + xy.x()
previousstormcentery = previousstormcentery + xy.y()
if len(previousstormcoordinates) > 0:
previousstormcenterx = previousstormcenterx / len(previousstormcoordinates)
previousstormcentery = previousstormcentery / len(previousstormcoordinates)
# both need averaging out
self.StormTraveledDistance[value] = self.StormTraveledDistance[value] + math.sqrt(
(currentstormcenterx - previousstormcenterx) ** 2 + (
currentstormcentery - previousstormcentery) ** 2)
angle = angle_between([previousstormcenterx, previousstormcentery],
[currentstormcenterx, currentstormcentery])
if 0 <= angle < 22.5 or 337.5 < angle < 360:
direction = "E"
elif 22.5 <= angle <= 67.5:
direction = "NE"
elif 67.5 <= angle <= 112.5:
direction = "N"
elif 112.5 <= angle <= 157.5:
direction = "NW"
elif 157.5 <= angle <= 202.5:
direction = "W"
elif 202.5 <= angle <= 247.5:
direction = "SW"
elif 247.5 <= angle <= 292.5:
direction = "S"
elif 292.5 <= angle <= 337.5:
direction = "SE"
self.StormDirection[value].append(direction)
PreviousStormConnectivity = StormConnectivity
Storm = []
StormConnectivity = []
# print(self.StormPeakIntensity[:self.StormCount+1],"peak")
# print(self.StormSize[:self.StormCount+1],"size")
# print(self.StormDuration[:self.StormCount+1],"duration")
# print(self.StormTraveledDistance[:self.StormCount+1],"distance")
#print(self.StormDirection[:self.StormCount + 1], "direction")
#print(self.StormLocations,"locations")
#print(self.StormIDs,"stormids")
#print(self.StormPeakIntensityTimestep,"timestep")
#print(self.StormPeakIntensityLocation,"location")
if self.dialog.SaveStormStatisticsBox.isChecked():
self.dialog.StatusIndicator.setText("Writing Storm Statistics to File...")
QTimer.singleShot(50, self.WriteStormStatistics)
N = 0
for i in self.StormDuration:
if i > 0:
N = N + 1
self.dialog.StatusIndicator.setText("Processing Complete, %s Storms Identified" % (N))
self.iface.messageBar().pushSuccess(
'Rain Generator',
'Processing Complete !'
)
self.dialog.groupBox_3.setEnabled(True)
# function to write storm statistics to file
def WriteStormStatistics(self):
filepath = os.path.join(self.dialog.folderEdit_dataanalysis.text(), "StormStatistics" + '.txt')
try: # deletes previous files
if os.path.isfile(filepath):
os.remove(filepath)
except:
pass
try:
file = open(filepath, 'w')
file.close()
except:
pass
with open(filepath, 'a') as StormStatistics:
StormStatistics.write(
'Storm_id Storm_Duration Storm_Volume Storm_PeakIntensity Storm_TotalArea Storm_TraveledDistance Storm_Directions\n')
for i in range(1, self.StormCount + 1):
StormStatistics.write('%s %s %s %s %s %s %s\n' % (
i, self.StormDuration[i], self.StormVolume[i], self.StormPeakIntensity[i], (self.StormSize[i]),
(self.StormTraveledDistance[i]), (self.StormDirection[i])))
#############################################################################################
#############################################################################################
#generation
def Generation(self):
StormIDUniqueValues = self.StormIDs
RequestedNumberofTimesteps = self.dialog.RequestedGenerationDurationBox.value()
def execTool(self):
print("hello")
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
# copula class
# https://github.com/ashiq24/Copula
# multivariate Gaussian copulas
class Copula():
def __init__(self, data):
self.data = np.array(data)
if (len(data) < 2):
raise Exception('input data must have multiple samples')
if not isinstance(data[0], list):
raise Exception('input data must be a 2D array')
self.cov = | np.cov(self.data.T) | numpy.cov |
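# Illustrative sketch (not part of the Copula package referenced above): fitting a
# bivariate Gaussian copula by hand. The marginals are mapped to uniforms via ranks,
# then to normal scores, and the dependence is summarised by the correlation matrix
# of those scores -- the same covariance/correlation idea the class above builds on.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=500)
y = 0.8 * x + 0.6 * rng.normal(size=500)  # correlated toy sample
sample = np.column_stack([x, y])

u = stats.rankdata(sample, axis=0) / (len(sample) + 1)  # empirical CDF transform to (0, 1)
z = stats.norm.ppf(u)                                   # normal scores
corr = np.corrcoef(z.T)                                 # copula correlation matrix

# draw new dependent uniform samples from the fitted Gaussian copula
new_u = stats.norm.cdf(rng.multivariate_normal(np.zeros(2), corr, size=5))
print(corr)
print(new_u)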
import datetime as dt
from pathlib import Path
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from syaroho_rating.utils import perf_to_color
def get_colorlist(performances: pd.Series, n_col: int) -> np.ndarray:
colordefs = []
for p in performances:
rgb = (3.0 + perf_to_color(p)) / 4.0 # type: np.ndarray
colordef_row = np.tile(rgb, reps=(n_col, 1))
colordefs.append(colordef_row)
return | np.stack(colordefs) | numpy.stack |
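# Minimal usage sketch of the tiling pattern in get_colorlist, with a stand-in for
# perf_to_color (the real mapping lives in syaroho_rating.utils and is not reproduced
# here). Each performance becomes one RGB row repeated across n_col table cells, and
# np.stack turns the per-performance blocks into an (n_rows, n_col, 3) colour array.
import numpy as np
import pandas as pd

def fake_perf_to_color(p: float) -> np.ndarray:
    # hypothetical placeholder: grey level scales with performance
    g = min(max(p / 3000.0, 0.0), 1.0)
    return np.array([g, g, g])

performances = pd.Series([800, 1600, 2400])
n_col = 4
blocks = [np.tile((3.0 + fake_perf_to_color(p)) / 4.0, reps=(n_col, 1)) for p in performances]
cell_colors = np.stack(blocks)
print(cell_colors.shape)  # (3, 4, 3)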
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.utils import viewkeys
from multiprocessing import Process, Queue
import numpy as np
import os
import shutil
import tempfile
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, cnn, data_parallel_model, dyndep, optimizer, \
rnn_cell, workspace, model_helper, brew
from caffe2.python.test_util import TestCase
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops")
class TemporaryDirectory:
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
return self.tmpdir
def __exit__(self, type, value, traceback):
shutil.rmtree(self.tmpdir)
# Note(jiayq): we are yet to find out why Travis gives out an error in gloo
# like:
# RuntimeError: [enforce fail at /home/travis/build/caffe2/caffe2/third_party/gloo/gloo/transport/tcp/device.cc:113] ifa != nullptr. Unable to find interface for: [127.0.1.1]
# See for example https://travis-ci.org/caffe2/caffe2/jobs/262433866
# As a result, we will check if this is travis, and if yes, disable it.
@unittest.skipIf(os.environ.get("TRAVIS"), "DPMTest has a known issue with Travis.")
class DataParallelModelTest(TestCase):
def run_model(self, devices, gpu):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
fc = model.FC("data", "fc", 16, 1,
("ConstantFill", {}), ("ConstantFill", {}))
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
# For testing explicit sync
model.param_init_net.UniformFill([], ["sync_num"], shape=[1])
return [loss]
def add_optimizer(model):
return optimizer.build_sgd(
model,
0.1,
policy="fixed",
max_gradient_norm=5.0,
allow_lr_injection=True,
)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="test{}".format(devices),
)
data_parallel_model.Parallelize(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
optimizer_builder_fun=add_optimizer,
devices=devices,
cpu_device=not gpu,
shared_model=not gpu,
)
data_parallel_model.AddBlobSync(model, ["sync_num"])
np.random.seed(2603)
# Each run has same input, independent of number of gpus
batch_size = 64
for i in range(0, 10):
full_data = np.random.rand(batch_size, 16)
full_labels = | np.round(full_data[:, 0]) | numpy.round |
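# Plain-NumPy sketch of what the tiny test model above computes (a 16->1 FC layer,
# a sigmoid, and a squared-difference loss against a label derived from the first
# input column). The weights here are arbitrary random values, whereas the Caffe2
# test uses ConstantFill, and the exact scaling of Caffe2's SquaredL2Distance op is
# not reproduced -- this only illustrates the forward pass being parallelized.
import numpy as np

np.random.seed(2603)
batch_size = 64
data = np.random.rand(batch_size, 16).astype(np.float32)
labels = np.round(data[:, 0]).astype(np.float32)

W = np.random.randn(16, 1).astype(np.float32) * 0.1
b = np.zeros(1, dtype=np.float32)

fc = data @ W + b                          # FC
sigm = 1.0 / (1.0 + np.exp(-fc.ravel()))   # FlattenToVec + Sigmoid
loss = np.mean((sigm - labels) ** 2)       # squared difference + AveragedLoss
print(loss)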
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test hardcoded decomposition rules and matrix definitions for standard gates."""
import inspect
import numpy as np
from ddt import ddt, data, unpack
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Operator
from qiskit.test import QiskitTestCase
from qiskit.circuit import ParameterVector, Gate, ControlledGate
from qiskit.circuit.library import standard_gates
from qiskit.circuit.library import (
HGate,
CHGate,
IGate,
RGate,
RXGate,
CRXGate,
RYGate,
CRYGate,
RZGate,
CRZGate,
SGate,
SdgGate,
CSwapGate,
TGate,
TdgGate,
U1Gate,
CU1Gate,
U2Gate,
U3Gate,
CU3Gate,
XGate,
CXGate,
ECRGate,
CCXGate,
YGate,
CYGate,
ZGate,
CZGate,
RYYGate,
PhaseGate,
CPhaseGate,
UGate,
CUGate,
SXGate,
SXdgGate,
CSXGate,
RVGate,
)
from qiskit.circuit.library.standard_gates.equivalence_library import (
StandardEquivalenceLibrary as std_eqlib,
)
from .gate_utils import _get_free_params
class TestGateDefinitions(QiskitTestCase):
"""Test the decomposition of a gate in terms of other gates
yields the equivalent matrix as the hardcoded matrix definition
up to a global phase."""
def test_ch_definition(self): # TODO: expand this to all gates
"""Test ch gate matrix and definition."""
circ = QuantumCircuit(2)
circ.ch(0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_ccx_definition(self):
"""Test ccx gate matrix and definition."""
circ = QuantumCircuit(3)
circ.ccx(0, 1, 2)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_crz_definition(self):
"""Test crz gate matrix and definition."""
circ = QuantumCircuit(2)
circ.crz(1, 0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cry_definition(self):
"""Test cry gate matrix and definition."""
circ = QuantumCircuit(2)
circ.cry(1, 0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_crx_definition(self):
"""Test crx gate matrix and definition."""
circ = QuantumCircuit(2)
circ.crx(1, 0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cswap_definition(self):
"""Test cswap gate matrix and definition."""
circ = QuantumCircuit(3)
circ.cswap(0, 1, 2)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cu1_definition(self):
"""Test cu1 gate matrix and definition."""
circ = QuantumCircuit(2)
circ.append(CU1Gate(1), [0, 1])
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cu3_definition(self):
"""Test cu3 gate matrix and definition."""
circ = QuantumCircuit(2)
circ.append(CU3Gate(1, 1, 1), [0, 1])
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cx_definition(self):
"""Test cx gate matrix and definition."""
circ = QuantumCircuit(2)
circ.cx(0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_ecr_definition(self):
"""Test ecr gate matrix and definition."""
circ = QuantumCircuit(2)
circ.ecr(0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_rv_definition(self):
"""Test R(v) gate to_matrix and definition."""
qreg = QuantumRegister(1)
circ = QuantumCircuit(qreg)
vec = np.array([0.1, 0.2, 0.3], dtype=float)
circ.rv(*vec, 0)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_rv_r_equiv(self):
"""Test R(v) gate is equivalent to R gate."""
theta = np.pi / 5
phi = np.pi / 3
rgate = RGate(theta, phi)
axis = np.array([np.cos(phi), | np.sin(phi) | numpy.sin |
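# Stand-alone check of the rotation-axis identity behind test_rv_r_equiv above,
# using matrix exponentials instead of Qiskit gate objects: R(theta, phi) =
# exp(-i*theta*(cos(phi) X + sin(phi) Y)/2) matches the rotation-vector gate with
# v = theta * [cos(phi), sin(phi), 0].
import numpy as np
from scipy.linalg import expm

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

theta, phi = np.pi / 5, np.pi / 3
r_gate = expm(-1j * theta * (np.cos(phi) * X + np.sin(phi) * Y) / 2)

v = theta * np.array([np.cos(phi), np.sin(phi), 0.0])
rv_gate = expm(-1j * (v[0] * X + v[1] * Y + v[2] * Z) / 2)

assert np.allclose(r_gate, rv_gate)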
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures # TODO: implement what I need from this package
class Design:
"""
Class Docstring.
"""
def __init__(self, experiments=None, levels=None):
"""
:param int experiments: Number of Experiments to design
:param dict levels: Levels of factors
Constructor Docstring.
"""
self.experiments = experiments
self.levels = levels
self.features = len(levels.keys())
self.order = None
self.interactions_only = None
self.bias = None
self.epochs = None
self.engine = None
# ---------------DUNDER, GETTERS AND SETTERS FUNCTION---------------------------------------------------------------
def __repr__(self):
return f"Design(experiments={self.experiments}, levels={self.levels})"
def set_model(self, order, interactions_only=False, bias=True):
"""
:param int order: Order of the polynomial (1-main effects, 2-quadratic effects, ...)
:param bool interactions_only: Include terms as x1^2 or not
:param bool bias: Include a beta_0 on the design matrix or not
Setter for model parameters
"""
self.order = order
self.interactions_only = interactions_only
self.bias = bias
def set_algorithm(self, epochs, engine):
"""
:param int epochs: Number of random start to check
:param str engine: What engine to use for maximization. Includes ("A", "C", "D", "E", "S", "T", "G", "I", "V")
Setter for algorithm parameters
"""
self.epochs = epochs
self.engine = engine
# ------------------------------------------------------------------------------------------------------------------
def gen_random_design(self) -> pd.DataFrame:
"""
Generate a random starting design matrix.
"""
df = pd.DataFrame(np.random.random((self.experiments, self.features)))
df.columns = ['x' + str(x) for x in list(range(self.features))]
return df
def gen_model_matrix(self, data=None) -> pd.DataFrame:
"""
:param pd.DataFrame data: Design matrix
Generate the model matrix of a design matrix (argument)
"""
if any(var is None for var in [self.order, self.interactions_only, self.bias]):
raise Exception('Parameters: \'order\', \'interactions_only\' and \'bias\' cannot be None')
poly = PolynomialFeatures(degree=self.order,
interaction_only=self.interactions_only,
include_bias=self.bias)
df = pd.DataFrame(poly.fit_transform(data))
df.columns = poly.get_feature_names(data.columns)
return df
@staticmethod
def clear_histories(optimalities, designs, design_mat):
"""
:param list designs: Number of Experiments to design
:param list optimalities: Number of random start to check
:param pd.DataFrame design_mat: Should the engine be maximized (True) or minimizes (False)?
Collect the optimality values and candidate designs recorded during the coordinate-exchange run into two DataFrames: one holding every design per epoch and one holding the optimality-criterion history (with its per-row maximum).
"""
hstry_designs = pd.DataFrame(designs, columns=['epoch', *list(design_mat.columns)])
hstry_opt_cr = pd.DataFrame(optimalities).rename(columns={0: 'epoch',
1: 'experiment',
2: 'feature'})
hstry_opt_cr['max'] = hstry_opt_cr.iloc[:, 3:].max(axis=1)
return hstry_designs, hstry_opt_cr
@staticmethod
def find_best_design(histories, designs, max_bool=True):
"""
:param pd.DataFrame histories: Dataframe of all the histories per epoch
:param pd.DataFrame designs: Dataframe of all the designs per epoch
:param bool max_bool: Should the engine be maximized (True) or minimizes (False)?
Group the histories per epoch and take the best value. The function then uses the index of the best epoch to retrieve the design generated in that epoch and returns it as the best design.
The max_bool flag tells the function whether the criterion is being maximised (True) or minimised (False).
"""
if max_bool:
per_epoch = histories.groupby('epoch')['max'].max()
return designs[designs['epoch'] == per_epoch.idxmax()].reset_index().iloc[:, 2:]
else:
per_epoch = histories.groupby('epoch')['min'].min()
return designs[designs['epoch'] == per_epoch.idxmin()].reset_index().iloc[:, 2:]
@staticmethod
def guards():
pass
# Engines --------------------------------------------------------------------------------------------------------
@staticmethod
def d_opt(matrix):
# Priority: Estimation
# Maximize the determinant of the information matrix X'X of the design.
# This engine results in maximizing the differential Shannon information content of the parameter estimates.
return | np.linalg.det(matrix.T @ matrix) | numpy.linalg.det |
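# Minimal sketch of the D-optimality criterion implemented in d_opt above: for a
# main-effects model with an intercept, the information matrix is X'X and the
# D-criterion is its determinant, which a coordinate-exchange search tries to
# maximise. This bypasses the Design class and sklearn to show only the criterion.
import numpy as np

rng = np.random.default_rng(1)
experiments, features = 8, 3
design = rng.random((experiments, features))                    # random starting design
model_matrix = np.hstack([np.ones((experiments, 1)), design])   # bias + main effects

d_criterion = np.linalg.det(model_matrix.T @ model_matrix)
print(d_criterion)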
'''
Design of a Neural Network from scratch
*************<IMP>*************
Mention hyperparameters used and describe functionality in detail in this space
- carries 1 mark
'''
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split #Allowed for splitting dataset
#global dictionary to store weight and bias matrices
parameters=dict()
def clean(x):
#We round mean values to appropriate number of decimal places
#Filling missing values in age column with mean of the column
x['Age']=x['Age'].fillna(round(x['Age'].mean()))
#filling missing values in weight column with mean of the column
x['Weight']=x['Weight'].fillna(round(x['Weight'].mean()))
#filling missing values in delivery column with the mode of the column as it has categorical values
x['Delivery phase']=x['Delivery phase'].fillna(x['Delivery phase'].mode()[0])
#Filling HB values with the mean of the column
x['HB']=x['HB'].fillna(round(x['HB'].mean(),1))
#Filling IFA values with the mode of the column as it has categorical values
x['IFA']=x['IFA'].fillna(x['IFA'].mode()[0])
#Missing BP values are filled with the mean of the column, rounded to 3 decimal places
x['BP']=x['BP'].fillna(round(x['BP'].mean(),3))
#Education and residence are filled with the mode of the column as they are categorical columns
x['Education']=x['Education'].fillna(x['Education'].mode()[0])
x['Residence']=x['Residence'].fillna(x['Residence'].mode()[0])
#All non-categorical columns are min-max scaled to the range 0 to 10
x['Age']=(x['Age']-x['Age'].min())/(x['Age'].max()-x['Age'].min())*10
x['Weight']=(x['Weight']-x['Weight'].min())/(x['Weight'].max()-x['Weight'].min())*10
x['HB']=(x['HB']-x['HB'].min())/(x['HB'].max()-x['HB'].min())*10
x['BP']=(x['BP']-x['BP'].min())/(x['BP'].max()-x['BP'].min())*10
return x
#The below functions can be used if it is desired to change the neural network archiecture in the future
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_derivative(y):
return y * (1 - y)
# np.tanh(x) will suffice for tanh activation function
def tanh_derivative(y):
return 1-(y*y)
def relu(x):
if x<0:
return 0
else:
return x
def relu_derivative(y):
if y<0:
return 0
else:
return 1
class NN:
#init function for the NN class
#The hidden layer size is set
#Weight matrices and bias matrices are initialized
#X and Y are initialized
''' X and Y are dataframes '''
def __init__(self,X,Y):
np.random.seed(2) #random seed 2
#Network consists of input layer, 1 hidden layer and then output layer
#Number of neurons in hidden layer is set to 4
self.h_size=4
#Input is intialized with the features dataframe
self.input = X
#Learning rate is set as 0.08. Learning rate defines at what rate we progress down the slope during gradient descent
self.learning_rate=0.08
'''
Weight Matrix dimensions are (number of neruons in current layer) X (number of neurons in previous layer)
Accordingly, weights1 size is (number of neurons in hidden layer) X (number of neurons in input layer)
weights2 size is (number of neurons in output layer) X (number of neurons in hidden layer)
Weight matrices multiplies by 0.01 to avoid exploding gradient problem
'''
self.weights1 = np.random.randn(self.h_size,self.input.shape[1])*0.01#We have 4 nodes in first hidden layer, 4X9
self.weights2 = np.random.randn(Y.shape[1],self.h_size)*0.01#Second hidden layer also has 4 nodes
'''
Bias is column vector with length corresponding to number of neurons in that particular layer
Both the bias vectors are initialized to zero vectors in the beginning.
'''
self.bias1=np.zeros((self.h_size,1))
self.bias2= | np.zeros((Y.shape[1],1)) | numpy.zeros |
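# Sketch of a single forward pass through the architecture initialised above
# (input -> 4-unit sigmoid hidden layer -> sigmoid output), reusing the same
# weight/bias shapes: weights1 is (hidden, features), bias1 is (hidden, 1),
# weights2 is (outputs, hidden), bias2 is (outputs, 1). Toy inputs only; this is
# not the dataset cleaned by clean() above.
import numpy as np

def sigmoid_fn(x):
    return 1 / (1 + np.exp(-x))

np.random.seed(2)
features, hidden, outputs, samples = 9, 4, 1, 5
X = np.random.rand(samples, features)       # rows are samples

W1 = np.random.randn(hidden, features) * 0.01
b1 = np.zeros((hidden, 1))
W2 = np.random.randn(outputs, hidden) * 0.01
b2 = np.zeros((outputs, 1))

A1 = sigmoid_fn(W1 @ X.T + b1)   # (hidden, samples)
yhat = sigmoid_fn(W2 @ A1 + b2)  # (outputs, samples)
print(yhat.shape)                # (1, 5)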
import pendsim.sim, pendsim.utils
import copy
from scipy import integrate
import numpy as np
from scipy.signal import cont2discrete
# needed for LQR + UKF
from filterpy.kalman import UnscentedKalmanFilter, sigma_points
# needed for typeannotations
from typing import Tuple
LABELS = ["x", "xd", "t", "td"]
class Controller(object):
"""
Base class for controllers. A controller executes a `policy` during
the simulation loop. It takes some measured `state` and takes some
`action` on that state. The `action` is a force applied to the base
of the cart.
A controller's `policy` can include a state estimator or functions
we can use to record data.
Methods
-------
policy:
control policy
do_swingup:
get action from Astrom's "swing-up" policy
do_lqr:
get action from an LQR policy
get_linear_sys:
get the linearized system from jacobians
store_4tuple:
store a 4-tuple into multi-index
wrapPi:
wrap an angular value to [-pi, pi] interval
create_ukf:
create an unscented kalman filter for the system
do_pid:
get action from a PID control policy
get_and_store_priors:
utility function for storing state priors in a moving backwards horizon
"""
def __init__(self, init_state: np.ndarray):
"""init function for Controller object. Rare that this would be used
directly.
Parameters
----------
init_state : np.ndarray
Initial state.
"""
self.init_state = init_state
self.prev_err = 0
def policy(self, state: np.ndarray, dt: float) -> Tuple[float, dict]:
raise NotImplementedError
def do_swingup(
self, pend: pendsim.sim.Pendulum, state: np.ndarray, k: float
) -> float:
"""Implement a swing-up by Energy control method described by Astrom
and Furuta. Typically, we want to implement the swing-up strategy if
the pendulum is below some threshold (i.e. theta > pi/2).
See
Åström, <NAME>, and <NAME>. "Swinging up a pendulum by
energy control." Automatica 36.2 (2000): 287-295.
at https://doi.org/10.1016/S1474-6670(17)57951-3
Parameters
----------
pend : pendsim.sim.Pendulum
The pendulum object containing `m`,`g`,`l` parameters we use to
estimate the energy of the system.
state : np.ndarray
The current state
k : float
Swing-up gain
Returns
-------
float
controller action.
"""
m, g, l = pend.m, pend.g, pend.l
E_norm = 2 * m * g * l
E = m * g * l * (np.cos(state[2]) - 1) # 0 = upright
beta = E / E_norm
u = k * beta * pendsim.utils.sign(state[3] * np.cos(state[2]))
return -u
def do_lqr(
self,
w: int,
A: np.ndarray,
B: np.ndarray,
Q: np.ndarray,
R: np.ndarray,
x: np.ndarray,
) -> np.ndarray:
"""Finite-horizon discrete Linear Quadratic Regulator policy.
An LQR controller produces an optimal control policy over a finite horizon
according to a quadratic cost over the system state.
See https://underactuated.mit.edu/lqr.html
Parameters
----------
w : int
window over which to perform LQR control
A : np.ndarray
linear plant transition matrix
B : np.ndarray
linear control matrix
Q : np.ndarray
controller gain matrix
R : np.ndarray
control action penalty
x : np.ndarray
system state
Returns
-------
np.ndarray
sequence of control actions
"""
P = [None] * (w + 1)
P[w] = Q
for k in range(w, 0, -1):
p1 = A.T @ P[k] @ A # (4,4)
p2 = A.T @ P[k] @ B # (4,1)
p3 = np.linalg.pinv(R + B.T @ P[k] @ B)  # (1, 1)
p4 = B.T @ P[k] @ A
P[k - 1] = p1 - p2 @ p3 @ p4 + Q
u = [None] * w
for k in range(w):
c1 = np.linalg.inv(R + B.T @ P[k] @ B)
c2 = B.T @ P[k] @ A
u[k] = c1 @ c2 @ x
return np.squeeze(u)
def get_linear_sys(
self, Adot: np.ndarray, Bdot: np.ndarray, dt: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Get linearize from jacobians of system with timestep `dt`.
Parameters
----------
Adot : np.ndarray
The (4,4) jacobian of the plant matrix A
Bdot : np.ndarray
The (4,1) jacobian of the control matrix B
dt : float
control timestep
Returns
-------
Tuple[np.ndarray, np.ndarray]
linearized system matrices A, B.
"""
A, B = Adot, Bdot
C, D = np.zeros((1, A.shape[0])), np.zeros((1, 1))
sys_disc = cont2discrete((A, B, C, D), dt, method="zoh")
return sys_disc[0], sys_disc[1]
def store_4tuple(self, level1_key: str, val: np.ndarray) -> dict:
"""Helper function for storing a 4-tuple of values. A level 0 key
is given, and the level 1 keys are populated with 'x', 'xd', 't','td'
values. Use this when you need to easily return data from a 4-tuple
produced by the simulation that maps to each of the 4 state values.
Example
>>> val = [1,2,3,4]
>>> key = "count"
>>> store4tuple(key, val)
{
('count','x') : 1,
('count','xd') : 2,
('count','t') : 3,
('count','td') : 4,
}
Parameters
----------
level1_key : str
outer key
val : np.ndarray
values. Must be shape (4,1) or (4,)
Returns
-------
dict
dict containing new data values
"""
labels = ["x", "xd", "t", "td"]
return pendsim.utils.array_to_kv(level1_key, labels, np.squeeze(val))
def wrapPi(self, val: float) -> float:
"""Wrap an angle to the interval between [-pi, pi].
Parameters
----------
val : float
angle
Returns
-------
float
wrapped angle
"""
return (val + np.pi) % (2 * np.pi) - np.pi
def create_ukf(
self, dt: float, hx: callable, fx: callable
) -> UnscentedKalmanFilter:
"""Create an unscented Kalman filter with state transition functions
`fx`, measurement function `hx` with timestep between measurements
estimated as `dt`.
Parameters
----------
dt : float
control timestep
hx : callable
function describing mapping between sensor inputs and measurements
fx : callable
function describing system. Can be non-linear.
Returns
-------
UnscentedKalmanFilter
UKF object with sensible defaults for sigma points.
"""
n = 4
points = sigma_points.MerweScaledSigmaPoints(
n,
alpha=1e-4,
beta=2,
kappa=3 - n,
)
kf = UnscentedKalmanFilter(
dim_x=n,
dim_z=n,
dt=dt,
hx=hx,
fx=fx,
points=points,
)
return kf
def do_pid(
self, dt: float, kp: float, ki: float, kd: float, state: np.array
) -> float:
"""Perform PID (proportional-integral-derivative) control policy given
a system state. This policy executes on the pendulum angle (i.e. attempts)
to keep the pendulum in the upright position.
Parameters
----------
dt : float
control timestep
kp : float
proportional gain
ki : float
integral gain
kd : float
derivative gain
state : np.array
input state
Returns
-------
float
control action
"""
err = -self.wrapPi(state[2])
errd = (err - self.prev_err) / dt
self.integrator += err
action = kp * err + ki * self.integrator + kd * errd
self.prev_err = err
return action
def get_and_store_priors(self, state: np.ndarray, n: int) -> Tuple[int, int]:
"""utility function for getting and storing priors in some window `n`.
This function returns two values `l` and `u` which provide the indices
to a moving window over prior_states. You must include an attr `tick`,
which is incremented to determine the window, as well as an attr `prior_states`
to store states to use this function, and this function alters those attrs.
Parameters
----------
state : np.ndarray
input state to store
n : int
window for returning `l`, `u` pair.
Returns
-------
Tuple[int, int]
l, u pair. If you want to get n most recent prior measurements, call
self.prior_states[l:u]
"""
l = max(0, self.tick - n)
u = self.tick
self.tick += 1
self.prior_states.append(state)
return l, u
class PID(Controller):
"""Basic PID Control class. This class produces a basic PID controller
that takes the state as it is measured and executes a pid policy on it.
Parameters
----------
pid : Tuple[float, float, float]
kp, ki, kd gains for the controller
"""
def __init__(self, pid: Tuple[float, float, float]) -> None:
self.kp, self.ki, self.kd = pid
self.integrator = 0
self.prev_err = 0
self.first = True
def policy(self, state: np.ndarray, dt: float) -> Tuple[float, dict]:
"""PID policy. This just calls `do_pid` on the state, but you can
add data collection values to it.
Parameters
----------
state : np.ndarray
system state
dt : float
control timestep
Returns
-------
Tuple[float, dict]
action, data pair
"""
action = self.do_pid(dt, self.kp, self.ki, self.kd, state)
return action, {}
class PID_UKF(Controller):
"""PID controller with unscented kalman filter. The unscented kalman filter
uses variance on the scale of `var_t` to get the measurement noise estimate.
(`var_t` is a measurement of time in seconds.)
Parameters
----------
pid : Tuple[float, float, float]
kp, ki, kd gains for the controller
pend : [type]
object containing the pendulum under control.
dt : float
control timestep
var_t : float
window over which to collect the measurement
"""
def __init__(
self, pid: Tuple[float, float, float], pend, dt: float, var_t: float
) -> None:
# PID Parameters
self.kp, self.ki, self.kd = pid
self.integrator = 0
self.prev_err = 0
# UKF Parameters
self.var_t = var_t
self.A, self.B = self.get_linear_sys(pend.jacA, pend.jacB, dt)
self.pend = pend
def sys_dynamics(t0, x):
# unpack variables
_, xd, t, td, u = x[0], x[1], x[2], x[3], x[4]
m, M, l, g, cfric, pfric = (
self.pend.m,
self.pend.M,
self.pend.l,
self.pend.g,
self.pend.cfric,
self.pend.pfric,
)
cost, sint = np.cos(t), np.sin(t)
# system dynamics
xdd = (g * m * sint * cost + u - m * l * td * td * sint) / (
M + m - m * cost * cost
)
tdd = (g * sint + xdd * cost) / l
# viscous friction
xdd += -1 * cfric * xd
tdd += -1 * pfric * td
# [state] + [u] is returned for the solver
return np.array([xd, xdd, td, tdd, u])
def fx(x, dt, u=0):
# we just tack u onto the end so one arg is passed into the solver
x_in = np.concatenate((x, np.atleast_1d(u)), axis=0)
# solve IVP
sol = integrate.solve_ivp(sys_dynamics, (0, dt), x_in)
return sol.y[:4, -1]
def hx(x):
return x
self.kf = self.create_ukf(dt, hx, fx)
self.prior_states = []
self.tick = 0
def policy(self, state: np.ndarray, dt: float) -> Tuple[float, dict]:
"""Measure variance from prior states over `var_t`, then use that variance
to compute an estimated state with an unscented kalman filter. The measured
state is then used for the PID control policy.
Parameters
----------
state : np.ndarray
system state
dt : float
timestep
Returns
-------
Tuple[float, dict]
action, data pair
"""
self.get_and_store_priors(state, round(self.var_t * 1 / dt))
prior = np.array(self.prior_states)
if self.tick > 1:
var = np.cov(prior.T)
else:
var = np.eye(4) * 1
self.kf.Q = var ** 2
self.kf.update(state)
est = self.kf.x
res = self.kf.y
action = self.do_pid(dt, self.kp, self.ki, self.kd, est)
self.kf.predict(dt, **{"u": action})
labels = ["x", "xd", "t", "td"]
data = {}
data.update(pendsim.utils.array_to_kv("res", labels, res))
data.update(pendsim.utils.array_to_kv("est", labels, est))
return action, data
class LQR_UKF(Controller):
"""LQR controller with unscented kalman filter state estimation.
Parameters
----------
qr : Tuple[np.ndarray, float]
tuple of (Q, R) matrices. In this case R is a 1x1 matrix or float.
lqrw : int
the window over which to perform LQR. Longer windows are more accurate,
because they take into account a longer approximation of the system
response. However, they take more time to compute and can be numerically
unstable.
pend : pendsim.sim.Pendulum
pendulum object with jacobians of A and B matrices.
dt : float
control timestep
var_t : float
window over which to collect variances, in units of seconds.
"""
def __init__(
self, qr: Tuple[np.ndarray, float], lqrw: int, pend, dt: float, var_t: float
) -> None:
# LQR Parameters
self.Q, self.R = qr
self.lqrw = lqrw
# UKF Parameters
self.A, self.B = self.get_linear_sys(pend.jacA, pend.jacB, dt)
def fx(x, dt, u=0):
return | np.dot(self.A, x) | numpy.dot |
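# Stand-alone sketch of the finite-horizon discrete LQR recursion described in
# do_lqr above: run the Riccati difference equation backwards from P[w] = Q, then
# roll forward to get the gain applied to the current state. The plant matrices
# here are arbitrary stand-ins, not the linearised pendulum.
import numpy as np

A = np.array([[1.0, 0.1], [0.0, 1.0]])
B = np.array([[0.0], [0.1]])
Q = np.eye(2)
R = np.array([[0.5]])
x = np.array([1.0, 0.0])
w = 10

P = [None] * (w + 1)
P[w] = Q
for k in range(w, 0, -1):
    gain_term = A.T @ P[k] @ B @ np.linalg.inv(R + B.T @ P[k] @ B) @ B.T @ P[k] @ A
    P[k - 1] = A.T @ P[k] @ A - gain_term + Q

u = [np.linalg.inv(R + B.T @ P[k] @ B) @ B.T @ P[k] @ A @ x for k in range(w)]
print(np.squeeze(u))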
import os
import numpy as np
import torch
id = []
statuses_count = []
followers_count = []
friends_count = []
favourites_count = []
listed_count = []
default_profile = []
geo_enabled = []
profile_uses_background_image = []
verified = []
protected = []
for i in range(1, 41):
files = os.listdir('./profile/' + str(i))
for file in files:
f = open('./profile/' + str(i) + '/' + file, 'r', encoding='UTF-8')
wnn = f.readlines()
# print('this is a error ID with error code' in wnn[0])
if 'this is a error ID with error code' in wnn[0]:
# print('Error!' + file)
f.close()
continue
# print('#' + str(len(id)))
id.append(file[: -8])
# print(len(id))
flag = 0
for line in wnn:
if line == 'statuses_count\n' and len(statuses_count) != len(id):
flag = 1
continue
if flag == 1:
statuses_count.append(float(line))
flag = 0
continue
if line == 'followers_count\n' and len(followers_count) != len(id):
flag = 2
continue
if flag == 2:
followers_count.append(float(line))
flag = 0
continue
if line == 'friends_count\n' and len(friends_count) != len(id):
flag = 3
continue
if flag == 3:
friends_count.append(float(line))
flag = 0
continue
if line == 'favourites_count\n' and len(favourites_count) != len(id):
flag = 4
continue
if flag == 4:
favourites_count.append(float(line))
flag = 0
continue
if line == 'listed_count\n' and len(listed_count) != len(id):
flag = 5
continue
if flag == 5:
listed_count.append(float(line))
flag = 0
continue
if line == 'default_profile\n' and len(default_profile) != len(id):
flag = 6
continue
if flag == 6:
default_profile.append(1. if line == 'True\n' else 0.)
flag = 0
continue
if line == 'geo_enabled\n' and len(geo_enabled) != len(id):
flag = 7
continue
if flag == 7:
geo_enabled.append(0. if line == 'False\n' else 1.)
flag = 0
continue
if line == 'profile_background_image_url\n' and len(profile_uses_background_image) != len(id):
flag = 8
continue
if flag == 8:
profile_uses_background_image.append(0. if line == 'None\n' else 1.)
flag = 0
continue
if line == 'verified\n' and len(verified) != len(id):
flag = 9
continue
if flag == 9:
verified.append(0. if line == 'False\n' else 1.)
flag = 0
continue
if line == 'protected\n' and len(protected) != len(id):
flag = 10
continue
if flag == 10:
protected.append(0. if line == 'False\n' else 1.)
flag = 0
continue
f.close()
if len(statuses_count) == len(followers_count)\
== len(friends_count) == len(favourites_count)\
== len(listed_count) == len(default_profile)\
== len(geo_enabled) == len(profile_uses_background_image)\
== len(verified) == len(protected) == len(id):
pass
else:
print(str(i) + '#' + str(file))
print(len(statuses_count))
print(len(followers_count))
print(len(friends_count))
print(len(favourites_count))
print(len(listed_count))
print(len(default_profile))
print(len(geo_enabled))
print(len(profile_uses_background_image))
print(len(verified))
print(len(protected))
print(len(id))
assert 1 == 0
statuses_count = (statuses_count - np.mean(statuses_count)) / np.sqrt(np.var(statuses_count))
followers_count = (followers_count - np.mean(followers_count)) / np.sqrt(np.var(followers_count))
friends_count = (friends_count - np.mean(friends_count)) / np.sqrt( | np.var(friends_count) | numpy.var |
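# The feature scaling above is a plain z-score: subtract the mean and divide by the
# standard deviation (the square root of the variance). A tiny self-contained check
# on synthetic counts, independent of the scraped profile files:
import numpy as np

counts = np.array([3.0, 10.0, 250.0, 42.0, 7.0])
z = (counts - np.mean(counts)) / np.sqrt(np.var(counts))
print(np.isclose(z.mean(), 0.0), np.isclose(z.std(), 1.0))  # True True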
import unittest
import numpy as np
from scipy.stats import binom, hypergeom
from pyapprox.numerically_generate_orthonormal_polynomials_1d import *
from pyapprox.orthonormal_polynomials_1d import *
from pyapprox.univariate_quadrature import gauss_jacobi_pts_wts_1D
from scipy.stats import beta as beta_rv
from functools import partial
from pyapprox.variables import float_rv_discrete
class TestNumericallyGenerateOrthonormalPolynomials1D(unittest.TestCase):
def test_krawtchouk(self):
num_coef=6
ntrials = 10
p = 0.5
xk = np.array(range(ntrials+1),dtype='float')
pk = binom.pmf(xk, ntrials, p)
ab_lanzcos = lanczos(xk,pk,num_coef)
ab_stieltjes = stieltjes(xk,pk,num_coef)
ab_exact = krawtchouk_recurrence(num_coef, ntrials, p)
assert np.allclose(ab_lanzcos,ab_exact)
assert np.allclose(ab_stieltjes,ab_exact)
from pyapprox.univariate_quadrature import gauss_quadrature
x,w = gauss_quadrature(ab_lanzcos,num_coef)
moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
true_moments = np.array([(xk**ii).dot(pk)for ii in range(num_coef)])
assert | np.allclose(moments,true_moments) | numpy.allclose |
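# The assertion above compares Gauss-quadrature moments against the raw moments of
# the binomial pmf. Those reference moments can be computed directly from the mass
# function, as sketched here without pyapprox:
import numpy as np
from scipy.stats import binom

ntrials, p, num_coef = 10, 0.5, 6
xk = np.arange(ntrials + 1, dtype=float)
pk = binom.pmf(xk, ntrials, p)
true_moments = np.array([(xk ** ii).dot(pk) for ii in range(num_coef)])
print(true_moments)  # starts with 1.0 (pmf sums to one) and the mean n*p = 5.0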
"""
Simulate allocating FOBOS apertures to a randomly drawn set of targets.
Contains code originally written by 2021 Akamai intern, <NAME>.
.. include:: ../include/links.rst
"""
from IPython import embed
import numpy
from matplotlib import pyplot, patches, ticker
from . import scriptbase
from ..targets import random_targets
from ..plan import configure_observations
class AllocSim(scriptbase.ScriptBase):
@classmethod
def get_parser(cls, width=None):
import argparse
parser = super().get_parser(description='FOBOS Allocation Simulation', width=width)
parser.add_argument('-d', '--density', nargs=3, default=[2.5, 40, 5],
help='Target density sampling: minimum, maximum, number of samples. '
'Density is sampled geometrically.')
parser.add_argument('-s', '--sims', type=int, default=1,
help='Number of simulations to use for mean and standard deviation '
'of trends.')
parser.add_argument('-m', '--mode', type=int, default=1,
help='The spectrograph mode. All spectrographs are put in the same '
'mode. Use 1 for MOS mode, 2 for multi-IFU mode.')
parser.add_argument('-e', '--embed', default=False, action='store_true',
help='Embed using IPython before completing the script.')
return parser
@staticmethod
def main(args):
rng = numpy.random.default_rng(99)
ndens = args.density[-1]
nsim = args.sims
density = numpy.geomspace(*args.density)
nobj = numpy.zeros((nsim, ndens), dtype=int)
nobs = numpy.zeros((nsim, ndens), dtype=int)
nalloc = numpy.empty((nsim, ndens), dtype=object)
nap = numpy.empty((nsim, ndens), dtype=object)
completeness = numpy.empty((nsim, ndens), dtype=object)
efficiency = numpy.empty((nsim, ndens), dtype=object)
mean_efficiency = numpy.empty((nsim, ndens), dtype=object)
for j in range(nsim):
for i in range(ndens):
print(f'Density {i+1}/{ndens}', end='\r')
objx, objy = random_targets(10., density=density[i], rng=rng)
aptype = numpy.zeros(nobj[j,i], dtype=int)
n_in_fov, obs_obj, obs_nap, obs_ap, obs_mode \
= configure_observations(objx, objy, aptype)
nobj[j,i] = n_in_fov
nobs[j,i] = len(obs_obj)
nalloc[j,i] = numpy.array([len(o) for o in obs_obj])
nap[j,i] = numpy.array(obs_nap)
completeness[j,i] = numpy.cumsum(nalloc[j,i])/nobj[j,i]
efficiency[j,i] = nalloc[j,i]/nap[j,i]
mean_efficiency[j,i] = numpy.cumsum(efficiency[j,i])/(numpy.arange(nobs[j,i])+1)
print(f'Density {ndens}/{ndens}')
obs_mask = numpy.empty((nsim, ndens), dtype=object)
max_nobs = numpy.amax(nobs, axis=0)
for j in range(nsim):
for i in range(ndens):
obs_mask[j,i] = numpy.zeros(max_nobs[i], dtype=bool)
if nobs[j,i] < max_nobs[i]:
obs_mask[j,i][nobs[j,i]:] = True
nalloc[j,i] = numpy.append(nalloc[j,i], numpy.zeros(max_nobs[i] - nobs[j,i]))
nap[j,i] = numpy.append(nap[j,i], numpy.zeros(max_nobs[i] - nobs[j,i]))
completeness[j,i] = numpy.append(completeness[j,i],
numpy.zeros(max_nobs[i] - nobs[j,i]))
efficiency[j,i] = numpy.append(efficiency[j,i], numpy.zeros(max_nobs[i] - nobs[j,i]))
mean_efficiency[j,i] = numpy.append(mean_efficiency[j,i],
numpy.zeros(max_nobs[i] - nobs[j,i]))
m_completeness = numpy.empty(ndens, dtype=object)
m_efficiency = numpy.empty(ndens, dtype=object)
m_fom = numpy.empty(ndens, dtype=object)
s_completeness = numpy.empty(ndens, dtype=object)
s_efficiency = numpy.empty(ndens, dtype=object)
s_fom = numpy.empty(ndens, dtype=object)
for i in range(ndens):
comp = numpy.ma.MaskedArray(numpy.stack(completeness[:,i]), mask=numpy.stack(obs_mask[:,i]))
m_completeness[i] = numpy.ma.mean(comp, axis=0).filled(0.0)
s_completeness[i] = numpy.ma.std(comp, axis=0).filled(0.0)
arr = numpy.ma.MaskedArray(numpy.stack(efficiency[:,i]), mask=numpy.stack(obs_mask[:,i]))
m_efficiency[i] = numpy.ma.mean(arr, axis=0).filled(0.0)
s_efficiency[i] = numpy.ma.std(arr, axis=0).filled(0.0)
arr = numpy.ma.sqrt(comp) \
* numpy.ma.MaskedArray(numpy.stack(mean_efficiency[:,i]), mask=numpy.stack(obs_mask[:,i]))
m_fom[i] = numpy.ma.mean(arr, axis=0).filled(0.0)
s_fom[i] = numpy.ma.std(arr, axis=0).filled(0.0)
w,h = pyplot.figaspect(1)
fig = pyplot.figure(figsize=(1.5*w,1.5*h))
ax = fig.add_axes([0.2, 0.69, 0.6, 0.3])
ax.minorticks_on()
ax.tick_params(which='major', length=8, direction='in', top=True, right=True)
ax.tick_params(which='minor', length=4, direction='in', top=True, right=True)
ax.set_xlim(0.8, numpy.amax(nobs) + 0.2)
ax.set_ylim(0., 1.05)
ax.xaxis.set_major_formatter(ticker.NullFormatter())
for i in range(ndens):
ax.fill_between( | numpy.arange(max_nobs[i]) | numpy.arange |
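# The simulation above tracks two per-observation statistics: completeness is the
# cumulative fraction of targets allocated so far, and efficiency is the fraction of
# deployed apertures that landed on targets. A toy illustration with made-up
# allocation counts (not real FOBOS configurations):
import numpy

n_targets = 100
allocated_per_obs = numpy.array([60, 25, 10])   # targets picked up in each observation
apertures_per_obs = numpy.array([90, 90, 90])   # apertures deployed in each observation

completeness = numpy.cumsum(allocated_per_obs) / n_targets
efficiency = allocated_per_obs / apertures_per_obs
mean_efficiency = numpy.cumsum(efficiency) / (numpy.arange(len(efficiency)) + 1)
print(completeness, efficiency, mean_efficiency)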
#!/usr/bin/env python
# Copyright 2017 <NAME>, ASL, ETH Zurich, Switzerland
# Copyright 2017 <NAME>, ASL, ETH Zurich, Switzerland
# Copyright 2017 <NAME>, ASL, ETH Zurich, Switzerland
#!/usr/bin/env python
import unittest
import numpy as np
import ActivationFunction as activation
import GradientDescentOptimizer as gdo
import FCLayer as layer
import FCNetwork as network
import LossFunction as loss
import Support as sup
# TODO: Update and extend tests
# Global variables for test network in order to be accessible in all test cases.
# Input
x = np.random.randn(1, 3)
# Layer 1
w_1 = np.random.randn(3,4)
b_1 = np.random.randn(1,4)
# Layer 2
w_out = np.random.randn(4,2)
b_out = np.random.randn(1,2)
# Output labels
y_target = np.random.randn(1, 2)
input_dim = x.shape[1]
hidden_layer_specs = []
hidden_layer_specs.append({'activation': activation.SigmoidActivation(), 'dim': w_1.shape[1]})
output_dim = y_target.shape[1]
fc_net = network.FCNetwork(input_dim, output_dim, hidden_layer_specs)
fc_net.layers[0].setWeights(w_1)
fc_net.layers[0].setBiases(b_1)
fc_net.layers[1].setWeights(w_out)
fc_net.layers[1].setBiases(b_out)
class TestNetwork(unittest.TestCase):
# Was tested above, can be used now
sigmoid = activation.SigmoidActivation()
def testGradients(self):
# General computations
loss_function = loss.SquaredErrorFunction()
y = fc_net.output(x)
loss_derivative = loss_function.derivative(y, y_target)
# Manually compute gradients
h_1 = fc_net.evaluateLayer(1, x)
output_sigmoid = self.sigmoid.evaluate(np.dot(h_1, w_out) + b_out)
output_sigmoid_derivative = output_sigmoid * (1 - output_sigmoid)
## Output layer
delta_out = loss_derivative * output_sigmoid_derivative
L_w_out = np.dot(h_1.T, delta_out)
L_b_out = delta_out
## Hidden layer
delta_1 = np.dot(delta_out, w_out.T) * self.sigmoid.derivative(np.dot(x, w_1) + b_1)
L_w_1 = | np.dot(x.T, delta_1) | numpy.dot |
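# The test above derives backpropagation gradients by hand; a common way to validate
# such derivations is a finite-difference check, sketched here for a single sigmoid
# layer with a squared-error loss (independent of the FCNetwork classes under test).
import numpy as np

def loss_fn(w, x, y_target):
    y = 1.0 / (1.0 + np.exp(-(x @ w)))
    return 0.5 * np.sum((y - y_target) ** 2)

rng = np.random.default_rng(0)
x = rng.normal(size=(1, 3))
w = rng.normal(size=(3, 2))
y_target = rng.normal(size=(1, 2))

# analytic gradient via the chain rule
y = 1.0 / (1.0 + np.exp(-(x @ w)))
delta = (y - y_target) * y * (1 - y)
grad_analytic = x.T @ delta

# numerical gradient by central differences
eps = 1e-6
grad_numeric = np.zeros_like(w)
for i in range(w.shape[0]):
    for j in range(w.shape[1]):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[i, j] += eps
        w_minus[i, j] -= eps
        grad_numeric[i, j] = (loss_fn(w_plus, x, y_target) - loss_fn(w_minus, x, y_target)) / (2 * eps)

assert np.allclose(grad_analytic, grad_numeric, atol=1e-6)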