repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
DeepGlow | DeepGlow-main/setup.py |
"""DeepGlow
DeepGlow is a Python package which emulates the BOXFIT gamma-ray burst afterglow simulation code using a neural network approach.
It can calculate light curves in milliseconds to within a few percent accuracy compared to the original BOXFIT model.
"""
from setuptools import setup
setup(
name='DeepGlow',
version='1.0.0',
description='A neural network emulator for BOXFIT',
long_description='DeepGlow is a Python package which emulates the BOXFIT gamma-ray burst afterglow simulation code using a neural network approach.\nIt can calculate light curves in milliseconds to within a few percent accuracy compared to the original BOXFIT model.\n',
url='https://github.com/OMBoersma/DeepGlow',
author='Oliver Boersma',
author_email='[email protected]',
license='BSD 2-clause',
packages=['DeepGlow'],
install_requires=['tensorflow>=2.0.0',
'numpy',
'importlib.resources'],
include_package_data=True,
package_dir={'DeepGlow': 'DeepGlow'},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering'
],
)
| 1,579 | 38.5 | 274 | py |
DeepGlow | DeepGlow-main/DeepGlow/DGmain.py | import numpy as np
from tensorflow import keras
import importlib.resources
class Emulator(object):
def __init__(self, simtype='ism'):
if simtype == 'ism':
with importlib.resources.path('DeepGlow', 'data') as data_path:
scale_path = data_path / "scale_facs_ism_final.csv"
scale_txt = scale_path.absolute().as_posix()
scale_facs = np.loadtxt(scale_txt)
self.Xmean = scale_facs[:-234][::2]
self.Xstd = scale_facs[:-234][1::2]
self.Ymean = scale_facs[-234:][::2]
self.Ystd = scale_facs[-234:][1::2]
model_path = data_path / "model-ism-final.hdf5"
model_hdf = model_path.absolute().as_posix()
self.NNmodel = keras.models.load_model(model_hdf, compile=False)
elif simtype == 'wind':
with importlib.resources.path('DeepGlow', 'data') as data_path:
scale_path = data_path / "scale_facs_wind_final.csv"
scale_txt = scale_path.absolute().as_posix()
scale_facs = np.loadtxt(scale_txt)
self.Xmean = scale_facs[:-234][::2]
self.Xstd = scale_facs[:-234][1::2]
self.Ymean = scale_facs[-234:][::2]
self.Ystd = scale_facs[-234:][1::2]
model_path = data_path / "model-wind-final.hdf5"
model_hdf = model_path.absolute().as_posix()
self.NNmodel = keras.models.load_model(model_hdf, compile=False)
# Fixed model parameters
self.Nparams = 8
self.nDP = 117
self.sdays = 60*60*24
self.tcomp = np.geomspace(0.1, 1000, self.nDP)*self.sdays
self.ref_d_L = 50*3.08567758 * 1e24
def flux(self, params, t_obs, nu_obs):
z = params[0]
log10_d_L_28_ = params[1]
log10_E_iso_53_ = params[2]
log10_n_ref_ = params[3]
theta_c = params[4]
theta_jn = params[5]*theta_c
p = params[6]
log10_eps_e_bar_ = params[7]
log10_eps_B_ = params[8]
log10_xi_N_ = params[9]
xi_N = 10**log10_xi_N_
E0 = (10**(log10_E_iso_53_)) * 1e53 * (xi_N)
d_L = (10**(log10_d_L_28_)) * 1e28
n0 = (10**(log10_n_ref_)) * (xi_N)
ee = (10**(log10_eps_e_bar_)) * ((p-1)/(p-2)) * (1.0/xi_N)
eB = (10**(log10_eps_B_)) * (1.0/xi_N)
t_obs = t_obs / (1+z)
nu_obs = nu_obs*(1+z)
if theta_jn == 0:
theta_jn = 1e-6
nu_unique = np.unique(nu_obs)
nu_inds = [np.where(nu_obs == nu)[0] for nu in nu_unique]
f_obs = np.zeros(len(t_obs))
inp_arr = np.zeros((len(nu_unique), self.Nparams))
inp_arr[:, :] = [(np.log10(E0), np.log10(theta_jn), np.log10(theta_c), np.log10(
n0), p, np.log10(ee), np.log10(eB), np.log10(nu)) for nu in nu_unique]
outY_unscaled = 10**((self.NNmodel((inp_arr - self.Xmean) /
self.Xstd)) * self.Ystd + self.Ymean).numpy()
for i, nu in enumerate(nu_unique):
t_nu = t_obs[nu_inds[i]]
dataOut = np.interp(t_nu, self.tcomp, outY_unscaled[i, :])
f_obs[nu_inds[i]] = dataOut
f_obs = f_obs*(1.0+z)/((d_L/self.ref_d_L)**2)
return f_obs
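# --- Usage sketch (added for illustration; not part of the original module) ---
# The values below are hypothetical and only show the expected parameter order:
# [z, log10(d_L / 1e28 cm), log10(E_iso / 1e53 erg), log10(n_ref), theta_c,
#  theta_jn / theta_c, p, log10(eps_e_bar), log10(eps_B), log10(xi_N)].
# t_obs and nu_obs must be arrays of equal length (one frequency per time).
#
# emulator = Emulator(simtype='ism')
# t_obs = np.geomspace(1e4, 1e7, 50)      # observer times in seconds
# nu_obs = np.full(50, 1.4e9)             # observing frequency in Hz
# params = [0.5, 0.0, 0.0, 0.0, 0.1, 0.5, 2.2, -1.0, -3.0, 0.0]
# flux = emulator.flux(params, t_obs, nu_obs)   # flux densities at nu_obs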
| 3,302 | 43.04 | 88 | py |
DeepGlow | DeepGlow-main/DeepGlow/__init__.py |
"""DeepGlow
Library to emulate the BOXFIT gamma-ray burst afterglow simulation code using a neural network approach.
"""
from .DGmain import Emulator
__version__ = "1.0.0"
__author__ = 'Oliver Boersma'
| 207 | 16.333333 | 104 | py |
DeepGlow | DeepGlow-main/paper/MCstats.py | import pickle
import sys
import numpy as np
import json
MCdist_file_scalefit_ism = open('scalefit-final-ism.pickle','rb')
MCdist_file_scalefit_wind = open('scalefit-final-wind.pickle','rb')
MCdist_file_deepglow_ism = open('deepglow-final-ism.pickle','rb')
MCdist_file_deepglow_wind = open('deepglow-final-wind.pickle','rb')
json_stats_scalefit_ism = json.load(open('scalefit-final-ism-stats.json','r'))
json_stats_deepglow_ism = json.load(open('deepglow-final-ism-stats.json','r'))
json_stats_scalefit_wind = json.load(open('scalefit-final-wind-stats.json','r'))
json_stats_deepglow_wind = json.load(open('deepglow-final-wind-stats.json','r'))
lnZ_scalefit_ism = json_stats_scalefit_ism['global evidence']
lnZ_scalefit_wind = json_stats_scalefit_wind['global evidence']
lnZ_deepglow_ism = json_stats_deepglow_ism['global evidence']
lnZ_deepglow_wind = json_stats_deepglow_wind['global evidence']
e = np.e
print('Bayes factor Z_wind_scalefit/Z_ism_scalefit : '+str( (e**lnZ_scalefit_wind) / (e**lnZ_scalefit_ism)))
print('Bayes factor Z_wind_deepglow/Z_ism_deepglow : '+str( (e**lnZ_deepglow_wind) / (e**lnZ_deepglow_ism)))
print('Bayes factor Z_ism_deepglow/Z_ism_scalefit : '+str( (e**lnZ_deepglow_ism) / (e**lnZ_scalefit_ism)))
print('Bayes factor Z_wind_deepglow/Z_wind_scalefit : '+str( (e**lnZ_deepglow_wind) / (e**lnZ_scalefit_wind)))
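# Note (added): exponentiating each log-evidence separately can overflow for
# large |lnZ|; an equivalent and numerically safer form exponentiates the
# difference instead, e.g.:
# bayes_wind_ism_scalefit = np.exp(lnZ_scalefit_wind - lnZ_scalefit_ism)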
cache_scalefit_ism = pickle.load(MCdist_file_scalefit_ism)
cache_scalefit_wind = pickle.load(MCdist_file_scalefit_wind)
cache_deepglow_ism = pickle.load(MCdist_file_deepglow_ism)
cache_deepglow_wind = pickle.load(MCdist_file_deepglow_wind)
caches = [cache_scalefit_ism,cache_scalefit_wind,cache_deepglow_ism,cache_deepglow_wind]
names = ['scalefit_ism','scalefit_wind','deepglow_ism','deepglow_wind']
prmnames_latex = [r'$\log_{10}\theta_0$',r'$\log_{10}E_{K, \mathrm{iso}}$', r'$\log_{10}n_{\mathrm{ref}}$',r'$\theta_{\mathrm{obs}} / \theta_0$',r'$p$',r'$\log_{10}\epsilon_B$',r'$\log_{10}\bar{\epsilon}_e$',r'$A_V$']
paramnames=['theta_0','E_K_iso','n_ref','theta_obs_frac','p','epsilon_B','epsilon_e_bar','A_V']
class Param():
def __init__(self):
self.median = None
self.upper = None
self.lower = None
self.samples = None
dict_list = []
for i,dist in enumerate(caches):
marge_stat = dist.getMargeStats()
prms = dist.getParams()
prm_dict = {}
for param_name in paramnames:
density = dist.get1DDensity(param_name)
samples = getattr(prms,param_name)
_min, _max, _has_min, _has_top = density.getLimits([0.6827])
median = np.median(samples)
lower = _min - median
upper = _max - median
prm_class = Param()
prm_class.lower = lower
prm_class.median = median
prm_class.upper = upper
prm_class.samples = samples
prm_dict[param_name] =prm_class
dict_list.append(prm_dict)
sf_ism_dict = dict_list[0]
sf_wind_dict = dict_list[1]
dg_ism_dict = dict_list[2]
dg_wind_dict = dict_list[3]
ebar_sf_wind = 10**sf_wind_dict['epsilon_e_bar'].samples
p_sf_wind = sf_wind_dict['p'].samples
ebar_dg_wind =10**dg_wind_dict['epsilon_e_bar'].samples
p_dg_wind = dg_wind_dict['p'].samples
ee_sf_wind = ebar_sf_wind * ((p_sf_wind - 1) / (p_sf_wind - 2))
ee_dg_wind = ebar_dg_wind * ((p_dg_wind - 1) / (p_dg_wind - 2))
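# Note (added): this undoes the bar-parametrisation used in DGmain.py, where
# epsilon_e is obtained from epsilon_e_bar via the factor (p-1)/(p-2); the same
# conversion is applied here to the posterior samples.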
print(np.median(ee_sf_wind))
with open('combined_table_ism.txt','w') as table:
table.write('\\begin{table}\n')
table.write('\\begin{tabular} {| l | l | l | l |}\n')
table.write('\\hline\n')
table.write('Parameter & \\texttt{DeepGlow} & \\texttt{ScaleFit} & Match \\\\\n')
table.write('\\hline\n')
for j,param_name in enumerate(paramnames):
prm_dg_ism = dg_ism_dict[param_name]
prm_sf_ism = sf_ism_dict[param_name]
if (prm_dg_ism.median + prm_dg_ism.upper > prm_sf_ism.median + prm_sf_ism.lower and prm_dg_ism.median < prm_sf_ism.median) or (prm_dg_ism.median + prm_dg_ism.lower < prm_sf_ism.median + prm_sf_ism.upper and prm_dg_ism.median > prm_sf_ism.median):
line = r'{'+prmnames_latex[j]+r'}' + r' & $'+ '%.2f'%(prm_dg_ism.median) +r'^{+' + '%.2f'%(prm_dg_ism.upper)+r'}_{'+'%.2f'%(prm_dg_ism.lower)+r'}$' + r' & $'+ '%.2f'%(prm_sf_ism.median) +r'^{+' + '%.2f'%(prm_sf_ism.upper)+r'}_{'+'%.2f'%(prm_sf_ism.lower)+r'}$ & $ \quad \checkmark$ \\'+'\n'
else:
line = r'{'+prmnames_latex[j]+r'}' + r' & $'+ '%.2f'%(prm_dg_ism.median) +r'^{+' + '%.2f'%(prm_dg_ism.upper)+r'}_{'+'%.2f'%(prm_dg_ism.lower)+r'}$' + r' & $'+ '%.2f'%(prm_sf_ism.median) +r'^{+' + '%.2f'%(prm_sf_ism.upper)+r'}_{'+'%.2f'%(prm_sf_ism.lower)+r'}$ & $ \quad \times$ \\'+'\n'
table.write(line)
table.write('\\hline\n')
table.write('\\end{tabular}\n')
table.write('\\caption{ISM}')
table.write('\n\\end{table}')
with open('combined_table_wind.txt','w') as table:
table.write('\\begin{table}\n')
table.write('\\begin{tabular} {| l | l | l | l |}\n')
table.write('\\hline\n')
table.write('Parameter & \\texttt{DeepGlow} & \\texttt{ScaleFit} & Match \\\\\n')
table.write('\\hline\n')
for j,param_name in enumerate(paramnames):
prm_dg_wind = dg_wind_dict[param_name]
prm_sf_wind = sf_wind_dict[param_name]
if (prm_dg_wind.median + prm_dg_wind.upper > prm_sf_wind.median + prm_sf_wind.lower and prm_dg_wind.median < prm_sf_wind.median) or (prm_dg_wind.median + prm_dg_wind.lower < prm_sf_wind.median + prm_sf_wind.upper and prm_dg_wind.median > prm_sf_wind.median):
line = r'{'+prmnames_latex[j]+r'}' + r' & $'+ '%.2f'%(prm_dg_wind.median) +r'^{+' + '%.2f'%(prm_dg_wind.upper)+r'}_{'+'%.2f'%(prm_dg_wind.lower)+r'}$' + r' & $'+ '%.2f'%(prm_sf_wind.median) +r'^{+' + '%.2f'%(prm_sf_wind.upper)+r'}_{'+'%.2f'%(prm_sf_wind.lower)+r'}$ & $ \quad \checkmark$ \\'+'\n'
else:
line = r'{'+prmnames_latex[j]+r'}' + r' & $'+ '%.2f'%(prm_dg_wind.median) +r'^{+' + '%.2f'%(prm_dg_wind.upper)+r'}_{'+'%.2f'%(prm_dg_wind.lower)+r'}$' + r' & $'+ '%.2f'%(prm_sf_wind.median) +r'^{+' + '%.2f'%(prm_sf_wind.upper)+r'}_{'+'%.2f'%(prm_sf_wind.lower)+r'}$ & $ \quad \times$ \\'+'\n'
table.write(line)
table.write('\\hline\n')
table.write('\\end{tabular}\n')
table.write('\\caption{wind}')
table.write('\n\\end{table}')
| 6,274 | 54.530973 | 310 | py |
DeepGlow | DeepGlow-main/paper/whichbox.py | def which_boxes(theta_0):
boxes_ISM = ['0','1']*(theta_0<=2e-2) + ['1','2']*(theta_0 > 2e-2 and theta_0<=3e-2) + ['2','3']*(theta_0 >3e-2 and theta_0<=4e-2) + ['3','4']*(theta_0 >4e-2 and theta_0<=4.5e-2) + ['4','5']*(theta_0 > 4.5e-2 and theta_0<=5e-2) + ['5','6']*(theta_0 > 5e-2 and theta_0<=7.5e-2) + ['6','7']*(theta_0>7.5e-2 and theta_0<=1e-1) + ['7','8']*(theta_0>1e-1 and theta_0<=1.25e-1) + ['8','9']*(theta_0>1.25e-1 and theta_0<=1.5e-1) + ['9','10']*(theta_0>1.5e-1 and theta_0<=1.75e-1) + ['10','11']*(theta_0>1.75e-1 and theta_0<=2.0e-1) + ['11','12']*(theta_0>2e-1 and theta_0<=2.25e-1) + ['12','13']*(theta_0>2.25e-1 and theta_0<=2.5e-1) + ['13','14']*(theta_0>2.5e-1 and theta_0<=2.75e-1) + ['14','15']*(theta_0>2.75e-1 and theta_0<=3.0e-1) + ['15','16']*(theta_0>3.0e-1 and theta_0<=3.25e-1) + ['16','17']*(theta_0>3.25e-1 and theta_0<=3.5e-1) + ['17','18']*(theta_0>3.5e-1 and theta_0<=3.75e-1) + ['18','19']*(theta_0>3.75e-1 and theta_0<=4.0e-1) + ['19','20']*(theta_0>4.0e-1 and theta_0<=4.25e-1) + ['20','21']*(theta_0>4.25e-1 and theta_0<=4.5e-1) + ['21','22']*(theta_0>4.5e-1 and theta_0<=5.0e-1) + ['22','23']*(theta_0>5.0e-1 and theta_0<=6.0e-1) + ['23','24']*(theta_0>6.0e-1 and theta_0<=7.0e-1) + ['24','25']*(theta_0>7.0e-1 and theta_0<=8.0e-1) + ['25','26']*(theta_0>8.0e-1 and theta_0<=9.0e-1) + ['26','27']*(theta_0>9.0e-1 and theta_0<=1.0) + ['27','28']*(theta_0>1.0 and theta_0<=1.1) + ['28','29']*(theta_0>1.1 and theta_0<=1.2) + ['29','30']*(theta_0>1.2 and theta_0<=1.3) + ['30','31']*(theta_0>1.3 and theta_0<=1.4) + ['31','32']*(theta_0>1.4 and theta_0<=1.5) + ['32','33']*(theta_0>1.5 and theta_0<=1.6)
return boxes_ISM
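# Added sketch: the boolean-multiplication chain above is effectively a lookup
# over fixed theta_0 bin edges; an equivalent formulation with np.searchsorted
# is shown below for clarity (edge values copied from the thresholds above;
# valid for theta_0 <= 1.6, the range covered by the original function).
import numpy as np

_BOX_EDGES = np.array([2e-2, 3e-2, 4e-2, 4.5e-2, 5e-2, 7.5e-2, 1e-1, 1.25e-1,
                       1.5e-1, 1.75e-1, 2.0e-1, 2.25e-1, 2.5e-1, 2.75e-1,
                       3.0e-1, 3.25e-1, 3.5e-1, 3.75e-1, 4.0e-1, 4.25e-1,
                       4.5e-1, 5.0e-1, 6.0e-1, 7.0e-1, 8.0e-1, 9.0e-1, 1.0,
                       1.1, 1.2, 1.3, 1.4, 1.5, 1.6])

def which_boxes_alt(theta_0):
    """Equivalent to which_boxes for theta_0 <= 1.6."""
    k = int(np.searchsorted(_BOX_EDGES, theta_0, side='left'))
    return [str(k), str(k + 1)]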
| 1,669 | 277.333333 | 1,619 | py |
DeepGlow | DeepGlow-main/paper/boxfit_clrDLtrain.py | from CLR.clr_callback import CyclicLR
from sklearn.preprocessing import StandardScaler
import numpy as np
from tensorflow.keras.losses import MeanAbsoluteError
from keras import layers
import keras
from tensorflow.keras import backend as K
import tensorflow as tf
import pandas as pd
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_features = pd.read_csv(
'boxfitdata/boxfit_ism_final_trainfeatures.csv')
test_features = pd.read_csv(
'boxfitdata/boxfit_ism_final_testfeatures.csv')
train_labels = pd.read_csv(
'boxfitdata/boxfit_ism_final_trainlabels.csv')
test_labels = pd.read_csv(
'boxfitdata/boxfit_ism_final_testlabels.csv')
scaler_in = StandardScaler()
scaler_out = StandardScaler()
train_features_scaled = scaler_in.fit_transform(train_features)
train_labels_scaled = scaler_out.fit_transform(train_labels)
test_features_scaled = scaler_in.transform(test_features)
test_labels_scaled = scaler_out.transform(test_labels)
filepath = 'boxfitfinal/'
def masked_metric(y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
class CustomAccuracy(tf.keras.losses.Loss):
def __init__(self):
super().__init__()
def call(self, y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
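# Worked example (added for clarity): with y_true = [[1.0, nan]] and
# y_pred = [[0.5, 7.0]], the nan entry is zeroed in both tensors, the plain MAE
# over both entries is 0.25, and the correction factor size/nonzero = 2/1
# rescales it to 0.5, i.e. the mean absolute error over the single valid label.
# (A genuine label of exactly 0 would also be discounted by this scheme.)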
def build_and_compile_model():
model = keras.Sequential([
layers.Dense(
1000, input_dim=train_features_scaled.shape[1], activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(117, activation='linear')
])
model.compile(loss=CustomAccuracy(), metrics=[masked_metric],
optimizer=keras.optimizers.Nadam(0.001))
return model
trainingsizes = [int(len(train_features_scaled)/32), int(len(train_features_scaled)/16), int(
len(train_features_scaled)/8), int(len(train_features_scaled)/4), int(len(train_features_scaled)/2)]
for Ntrain in trainingsizes:
dnn_model = build_and_compile_model()
dnn_model.summary()
train_features_scaled_subset = train_features_scaled[0:Ntrain]
train_labels_scaled_subset = train_labels_scaled[0:Ntrain]
batch_size = 128
clr_step_size = int(4 * (len(train_features_scaled_subset)/batch_size))
base_lr = 1e-4
max_lr = 1e-2
mode = 'triangular2'
clr = CyclicLR(base_lr=base_lr, max_lr=max_lr,
step_size=clr_step_size, mode=mode)
history = dnn_model.fit(train_features_scaled_subset, train_labels_scaled_subset,
validation_split=0.0, batch_size=batch_size, verbose=1, epochs=200, callbacks=[clr])
dnn_model.save(filepath+'boxfit_ism_stdsc_'+str(Ntrain)+'.h5')
test_predictions_scaled = dnn_model.predict(test_features_scaled)
test_predictions_unscaled = scaler_out.inverse_transform(
test_predictions_scaled)
test_predictions = 10**test_predictions_unscaled
test_labels_lin = 10**test_labels
err = np.abs(test_predictions-test_labels_lin)/test_labels_lin
err = err.values.flatten()
print('errors <0.1: '+str(len(err[err < 0.1])/len(err)))
print('errors >0.2: '+str(len(err[err > 0.2])/len(err)))
print('median errors: '+str(np.nanmedian(err)))
| 4,003 | 36.773585 | 112 | py |
DeepGlow | DeepGlow-main/paper/plot_overlay.py | import pickle
from getdist import plots
from matplotlib import pyplot as plt
import sys
from matplotlib import rc
f1 = sys.argv[1]
f2 = sys.argv[2]
f3 = sys.argv[3]
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
with open(f1,mode='rb') as f:
scalefit = pickle.load(f)
with open(f2,mode='rb') as f:
deepglow = pickle.load(f)
defaultSettings = plots.GetDistPlotSettings()
defaultSettings.rcSizes(axes_fontsize=12, lab_fontsize=12)
g = plots.getSubplotPlotter(width_inch=10)
g.settings.rcSizes(axes_fontsize=12,lab_fontsize=15)
g.settings.num_plot_contours = 5
g.settings.legend_fontsize = 20
g.triangle_plot([scalefit,deepglow],legend_labels=[r'\texttt{ScaleFit}',r'\texttt{DeepGlow}'],params=['theta_0','E_K_iso','n_ref','theta_obs_frac','p','epsilon_B','epsilon_e_bar','A_V'],filled=True, colors='Set1')
plt.savefig(f3+'.png', dpi=400)
| 889 | 31.962963 | 213 | py |
DeepGlow | DeepGlow-main/paper/run_boxfit.py | import numpy as np
import time
import pandas as pd
import subprocess
import sys
from whichbox import which_boxes
n = int(2500)
Mpc_cm = 3.08567758 * 1e24
redshift = 0
filenumber = sys.argv[1]
nDP = 117
dp = []
for k in range(nDP):
dp.append('dp'+str(k+1))
data = pd.DataFrame(index=np.arange(int(n)), columns=[
'Eiso', 'theta_jn', 'theta_c', 'n0', 'p', 'epsilon_e', 'epsilon_B', 'nu']+dp)
sdays = 60*60*24
tdata = np.geomspace(0.1, 1000, nDP)*sdays
E = np.random.uniform(50, 56, n)
theta_c = np.random.uniform(np.log10(0.01), np.log10(0.5*np.pi), n)
theta_obs = np.zeros(n)
for i, th in enumerate(theta_c):
if 2*(10**th) <0.5*np.pi:
theta_obs[i] = np.random.uniform(0.01, 2*(10**th))
else:
theta_obs[i] = np.random.uniform(0.01, 0.5*np.pi)
n0 = np.random.uniform(-5, 3, n)
p = np.random.uniform(2, 3, n)
ee = np.random.uniform(-10, 0, n)
eB = np.random.uniform(-10, 0, n)
nu_obs = np.random.uniform(8, 19, n)
data.iloc[:, :-nDP] = np.c_[E, theta_obs, theta_c, n0, p, ee, eB, nu_obs]
root = '/home/oboersma/boxfit/lightcurve_dataset/data'
boxroot = '/home/oboersma/boxfit/bin'
times = []
for i in range(n):
E_i, theta_obs_i, theta_c_i, n0_i, p_i, ee_i, eB_i, nu_i = data.iloc[i, :-nDP]
E_i = 10**E_i
theta_c_i = 10**theta_c_i
n0_i = 10**n0_i
ee_i = 10**ee_i
eB_i = 10**eB_i
nu_i = 10**nu_i
boxes = which_boxes(theta_c_i)
t0 = time.time()
cmd = "mpirun -n 40 --use-hwthread-cpus {boxroot}/boxfit_noboost -t_0={t0} -t_1={t1} -nu_0={nu} -nu_1={nu} -d_L={d_L} -z={z} -theta_0={theta_0} -E={E0} -n={n0} -theta_obs={theta_obs} -p={p} -epsilon_B={eB} -epsilon_E={eE} -ksi_N={ksi_N} -no_points={no_points} -box0={box0} -box1={box1}".format(
boxroot=boxroot, t0=tdata[0], t1=tdata[-1], nu=nu_i, d_L=50*Mpc_cm, z=redshift, theta_0=theta_c_i, E0=E_i, n0=n0_i, theta_obs=theta_obs_i, p=p_i, eB=eB_i, eE=ee_i, ksi_N=1, no_points=int(nDP), box0=boxes[0], box1=boxes[1])
p = subprocess.run(cmd, shell=True, capture_output=True).stdout
t1 = time.time()
try:
lines = p.splitlines()[-int(nDP+1):-1]
lc_data = [float(str(line).split(",")[-1][1:-1]) for line in lines]
        data.iloc[i, -nDP:] = np.log10(lc_data)
times.append(t1-t0)
except:
print(p,flush=True)
if not i % 100:
print(i,flush=True)
np.savetxt(root+'/timing/time_'+filenumber+'.txt', times)
data.to_csv(root+'/lcdata_'+filenumber+'.csv', index=False)
| 2,446 | 37.84127 | 298 | py |
DeepGlow | DeepGlow-main/paper/split_traintest.py | import pandas as pd
import numpy as np
import sys
filename = sys.argv[1]
out = sys.argv[2]
print('reading in data')
dataset =pd.read_csv(filename)
print('dataset size: '+str(len(dataset)))
allinf = np.where(np.all(dataset.iloc[:,8:]==-np.inf,axis=1))[0]
dataset = dataset.drop(allinf)
print('dataset size: '+str(len(dataset)))
dataset.replace([np.inf, -np.inf], np.nan, inplace=True)
print('Replaced infinities with nan in dataset')
print('Dividing training and test data')
train_dataset = dataset.sample(frac=0.9, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
train_features = train_dataset.iloc[:,:8]
train_labels = train_dataset.iloc[:,8:]
test_features = test_dataset.iloc[:,:8]
test_labels = test_dataset.iloc[:,8:]
print('saving csvs')
train_features.to_csv(out+'_trainfeatures.csv',index=False)
train_labels.to_csv(out+'_trainlabels.csv',index=False,na_rep=np.nan)
test_features.to_csv(out+'_testfeatures.csv',index=False)
test_labels.to_csv(out+'_testlabels.csv',index=False,na_rep=np.nan)
| 1,049 | 25.923077 | 69 | py |
DeepGlow | DeepGlow-main/paper/combine_datasets.py | import sys
import pandas as pd
nfiles = len(sys.argv[1:])
files = sys.argv[1:-1]
out = sys.argv[-1]
total = []
for i,filename in enumerate(files):
print('reading in file '+str(i))
ext = filename[-3:]
if ext =='hdf':
filedata = pd.read_hdf(filename,key='data')
else:
filedata = pd.read_csv(filename)
total.append(filedata)
dataset = pd.concat(total,ignore_index=True)
dataset.to_csv(out+'_total.csv',index=False)
| 461 | 20 | 59 | py |
DeepGlow | DeepGlow-main/paper/boxfit_clrDLtrain_wind.py | from CLR.clr_callback import CyclicLR
from sklearn.preprocessing import StandardScaler
import numpy as np
from tensorflow.keras.losses import MeanAbsoluteError
from keras import layers
import keras
from tensorflow.keras import backend as K
import tensorflow as tf
import pandas as pd
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import matplotlib
matplotlib.use('Agg')
train_features = pd.read_csv(
'boxfitdata/boxfit_wind_final_trainfeatures.csv')
test_features = pd.read_csv(
'boxfitdata/boxfit_wind_final_testfeatures.csv')
train_labels = pd.read_csv(
'boxfitdata/boxfit_wind_final_trainlabels.csv')
test_labels = pd.read_csv(
'boxfitdata/boxfit_wind_final_testlabels.csv')
scaler_in = StandardScaler()
scaler_out = StandardScaler()
train_features_scaled = scaler_in.fit_transform(train_features)
test_features_scaled = scaler_in.transform(test_features)
train_labels_scaled = scaler_out.fit_transform(train_labels)
test_labels_scaled = scaler_out.transform(test_labels)
batch_size = 128
clr_step_size = int(4 * (len(train_features_scaled)/batch_size))
base_lr = 1e-4
max_lr = 1e-2
mode = 'triangular2'
clr = CyclicLR(base_lr=base_lr, max_lr=max_lr,
step_size=clr_step_size, mode=mode)
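# Note (added): with this configuration the learning rate ramps from base_lr to
# max_lr and back over roughly eight epochs (step_size corresponds to four
# epochs per half cycle); in 'triangular2' mode the peak is halved after each
# full cycle.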
filepath = 'boxfitfinal/'
filepath_intermediate = 'boxfitfinal/model-wind-stdsc-{epoch:02d}.hdf5'
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath_intermediate, monitor='loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=20)
def masked_metric(y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
class CustomAccuracy(tf.keras.losses.Loss):
def __init__(self):
super().__init__()
def call(self, y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
def build_and_compile_model():
model = keras.Sequential([
layers.Dense(
1000, input_dim=train_features_scaled.shape[1], activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(117, activation='linear')
])
model.compile(loss=CustomAccuracy(), metrics=[masked_metric],
optimizer=keras.optimizers.Nadam(0.001))
return model
dnn_model = build_and_compile_model()
dnn_model.summary()
history = dnn_model.fit(train_features_scaled, train_labels_scaled,
validation_split=0.0, batch_size=batch_size, verbose=1, epochs=2000, callbacks=[clr, model_checkpoint_callback])
dnn_model.save(filepath+'boxfit_wind_final_stdsc.h5')
test_predictions_scaled = dnn_model.predict(test_features_scaled)
test_predictions_unscaled = scaler_out.inverse_transform(
test_predictions_scaled)
test_predictions = 10**test_predictions_unscaled
test_labels_lin = 10**test_labels
err = np.abs(test_predictions-test_labels_lin)/test_labels_lin
err = err.values.flatten()
print('errors <0.1: '+str(len(err[err < 0.1])/len(err)))
print('errors >0.2: '+str(len(err[err > 0.2])/len(err)))
print('median errors: '+str(np.nanmedian(err)))
| 3,833 | 35.169811 | 136 | py |
GL-AT | GL-AT-master/utils/crash.py | import sys
class ExceptionHook:
instance = None
def __call__(self, *args, **kwargs):
if self.instance is None:
from IPython.core import ultratb
self.instance = ultratb.FormattedTB(mode='Plain',
color_scheme='Linux', call_pdb=1)
return self.instance(*args, **kwargs)
sys.excepthook = ExceptionHook()
| 366 | 27.230769 | 61 | py |
GL-AT | GL-AT-master/utils/create_black_list.py | import argparse
import csv
import os
from utilities import create_folder
def dcase2017task4(args):
"""Create black list. Black list is a list of audio ids that will be
skipped in training.
"""
# Augments & parameters
workspace = args.workspace
# Black list from DCASE 2017 Task 4
test_weak_csv = 'metadata/black_list/groundtruth_weak_label_testing_set.csv'
evaluation_weak_csv = 'metadata/black_list/groundtruth_weak_label_evaluation_set.csv'
black_list_csv = os.path.join(workspace, 'black_list', 'dcase2017task4.csv')
create_folder(os.path.dirname(black_list_csv))
def get_id_sets(csv_path):
with open(csv_path, 'r') as fr:
reader = csv.reader(fr, delimiter='\t')
lines = list(reader)
ids_set = []
for line in lines:
"""line: ['-5QrBL6MzLg_60.000_70.000.wav', '60.000', '70.000', 'Train horn']"""
ids_set.append(line[0][0 : 11])
ids_set = list(set(ids_set))
return ids_set
test_ids_set = get_id_sets(test_weak_csv)
evaluation_ids_set = get_id_sets(evaluation_weak_csv)
full_ids_set = test_ids_set + evaluation_ids_set
# Write black list
fw = open(black_list_csv, 'w')
for id in full_ids_set:
fw.write('{}\n'.format(id))
print('Write black list to {}'.format(black_list_csv))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='mode')
parser_dcase2017task4 = subparsers.add_parser('dcase2017task4')
parser_dcase2017task4.add_argument('--workspace', type=str, required=True)
args = parser.parse_args()
if args.mode == 'dcase2017task4':
dcase2017task4(args)
else:
raise Exception('Error argument!') | 1,887 | 28.5 | 91 | py |
GL-AT | GL-AT-master/utils/data_generator.py | import os
import sys
import numpy as np
import h5py
import csv
import time
import logging
from utilities import int16_to_float32
def read_black_list(black_list_csv):
"""Read audio names from black list.
"""
with open(black_list_csv, 'r') as fr:
reader = csv.reader(fr)
lines = list(reader)
black_list_names = ['Y{}.wav'.format(line[0]) for line in lines]
return black_list_names
class AudioSetDataset(object):
def __init__(self, clip_samples, classes_num):
"""This class takes the meta of an audio clip as input, and return
the waveform and target of the audio clip. This class is used by DataLoader.
Args:
clip_samples: int
classes_num: int
"""
self.clip_samples = clip_samples
self.classes_num = classes_num
def __getitem__(self, meta):
"""Load waveform and target of an audio clip.
Args:
meta: {
'audio_name': str,
'hdf5_path': str,
'index_in_hdf5': int}
Returns:
data_dict: {
'audio_name': str,
'waveform': (clip_samples,),
'target': (classes_num,)}
"""
if meta is None:
"""Dummy waveform and target. This is used for samples with mixup
lamda of 0."""
audio_name = None
waveform = np.zeros((self.clip_samples,), dtype=np.float32)
target = np.zeros((self.classes_num,), dtype=np.float32)
else:
hdf5_path = meta['hdf5_path']
index_in_hdf5 = meta['index_in_hdf5']
with h5py.File(hdf5_path, 'r') as hf:
audio_name = hf['audio_name'][index_in_hdf5].decode()
waveform = int16_to_float32(hf['waveform'][index_in_hdf5])
target = hf['target'][index_in_hdf5].astype(np.float32)
data_dict = {
'audio_name': audio_name, 'waveform': waveform, 'target': target}
return data_dict
class Base(object):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv, random_seed):
"""Base class of train sampler.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
self.batch_size = batch_size
self.random_state = np.random.RandomState(random_seed)
# Black list
if black_list_csv:
self.black_list_names = read_black_list(black_list_csv)
else:
self.black_list_names = []
logging.info('Black list samples: {}'.format(len(self.black_list_names)))
# Load target
load_time = time.time()
with h5py.File(indexes_hdf5_path, 'r') as hf:
self.audio_names = [audio_name.decode() for audio_name in hf['audio_name'][:]]
self.hdf5_paths = [hdf5_path.decode() for hdf5_path in hf['hdf5_path'][:]]
self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
self.targets = hf['target'][:].astype(np.float32)
(self.audios_num, self.classes_num) = self.targets.shape
logging.info('Training number: {}'.format(self.audios_num))
logging.info('Load target time: {:.3f} s'.format(time.time() - load_time))
class TrainSampler(Base):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
random_seed=1234):
"""Balanced sampler. Generate batch meta for training.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
super(TrainSampler, self).__init__(indexes_hdf5_path, batch_size,
black_list_csv, random_seed)
self.indexes = np.arange(self.audios_num)
# Shuffle indexes
self.random_state.shuffle(self.indexes)
self.pointer = 0
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'audio_name': 'YfWBzCRl6LUs.wav',
'hdf5_path': 'xx/balanced_train.h5',
'index_in_hdf5': 15734,
'target': [0, 1, 0, 0, ...]},
...]
"""
batch_size = self.batch_size
while True:
batch_meta = []
i = 0
while i < batch_size:
index = self.indexes[self.pointer]
self.pointer += 1
# Shuffle indexes and reset pointer
if self.pointer >= self.audios_num:
self.pointer = 0
self.random_state.shuffle(self.indexes)
# If audio in black list then continue
if self.audio_names[index] in self.black_list_names:
continue
else:
batch_meta.append({
'audio_name': self.audio_names[index],
'hdf5_path': self.hdf5_paths[index],
'index_in_hdf5': self.indexes_in_hdf5[index],
'target': self.targets[index]})
i += 1
yield batch_meta
def state_dict(self):
state = {
'indexes': self.indexes,
'pointer': self.pointer}
return state
def load_state_dict(self, state):
self.indexes = state['indexes']
self.pointer = state['pointer']
class BalancedTrainSampler(Base):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
random_seed=1234):
"""Balanced sampler. Generate batch meta for training. Data are equally
sampled from different sound classes.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
super(BalancedTrainSampler, self).__init__(indexes_hdf5_path,
batch_size, black_list_csv, random_seed)
self.samples_num_per_class = np.sum(self.targets, axis=0)
logging.info('samples_num_per_class: {}'.format(
self.samples_num_per_class.astype(np.int32)))
# Training indexes of all sound classes. E.g.:
# [[0, 11, 12, ...], [3, 4, 15, 16, ...], [7, 8, ...], ...]
self.indexes_per_class = []
for k in range(self.classes_num):
self.indexes_per_class.append(
np.where(self.targets[:, k] == 1)[0])
# Shuffle indexes
for k in range(self.classes_num):
self.random_state.shuffle(self.indexes_per_class[k])
self.queue = []
self.pointers_of_classes = [0] * self.classes_num
def expand_queue(self, queue):
classes_set = np.arange(self.classes_num).tolist()
self.random_state.shuffle(classes_set)
queue += classes_set
return queue
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'audio_name': 'YfWBzCRl6LUs.wav',
'hdf5_path': 'xx/balanced_train.h5',
'index_in_hdf5': 15734,
'target': [0, 1, 0, 0, ...]},
...]
"""
batch_size = self.batch_size
while True:
batch_meta = []
i = 0
while i < batch_size:
if len(self.queue) == 0:
self.queue = self.expand_queue(self.queue)
class_id = self.queue.pop(0)
pointer = self.pointers_of_classes[class_id]
self.pointers_of_classes[class_id] += 1
index = self.indexes_per_class[class_id][pointer]
# When finish one epoch of a sound class, then shuffle its indexes and reset pointer
if self.pointers_of_classes[class_id] >= self.samples_num_per_class[class_id]:
self.pointers_of_classes[class_id] = 0
self.random_state.shuffle(self.indexes_per_class[class_id])
# If audio in black list then continue
if self.audio_names[index] in self.black_list_names:
continue
else:
batch_meta.append({
'audio_name': self.audio_names[index],
'hdf5_path': self.hdf5_paths[index],
'index_in_hdf5': self.indexes_in_hdf5[index],
'target': self.targets[index]})
i += 1
yield batch_meta
def state_dict(self):
state = {
'indexes_per_class': self.indexes_per_class,
'queue': self.queue,
'pointers_of_classes': self.pointers_of_classes}
return state
def load_state_dict(self, state):
self.indexes_per_class = state['indexes_per_class']
self.queue = state['queue']
self.pointers_of_classes = state['pointers_of_classes']
class AlternateTrainSampler(Base):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
random_seed=1234):
"""AlternateSampler is a combination of Sampler and Balanced Sampler.
AlternateSampler alternately sample data from Sampler and Blanced Sampler.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
self.sampler1 = TrainSampler(indexes_hdf5_path, batch_size,
black_list_csv, random_seed)
self.sampler2 = BalancedTrainSampler(indexes_hdf5_path, batch_size,
black_list_csv, random_seed)
self.batch_size = batch_size
self.count = 0
def __iter__(self):
batch_size = self.batch_size
while True:
self.count += 1
if self.count % 2 == 0:
batch_meta = []
i = 0
while i < batch_size:
index = self.sampler1.indexes[self.sampler1.pointer]
self.sampler1.pointer += 1
# Shuffle indexes and reset pointer
if self.sampler1.pointer >= self.sampler1.audios_num:
self.sampler1.pointer = 0
self.sampler1.random_state.shuffle(self.sampler1.indexes)
# If audio in black list then continue
if self.sampler1.audio_names[index] in self.sampler1.black_list_names:
continue
else:
batch_meta.append({
'audio_name': self.sampler1.audio_names[index],
'hdf5_path': self.sampler1.hdf5_paths[index],
'index_in_hdf5': self.sampler1.indexes_in_hdf5[index],
'target': self.sampler1.targets[index]})
i += 1
elif self.count % 2 == 1:
batch_meta = []
i = 0
while i < batch_size:
if len(self.sampler2.queue) == 0:
self.sampler2.queue = self.sampler2.expand_queue(self.sampler2.queue)
class_id = self.sampler2.queue.pop(0)
pointer = self.sampler2.pointers_of_classes[class_id]
self.sampler2.pointers_of_classes[class_id] += 1
index = self.sampler2.indexes_per_class[class_id][pointer]
# When finish one epoch of a sound class, then shuffle its indexes and reset pointer
if self.sampler2.pointers_of_classes[class_id] >= self.sampler2.samples_num_per_class[class_id]:
self.sampler2.pointers_of_classes[class_id] = 0
self.sampler2.random_state.shuffle(self.sampler2.indexes_per_class[class_id])
# If audio in black list then continue
if self.sampler2.audio_names[index] in self.sampler2.black_list_names:
continue
else:
batch_meta.append({
'audio_name': self.sampler2.audio_names[index],
'hdf5_path': self.sampler2.hdf5_paths[index],
'index_in_hdf5': self.sampler2.indexes_in_hdf5[index],
'target': self.sampler2.targets[index]})
i += 1
yield batch_meta
def state_dict(self):
state = {
'sampler1': self.sampler1.state_dict(),
'sampler2': self.sampler2.state_dict()}
return state
def load_state_dict(self, state):
self.sampler1.load_state_dict(state['sampler1'])
self.sampler2.load_state_dict(state['sampler2'])
class EvaluateSampler(object):
def __init__(self, indexes_hdf5_path, batch_size):
"""Evaluate sampler. Generate batch meta for evaluation.
Args:
indexes_hdf5_path: string
batch_size: int
"""
self.batch_size = batch_size
with h5py.File(indexes_hdf5_path, 'r') as hf:
self.audio_names = [audio_name.decode() for audio_name in hf['audio_name'][:]]
self.hdf5_paths = [hdf5_path.decode() for hdf5_path in hf['hdf5_path'][:]]
self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
self.targets = hf['target'][:].astype(np.float32)
self.audios_num = len(self.audio_names)
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'audio_name': 'Y--PJHxphWEs.wav',
'hdf5_path': 'xx/balanced_train.h5',
'index_in_hdf5': 0, 'target':
'target': [0, 1, 0, 0, ...]},
...]
"""
batch_size = self.batch_size
pointer = 0
while pointer < self.audios_num:
batch_indexes = np.arange(pointer,
min(pointer + batch_size, self.audios_num))
batch_meta = []
for index in batch_indexes:
batch_meta.append({
'audio_name': self.audio_names[index],
'hdf5_path': self.hdf5_paths[index],
'index_in_hdf5': self.indexes_in_hdf5[index],
'target': self.targets[index]})
pointer += batch_size
yield batch_meta
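# Usage sketch (added; assumes PyTorch is available and uses a placeholder hdf5
# path): the samplers above yield lists of meta dicts, AudioSetDataset maps
# each meta to a waveform/target pair, and collate_fn (defined below) stacks
# them into numpy batches, e.g.:
#
# import torch
# dataset = AudioSetDataset(clip_samples=320000, classes_num=527)
# sampler = BalancedTrainSampler(indexes_hdf5_path='indexes/balanced_train.h5',
#                                batch_size=32)
# loader = torch.utils.data.DataLoader(dataset=dataset, batch_sampler=sampler,
#                                      collate_fn=collate_fn, num_workers=8,
#                                      pin_memory=True)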
def collate_fn(list_data_dict):
"""Collate data.
Args:
list_data_dict, e.g., [{'audio_name': str, 'waveform': (clip_samples,), ...},
{'audio_name': str, 'waveform': (clip_samples,), ...},
...]
Returns:
np_data_dict, dict, e.g.,
{'audio_name': (batch_size,), 'waveform': (batch_size, clip_samples), ...}
"""
np_data_dict = {}
for key in list_data_dict[0].keys():
np_data_dict[key] = np.array([data_dict[key] for data_dict in list_data_dict])
return np_data_dict | 15,307 | 34.850117 | 116 | py |
GL-AT | GL-AT-master/utils/dataset.py | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import (create_folder, get_filename, create_logging,
float32_to_int16, pad_or_truncate, read_metadata)
import config
def split_unbalanced_csv_to_partial_csvs(args):
"""Split unbalanced csv to part csvs. Each part csv contains up to 50000 ids.
"""
unbalanced_csv_path = args.unbalanced_csv
unbalanced_partial_csvs_dir = args.unbalanced_partial_csvs_dir
create_folder(unbalanced_partial_csvs_dir)
with open(unbalanced_csv_path, 'r') as f:
lines = f.readlines()
lines = lines[3:] # Remove head info
audios_num_per_file = 50000
files_num = int(np.ceil(len(lines) / float(audios_num_per_file)))
for r in range(files_num):
lines_per_file = lines[r * audios_num_per_file :
(r + 1) * audios_num_per_file]
out_csv_path = os.path.join(unbalanced_partial_csvs_dir,
'unbalanced_train_segments_part{:02d}.csv'.format(r))
with open(out_csv_path, 'w') as f:
f.write('empty\n')
f.write('empty\n')
f.write('empty\n')
for line in lines_per_file:
f.write(line)
print('Write out csv to {}'.format(out_csv_path))
def download_wavs(args):
"""Download videos and extract audio in wav format.
"""
# Paths
csv_path = args.csv_path
audios_dir = args.audios_dir
mini_data = args.mini_data
if mini_data:
logs_dir = '_logs/download_dataset/{}'.format(get_filename(csv_path))
else:
logs_dir = '_logs/download_dataset_minidata/{}'.format(get_filename(csv_path))
create_folder(audios_dir)
create_folder(logs_dir)
create_logging(logs_dir, filemode='w')
logging.info('Download log is saved to {}'.format(logs_dir))
# Read csv
with open(csv_path, 'r') as f:
lines = f.readlines()
lines = lines[3:] # Remove csv head info
if mini_data:
lines = lines[0 : 10] # Download partial data for debug
download_time = time.time()
# Download
for (n, line) in enumerate(lines):
items = line.split(', ')
audio_id = items[0]
start_time = float(items[1])
end_time = float(items[2])
duration = end_time - start_time
logging.info('{} {} start_time: {:.1f}, end_time: {:.1f}'.format(
n, audio_id, start_time, end_time))
# Download full video of whatever format
video_name = os.path.join(audios_dir, '_Y{}.%(ext)s'.format(audio_id))
os.system("youtube-dl --quiet -o '{}' -x https://www.youtube.com/watch?v={}"\
.format(video_name, audio_id))
video_paths = glob.glob(os.path.join(audios_dir, '_Y' + audio_id + '.*'))
# If download successful
if len(video_paths) > 0:
video_path = video_paths[0] # Choose one video
# Add 'Y' to the head because some video ids are started with '-'
# which will cause problem
audio_path = os.path.join(audios_dir, 'Y' + audio_id + '.wav')
# Extract audio in wav format
os.system("ffmpeg -loglevel panic -i {} -ac 1 -ar 32000 -ss {} -t 00:00:{} {} "\
.format(video_path,
str(datetime.timedelta(seconds=start_time)), duration,
audio_path))
# Remove downloaded video
os.system("rm {}".format(video_path))
logging.info("Download and convert to {}".format(audio_path))
logging.info('Download finished! Time spent: {:.3f} s'.format(
time.time() - download_time))
logging.info('Logs can be viewed in {}'.format(logs_dir))
def pack_waveforms_to_hdf5(args):
"""Pack waveform and target of several audio clips to a single hdf5 file.
This can speed up loading and training.
"""
# Arguments & parameters
audios_dir = args.audios_dir
csv_path = args.csv_path
waveforms_hdf5_path = args.waveforms_hdf5_path
mini_data = args.mini_data
clip_samples = config.clip_samples
classes_num = config.classes_num
sample_rate = config.sample_rate
id_to_ix = config.id_to_ix
# Paths
if mini_data:
prefix = 'mini_'
waveforms_hdf5_path += '.mini'
else:
prefix = ''
create_folder(os.path.dirname(waveforms_hdf5_path))
logs_dir = '_logs/pack_waveforms_to_hdf5/{}{}'.format(prefix, get_filename(csv_path))
create_folder(logs_dir)
create_logging(logs_dir, filemode='w')
logging.info('Write logs to {}'.format(logs_dir))
# Read csv file
meta_dict = read_metadata(csv_path, classes_num, id_to_ix)
if mini_data:
mini_num = 10
for key in meta_dict.keys():
meta_dict[key] = meta_dict[key][0 : mini_num]
audios_num = len(meta_dict['audio_name'])
# Pack waveform to hdf5
total_time = time.time()
with h5py.File(waveforms_hdf5_path, 'w') as hf:
hf.create_dataset('audio_name', shape=((audios_num,)), dtype='S20')
hf.create_dataset('waveform', shape=((audios_num, clip_samples)), dtype=np.int16)
        hf.create_dataset('target', shape=((audios_num, classes_num)), dtype=bool)
hf.attrs.create('sample_rate', data=sample_rate, dtype=np.int32)
# Pack waveform & target of several audio clips to a single hdf5 file
for n in range(audios_num):
audio_path = os.path.join(audios_dir, meta_dict['audio_name'][n])
if os.path.isfile(audio_path):
logging.info('{} {}'.format(n, audio_path))
(audio, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
audio = pad_or_truncate(audio, clip_samples)
hf['audio_name'][n] = meta_dict['audio_name'][n].encode()
hf['waveform'][n] = float32_to_int16(audio)
hf['target'][n] = meta_dict['target'][n]
else:
logging.info('{} File does not exist! {}'.format(n, audio_path))
logging.info('Write to {}'.format(waveforms_hdf5_path))
logging.info('Pack hdf5 time: {:.3f}'.format(time.time() - total_time))
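# Reading the packed file back (added sketch; mirrors what data_generator.py
# does, with int16_to_float32 imported from utilities):
#
# with h5py.File(waveforms_hdf5_path, 'r') as hf:
#     name = hf['audio_name'][0].decode()
#     audio = int16_to_float32(hf['waveform'][0])   # (clip_samples,)
#     target = hf['target'][0].astype(np.float32)   # (classes_num,)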
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
parser_split = subparsers.add_parser('split_unbalanced_csv_to_partial_csvs')
parser_split.add_argument('--unbalanced_csv', type=str, required=True, help='Path of unbalanced_csv file to read.')
parser_split.add_argument('--unbalanced_partial_csvs_dir', type=str, required=True, help='Directory to save out split unbalanced partial csv.')
parser_download_wavs = subparsers.add_parser('download_wavs')
parser_download_wavs.add_argument('--csv_path', type=str, required=True, help='Path of csv file containing audio info to be downloaded.')
parser_download_wavs.add_argument('--audios_dir', type=str, required=True, help='Directory to save out downloaded audio.')
parser_download_wavs.add_argument('--mini_data', action='store_true', default=True, help='Set true to only download 10 audios for debugging.')
parser_pack_wavs = subparsers.add_parser('pack_waveforms_to_hdf5')
parser_pack_wavs.add_argument('--csv_path', type=str, required=True, help='Path of csv file containing audio info to be downloaded.')
parser_pack_wavs.add_argument('--audios_dir', type=str, required=True, help='Directory to save out downloaded audio.')
parser_pack_wavs.add_argument('--waveforms_hdf5_path', type=str, required=True, help='Path to save out packed hdf5.')
parser_pack_wavs.add_argument('--mini_data', action='store_true', default=False, help='Set true to only download 10 audios for debugging.')
args = parser.parse_args()
if args.mode == 'split_unbalanced_csv_to_partial_csvs':
split_unbalanced_csv_to_partial_csvs(args)
elif args.mode == 'download_wavs':
download_wavs(args)
elif args.mode == 'pack_waveforms_to_hdf5':
pack_waveforms_to_hdf5(args)
else:
raise Exception('Incorrect arguments!') | 8,269 | 35.919643 | 147 | py |
GL-AT | GL-AT-master/utils/utilities.py | import os
import logging
import h5py
import soundfile
import librosa
import numpy as np
import pandas as pd
from scipy import stats
import datetime
import pickle
def create_folder(fd):
if not os.path.exists(fd):
os.makedirs(fd)
def get_filename(path):
path = os.path.realpath(path)
na_ext = path.split('/')[-1]
na = os.path.splitext(na_ext)[0]
return na
def get_sub_filepaths(folder):
paths = []
for root, dirs, files in os.walk(folder):
for name in files:
path = os.path.join(root, name)
paths.append(path)
return paths
def create_logging(log_dir, filemode):
create_folder(log_dir)
i1 = 0
while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(i1))):
i1 += 1
log_path = os.path.join(log_dir, '{:04d}.log'.format(i1))
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=log_path,
filemode=filemode)
# Print to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging
def read_metadata(csv_path, classes_num, id_to_ix):
"""Read metadata of AudioSet from a csv file.
Args:
csv_path: str
Returns:
meta_dict: {'audio_name': (audios_num,), 'target': (audios_num, classes_num)}
"""
with open(csv_path, 'r') as fr:
lines = fr.readlines()
lines = lines[3:] # Remove heads
audios_num = len(lines)
    targets = np.zeros((audios_num, classes_num), dtype=bool)
audio_names = []
for n, line in enumerate(lines):
items = line.split(', ')
"""items: ['--4gqARaEJE', '0.000', '10.000', '"/m/068hy,/m/07q6cd_,/m/0bt9lr,/m/0jbk"\n']"""
audio_name = 'Y{}.wav'.format(items[0]) # Audios are started with an extra 'Y' when downloading
label_ids = items[3].split('"')[1].split(',')
audio_names.append(audio_name)
# Target
for id in label_ids:
ix = id_to_ix[id]
targets[n, ix] = 1
meta_dict = {'audio_name': np.array(audio_names), 'target': targets}
return meta_dict
def float32_to_int16(x):
assert np.max(np.abs(x)) <= 1.
return (x * 32767.).astype(np.int16)
def int16_to_float32(x):
return (x / 32767.).astype(np.float32)
def pad_or_truncate(x, audio_length):
"""Pad all audio to specific length."""
if len(x) <= audio_length:
return np.concatenate((x, np.zeros(audio_length - len(x))), axis=0)
else:
return x[0 : audio_length]
def d_prime(auc):
d_prime = stats.norm().ppf(auc) * np.sqrt(2.0)
return d_prime
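# Example (added): an AUC of 0.5 maps to d' = 0, and an AUC of about 0.92
# corresponds to d' of roughly 1.99, since norm.ppf(0.92) is about 1.405 and
# 1.405 * sqrt(2) is about 1.99.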
class Mixup(object):
def __init__(self, mixup_alpha, random_seed=1234):
"""Mixup coefficient generator.
"""
self.mixup_alpha = mixup_alpha
self.random_state = np.random.RandomState(random_seed)
def get_lambda(self, batch_size):
"""Get mixup random coefficients.
Args:
batch_size: int
Returns:
mixup_lambdas: (batch_size,)
"""
mixup_lambdas = []
for n in range(0, batch_size, 2):
lam = self.random_state.beta(self.mixup_alpha, self.mixup_alpha, 1)[0]
mixup_lambdas.append(lam)
mixup_lambdas.append(1. - lam)
return np.array(mixup_lambdas)
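# Example (added): get_lambda(4) returns [lam1, 1 - lam1, lam2, 1 - lam2], so
# consecutive pairs of samples in a batch share complementary mixup weights.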
class StatisticsContainer(object):
def __init__(self, statistics_path):
"""Contain statistics of different training iterations.
"""
self.statistics_path = statistics_path
self.backup_statistics_path = '{}_{}.pkl'.format(
os.path.splitext(self.statistics_path)[0],
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
self.statistics_dict = {'bal': [], 'test': []}
def append(self, iteration, statistics, data_type):
statistics['iteration'] = iteration
self.statistics_dict[data_type].append(statistics)
def dump(self):
pickle.dump(self.statistics_dict, open(self.statistics_path, 'wb'))
pickle.dump(self.statistics_dict, open(self.backup_statistics_path, 'wb'))
logging.info(' Dump statistics to {}'.format(self.statistics_path))
logging.info(' Dump statistics to {}'.format(self.backup_statistics_path))
def load_state_dict(self, resume_iteration):
self.statistics_dict = pickle.load(open(self.statistics_path, 'rb'))
resume_statistics_dict = {'bal': [], 'test': []}
for key in self.statistics_dict.keys():
for statistics in self.statistics_dict[key]:
if statistics['iteration'] <= resume_iteration:
resume_statistics_dict[key].append(statistics)
self.statistics_dict = resume_statistics_dict | 5,085 | 28.74269 | 105 | py |
GL-AT | GL-AT-master/utils/config.py | import numpy as np
import csv
sample_rate = 32000
clip_samples = sample_rate * 10 # Audio clips are 10-second
# Load label
with open('metadata/class_labels_indices.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
lines = list(reader)
labels = []
ids = [] # Each label has a unique id such as "/m/068hy"
for i1 in range(1, len(lines)):
id = lines[i1][1]
label = lines[i1][2]
ids.append(id)
labels.append(label)
classes_num = len(labels)
lb_to_ix = {label : i for i, label in enumerate(labels)}
ix_to_lb = {i : label for i, label in enumerate(labels)}
id_to_ix = {id : i for i, id in enumerate(ids)}
ix_to_id = {i : id for i, id in enumerate(ids)}
full_samples_per_class = np.array([
937432, 16344, 7822, 10271, 2043, 14420, 733, 1511,
1258, 424, 1751, 704, 369, 590, 1063, 1375,
5026, 743, 853, 1648, 714, 1497, 1251, 2139,
1093, 133, 224, 39469, 6423, 407, 1559, 4546,
6826, 7464, 2468, 549, 4063, 334, 587, 238,
1766, 691, 114, 2153, 236, 209, 421, 740,
269, 959, 137, 4192, 485, 1515, 655, 274,
69, 157, 1128, 807, 1022, 346, 98, 680,
890, 352, 4169, 2061, 1753, 9883, 1339, 708,
37857, 18504, 12864, 2475, 2182, 757, 3624, 677,
1683, 3583, 444, 1780, 2364, 409, 4060, 3097,
3143, 502, 723, 600, 230, 852, 1498, 1865,
1879, 2429, 5498, 5430, 2139, 1761, 1051, 831,
2401, 2258, 1672, 1711, 987, 646, 794, 25061,
5792, 4256, 96, 8126, 2740, 752, 513, 554,
106, 254, 1592, 556, 331, 615, 2841, 737,
265, 1349, 358, 1731, 1115, 295, 1070, 972,
174, 937780, 112337, 42509, 49200, 11415, 6092, 13851,
2665, 1678, 13344, 2329, 1415, 2244, 1099, 5024,
9872, 10948, 4409, 2732, 1211, 1289, 4807, 5136,
1867, 16134, 14519, 3086, 19261, 6499, 4273, 2790,
8820, 1228, 1575, 4420, 3685, 2019, 664, 324,
513, 411, 436, 2997, 5162, 3806, 1389, 899,
8088, 7004, 1105, 3633, 2621, 9753, 1082, 26854,
3415, 4991, 2129, 5546, 4489, 2850, 1977, 1908,
1719, 1106, 1049, 152, 136, 802, 488, 592,
2081, 2712, 1665, 1128, 250, 544, 789, 2715,
8063, 7056, 2267, 8034, 6092, 3815, 1833, 3277,
8813, 2111, 4662, 2678, 2954, 5227, 1472, 2591,
3714, 1974, 1795, 4680, 3751, 6585, 2109, 36617,
6083, 16264, 17351, 3449, 5034, 3931, 2599, 4134,
3892, 2334, 2211, 4516, 2766, 2862, 3422, 1788,
2544, 2403, 2892, 4042, 3460, 1516, 1972, 1563,
1579, 2776, 1647, 4535, 3921, 1261, 6074, 2922,
3068, 1948, 4407, 712, 1294, 1019, 1572, 3764,
5218, 975, 1539, 6376, 1606, 6091, 1138, 1169,
7925, 3136, 1108, 2677, 2680, 1383, 3144, 2653,
1986, 1800, 1308, 1344, 122231, 12977, 2552, 2678,
7824, 768, 8587, 39503, 3474, 661, 430, 193,
1405, 1442, 3588, 6280, 10515, 785, 710, 305,
206, 4990, 5329, 3398, 1771, 3022, 6907, 1523,
8588, 12203, 666, 2113, 7916, 434, 1636, 5185,
1062, 664, 952, 3490, 2811, 2749, 2848, 15555,
363, 117, 1494, 1647, 5886, 4021, 633, 1013,
5951, 11343, 2324, 243, 372, 943, 734, 242,
3161, 122, 127, 201, 1654, 768, 134, 1467,
642, 1148, 2156, 1368, 1176, 302, 1909, 61,
223, 1812, 287, 422, 311, 228, 748, 230,
1876, 539, 1814, 737, 689, 1140, 591, 943,
353, 289, 198, 490, 7938, 1841, 850, 457,
814, 146, 551, 728, 1627, 620, 648, 1621,
2731, 535, 88, 1736, 736, 328, 293, 3170,
344, 384, 7640, 433, 215, 715, 626, 128,
3059, 1833, 2069, 3732, 1640, 1508, 836, 567,
2837, 1151, 2068, 695, 1494, 3173, 364, 88,
188, 740, 677, 273, 1533, 821, 1091, 293,
647, 318, 1202, 328, 532, 2847, 526, 721,
370, 258, 956, 1269, 1641, 339, 1322, 4485,
286, 1874, 277, 757, 1393, 1330, 380, 146,
377, 394, 318, 339, 1477, 1886, 101, 1435,
284, 1425, 686, 621, 221, 117, 87, 1340,
201, 1243, 1222, 651, 1899, 421, 712, 1016,
1279, 124, 351, 258, 7043, 368, 666, 162,
7664, 137, 70159, 26179, 6321, 32236, 33320, 771,
1169, 269, 1103, 444, 364, 2710, 121, 751,
1609, 855, 1141, 2287, 1940, 3943, 289])
| 5,404 | 55.894737 | 71 | py |
GL-AT | GL-AT-master/utils/plot_statistics.py | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_metrics0(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
def _load_metrics0_classwise(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
return statistics_dict['test'][300]['average_precision']
def _load_metrics0_classwise2(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
k = 270
mAP = np.mean(statistics_dict['test'][k]['average_precision'])
mAUC = np.mean(statistics_dict['test'][k]['auc'])
dprime = d_prime(mAUC)
return mAP, mAUC, dprime
def _load_metrics_classwise(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace = '/mnt/cephfs_new_wj/speechsv/kongqiuqiang/workspaces/cvssp/pub_audioset_tagging_cnn'
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
k = 300
mAP = np.mean(statistics_dict['test'][k]['average_precision'])
mAUC = np.mean(statistics_dict['test'][k]['auc'])
dprime = d_prime(mAUC)
return mAP, mAUC, dprime
def plot(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/{}.pdf'.format(select)
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(1, 1, figsize=(15, 8))
lines = []
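# Nested helper: read statistics.pkl for one training configuration and
# return the per-checkpoint mAP (averaged over classes) on the balanced
# training subset ('bal') and on the evaluation set ('test').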
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
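# Each branch below overlays one group of runs. For every run the
# balanced-subset curve is drawn with low opacity (bal_alpha) and the
# evaluation-set curve with full opacity (test_alpha) in the same colour.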
if select == '1_cnn13':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_dropout', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_no_dropout', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_no_specaug', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_mixup', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_mixup_in_wave', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_mixup_in_wave', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_pooling':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_gwrp', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapgwrp', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_att', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapatt', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_resnet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet34', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet50', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_densenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet121', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet121', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet201', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet201', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_cnn9':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_hop':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
500, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop500', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
640, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop640', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
1000, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop1000', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_emb':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb128', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb512', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_mobilenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv1', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV2', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv2', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_DaiNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_DaiNet', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='c', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet50', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform_cnn2d':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_decision_level':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelMax', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelMax', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAvg', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAvg', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAtt', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAtt', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_transformer':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer1', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer3', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer3', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer6', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer6', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_bal_train_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_sr':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_16k', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_8k', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_time_domain':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_time_domain', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_partial_full':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.9_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.9', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.8', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.7_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.7', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.5', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_window':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 2048,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_win2048', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_melbins':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel128', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_alternate':
max_plot_iteration = 2000000
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'alternate', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_alternate', color='b', alpha=test_alpha)
lines.append(line)
elif select == '2_all':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='MobileNetV1', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='m', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='orange', alpha=test_alpha)
lines.append(line)
elif select == '2_emb':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_emb32', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_emb128', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_emb512', color='g', alpha=test_alpha)
lines.append(line)
elif select == '2_aug':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='c', alpha=test_alpha)
lines.append(line)
ax.set_ylim(0, 1.)
ax.set_xlim(0, len(iterations))
ax.xaxis.set_ticks(np.arange(0, len(iterations), 25))
ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax.yaxis.set_ticks(np.arange(0, 1.01, 0.05))
ax.yaxis.set_ticklabels(np.around(np.arange(0, 1.01, 0.05), decimals=2))
ax.grid(color='b', linestyle='solid', linewidth=0.3)
plt.legend(handles=lines, loc=2)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
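# Variant of plot() with figure size, ticks and legends formatted for the
# paper; the figure is saved to results/paper_{select}.pdf.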
def plot_for_paper(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/paper_{}.pdf'.format(select)
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
linewidth = 1.
max_plot_iteration = 540000
if select == '2_all':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='MobileNetV1', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='Wavegram-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='Wavegram-Logmel-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_emb':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,emb=2048', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,emb=32', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,emb=128', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
# line, = ax.plot(test_map, label='Cnn13_512', color='g', alpha=test_alpha)
# lines.append(line)
elif select == '2_bal':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,mixup (1.9m)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='y', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,mixup-wav (1.9m)', color='y', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,no-bal,no-mixup (1.9m)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,no-mixup (1.9m)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,no-mixup (20k)', color='k', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,mixup (20k)', color='m', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_sr':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,32kHz', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,16kHz', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,8kHz', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_partial':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
# 320, 64, 50, 14000, 'partial_0.9_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='cnn14,partial_0.9', color='b', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14 (80% full)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
# 320, 64, 50, 14000, 'partial_0.7_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='cnn14,partial_0.7', color='k', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_melbins':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,64-melbins', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,32-melbins', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,128-melbins', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax.set_ylim(0, 0.8)
ax.set_xlim(0, len(iterations))
ax.set_xlabel('Iterations')
ax.set_ylabel('mAP')
ax.xaxis.set_ticks(np.arange(0, len(iterations), 50))
# ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax.xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
ax.yaxis.set_ticks(np.arange(0, 0.81, 0.05))
ax.yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3', '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
# ax.yaxis.set_ticklabels(np.around(np.arange(0, 0.81, 0.05), decimals=2))
ax.yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
ax.xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.legend(handles=lines, loc=2)
plt.tight_layout(pad=0, h_pad=0, w_pad=0)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
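# Six-panel summary figure for the paper: (a) architectures, (b) training
# data and augmentation, (c) embedding size, (d) amount of training data,
# (e) sampling rate, (f) number of mel bins. Saved to results/paper2.pdf.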
def plot_for_paper2(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/paper2.pdf'
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(2, 3, figsize=(14, 7))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
def _load_metrics0(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
linewidth = 1.
max_plot_iteration = 540000
if True:
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='CNN14', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='MobileNetV1', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax[0, 0].plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
# line, = ax[0, 0].plot(test_map, label='ResNet38', color='k', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='Wavegram-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='Wavegram-Logmel-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 0].legend(handles=lines, loc=2)
ax[0, 0].set_title('(a) Comparison of architectures')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (1.9m)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax[0, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,no-bal,no-mixup (1.9m)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 1].plot(bal_map, color='y', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup-wav (1.9m)', color='y', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax[0, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (1.9m)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax[0, 1].plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (20k)', color='k', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 1].plot(bal_map, color='m', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (20k)', color='m', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 1].legend(handles=lines, loc=2, fontsize=8)
ax[0, 1].set_title('(b) Comparison of training data and augmentation')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=2048', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 2].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=32', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 2].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=128', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 2].legend(handles=lines, loc=2)
ax[0, 2].set_title('(c) Comparison of embedding size')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (80% full)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 0].legend(handles=lines, loc=2)
ax[1, 0].set_title('(d) Comparison of amount of training data')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,32kHz', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,16kHz', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,8kHz', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 1].legend(handles=lines, loc=2)
ax[1, 1].set_title('(e) Comparison of sampling rate')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 2].plot(test_map, label='CNN14,64-melbins', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 2].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 2].plot(test_map, label='CNN14,32-melbins', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 2].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 2].plot(test_map, label='CNN14,128-melbins', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 2].legend(handles=lines, loc=2)
ax[1, 2].set_title('(f) Comparison of mel bins number')
for i in range(2):
for j in range(3):
ax[i, j].set_ylim(0, 0.8)
ax[i, j].set_xlim(0, len(iterations))
ax[i, j].set_xlabel('Iterations')
ax[i, j].set_ylabel('mAP')
ax[i, j].xaxis.set_ticks(np.arange(0, len(iterations), 50))
# ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax[i, j].xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
ax[i, j].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
ax[i, j].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3', '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
# ax.yaxis.set_ticklabels(np.around(np.arange(0, 0.81, 0.05), decimals=2))
ax[i, j].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
ax[i, j].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.tight_layout(pad=0, h_pad=1, w_pad=0)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
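# Print mAP, mAUC and d-prime of a selected model at a hand-picked iteration,
# e.g. for filling in result tables.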
def table_values(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
idx = iteration // 2000
mAP = np.mean(statistics_dict['test'][idx]['average_precision'])
mAUC = np.mean(statistics_dict['test'][idx]['auc'])
dprime = d_prime(mAUC)
print('mAP: {:.3f}'.format(mAP))
print('mAUC: {:.3f}'.format(mAUC))
print('dprime: {:.3f}'.format(dprime))
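# Note: the iteration passed to each call below is mapped to a checkpoint via
# idx = iteration // 2000, i.e. statistics are assumed to be logged every
# 2000 training iterations.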
if select == 'cnn13':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn5':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn9':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_decisionlevelmax':
iteration = 400000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelMax', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_decisionlevelavg':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAvg', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_decisionlevelatt':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAtt', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_emb32':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_emb128':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_emb512':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_hop500':
iteration = 440000
_load_metrics('main', 32000, 1024,
500, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_hop640':
iteration = 440000
_load_metrics('main', 32000, 1024,
640, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_hop1000':
iteration = 540000
_load_metrics('main', 32000, 1024,
1000, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'mobilenetv1':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'mobilenetv2':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV2', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet18':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet18', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet34':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet50':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet50', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'dainet':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_DaiNet', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'leenet':
iteration = 540000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'leenet18':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet18', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet34_1d':
iteration = 500000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet50_1d':
iteration = 500000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet50', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'waveform_cnn2d':
iteration = 660000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'waveform_spandwav':
iteration = 700000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32, iteration)
def crop_label(label):
max_len = 16
if len(label) <= max_len:
return label
else:
words = label.split(' ')
cropped_label = ''
for w in words:
if len(cropped_label + ' ' + w) > max_len:
break
else:
cropped_label += ' {}'.format(w)
return cropped_label
def add_comma(integer):
integer = int(integer)
if integer >= 1000:
        return '{:,}'.format(integer)
else:
return str(integer)
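# Both helpers above are only used to build compact figure legends: crop_label keeps whole
# words while staying within 16 characters, and add_comma inserts thousands separators
# (e.g. add_comma(12345) -> '12,345').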
def plot_class_iteration(args):
# Arguments & parameters
workspace = args.workspace
select = args.select
save_out_path = 'results_map/class_iteration_map.pdf'
create_folder(os.path.dirname(save_out_path))
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
return statistics_dict
iteration = 600000
statistics_dict = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
mAP_mat = mAP_mat[0 : 300, :]
sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]
fig, axs = plt.subplots(1, 3, figsize=(20, 5))
ranges = [np.arange(0, 10), np.arange(250, 260), np.arange(517, 527)]
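    # The three panels show per-class AP training curves for head (ranks 0-9), middle
    # (ranks 250-259) and tail (ranks 517-526) classes, where classes are ranked by the
    # number of downloaded training clips.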
axs[0].set_ylabel('AP')
for col in range(0, 3):
axs[col].set_ylim(0, 1.)
axs[col].set_xlim(0, 301)
axs[col].set_xlabel('Iterations')
axs[col].set_ylabel('AP')
axs[col].xaxis.set_ticks(np.arange(0, 301, 100))
axs[col].xaxis.set_ticklabels(['0', '200k', '400k', '600k'])
lines = []
for _ix in ranges[col]:
_label = crop_label(config.labels[sorted_indexes[_ix]]) + \
' ({})'.format(add_comma(config.full_samples_per_class[sorted_indexes[_ix]]))
line, = axs[col].plot(mAP_mat[:, sorted_indexes[_ix]], label=_label)
lines.append(line)
box = axs[col].get_position()
axs[col].set_position([box.x0, box.y0, box.width * 1., box.height])
axs[col].legend(handles=lines, bbox_to_anchor=(1., 1.))
axs[col].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.tight_layout(pad=4, w_pad=1, h_pad=1)
plt.savefig(save_out_path)
print(save_out_path)
def _load_old_metrics(workspace, filename, iteration, data_type):
assert data_type in ['train', 'test']
stat_name = "stat_{}_iters.p".format(iteration)
# Load stats
stat_path = os.path.join(workspace, "stats", filename, data_type, stat_name)
try:
stats = cPickle.load(open(stat_path, 'rb'))
except:
stats = cPickle.load(open(stat_path, 'rb'), encoding='latin1')
precisions = [stat['precisions'] for stat in stats]
recalls = [stat['recalls'] for stat in stats]
maps = np.array([stat['AP'] for stat in stats])
aucs = np.array([stat['auc'] for stat in stats])
return {'average_precision': maps, 'AUC': aucs}
def _sort(ys):
sorted_idxes = np.argsort(ys)
sorted_idxes = sorted_idxes[::-1]
sorted_ys = ys[sorted_idxes]
sorted_lbs = [config.labels[e] for e in sorted_idxes]
return sorted_ys, sorted_idxes, sorted_lbs
def load_data(hdf5_path):
with h5py.File(hdf5_path, 'r') as hf:
x = hf['x'][:]
y = hf['y'][:]
video_id_list = list(hf['video_id_list'][:])
return x, y, video_id_list
def get_avg_stats(workspace, bgn_iter, fin_iter, interval_iter, filename, data_type):
assert data_type in ['train', 'test']
bal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/bal_train.h5"
eval_hdf5 = "/vol/vssp/msos/audioset/packed_features/eval.h5"
unbal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/unbal_train.h5"
t1 = time.time()
if data_type == 'test':
(te_x, te_y, te_id_list) = load_data(eval_hdf5)
elif data_type == 'train':
(te_x, te_y, te_id_list) = load_data(bal_train_hdf5)
y = te_y
prob_dir = os.path.join(workspace, "probs", filename, data_type)
names = os.listdir(prob_dir)
probs = []
iters = range(bgn_iter, fin_iter, interval_iter)
for iter in iters:
pickle_path = os.path.join(prob_dir, "prob_%d_iters.p" % iter)
try:
prob = cPickle.load(open(pickle_path, 'rb'))
except:
prob = cPickle.load(open(pickle_path, 'rb'), encoding='latin1')
probs.append(prob)
avg_prob = np.mean(np.array(probs), axis=0)
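    # Checkpoint ensembling: the predictions dumped every `interval_iter` iterations between
    # bgn_iter and fin_iter are averaged before the per-class PR / ROC statistics are computed.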
n_out = y.shape[1]
stats = []
for k in range(n_out): # around 7 seconds
(precisions, recalls, thresholds) = metrics.precision_recall_curve(y[:, k], avg_prob[:, k])
avg_precision = metrics.average_precision_score(y[:, k], avg_prob[:, k], average=None)
(fpr, tpr, thresholds) = metrics.roc_curve(y[:, k], avg_prob[:, k])
auc = metrics.roc_auc_score(y[:, k], avg_prob[:, k], average=None)
# eer = pp_data.eer(avg_prob[:, k], y[:, k])
skip = 1000
dict = {'precisions': precisions[0::skip], 'recalls': recalls[0::skip], 'AP': avg_precision,
'fpr': fpr[0::skip], 'fnr': 1. - tpr[0::skip], 'auc': auc}
stats.append(dict)
mAPs = np.array([e['AP'] for e in stats])
aucs = np.array([e['auc'] for e in stats])
print("Get avg time: {}".format(time.time() - t1))
return {'average_precision': mAPs, 'auc': aucs}
def _samples_num_per_class():
bal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/bal_train.h5"
eval_hdf5 = "/vol/vssp/msos/audioset/packed_features/eval.h5"
unbal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/unbal_train.h5"
(x, y, id_list) = load_data(eval_hdf5)
eval_num = np.sum(y, axis=0)
(x, y, id_list) = load_data(bal_train_hdf5)
bal_num = np.sum(y, axis=0)
(x, y, id_list) = load_data(unbal_train_hdf5)
unbal_num = np.sum(y, axis=0)
return bal_num, unbal_num, eval_num
def get_label_quality():
rate_csv = '/vol/vssp/msos/qk/workspaces/pub_audioset_tagging_cnn_transfer/metadata/qa_true_counts.csv'
with open(rate_csv, 'r') as f:
reader = csv.reader(f, delimiter=',')
lis = list(reader)
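    # Each row of qa_true_counts.csv is assumed to hold (class, number of rated clips,
    # number verified correct); label quality is then the ratio of the last two columns,
    # or None for classes that were never rated.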
rates = []
for n in range(1, len(lis)):
li = lis[n]
if float(li[1]) == 0:
rate = None
else:
rate = float(li[2]) / float(li[1])
rates.append(rate)
return rates
def summary_stats(args):
# Arguments & parameters
workspace = args.workspace
out_stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
create_folder(os.path.dirname(out_stat_path))
# Old workspace
old_workspace = '/vol/vssp/msos/qk/workspaces/audioset_classification'
# bal_train_metrics = _load_old_metrics(old_workspace, 'tmp127', 20000, 'train')
# eval_metrics = _load_old_metrics(old_workspace, 'tmp127', 20000, 'test')
bal_train_metrics = get_avg_stats(old_workspace, bgn_iter=10000, fin_iter=50001, interval_iter=5000, filename='tmp127_re', data_type='train')
eval_metrics = get_avg_stats(old_workspace, bgn_iter=10000, fin_iter=50001, interval_iter=5000, filename='tmp127_re', data_type='test')
maps0te = eval_metrics['average_precision']
(maps0te, sorted_idxes, sorted_lbs) = _sort(maps0te)
bal_num, unbal_num, eval_num = _samples_num_per_class()
output_dict = {
'labels': config.labels,
'label_quality': get_label_quality(),
'sorted_indexes_for_plot': sorted_idxes,
'official_balanced_trainig_samples': bal_num,
'official_unbalanced_training_samples': unbal_num,
'official_eval_samples': eval_num,
'downloaded_full_training_samples': config.full_samples_per_class,
'averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations':
{'bal_train': bal_train_metrics, 'eval': eval_metrics}
}
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
_workspace = '/vol/vssp/msos/qk/bytedance/workspaces_important/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(_workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
_idx = iteration // 2000
_dict = {'bal_train': {'average_precision': statistics_dict['bal'][_idx]['average_precision'],
'auc': statistics_dict['bal'][_idx]['auc']},
'eval': {'average_precision': statistics_dict['test'][_idx]['average_precision'],
'auc': statistics_dict['test'][_idx]['auc']}}
return _dict
iteration = 600000
output_dict['cnn13_system_iteration60k'] = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
iteration = 560000
output_dict['mobilenetv1_system_iteration56k'] = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32, iteration)
cPickle.dump(output_dict, open(out_stat_path, 'wb'))
print('Write stats for paper to {}'.format(out_stat_path))
def prepare_plot_long_4_rows(sorted_lbs):
N = len(sorted_lbs)
f,(ax1a, ax2a, ax3a, ax4a) = plt.subplots(4, 1,sharey=False, facecolor='w', figsize=(10, 12))
fontsize = 5
K = 132
ax1a.set_xlim(0, K)
ax2a.set_xlim(K, 2 * K)
ax3a.set_xlim(2 * K, 3 * K)
ax4a.set_xlim(3 * K, N)
truncated_sorted_lbs = []
for lb in sorted_lbs:
lb = lb[0 : 25]
words = lb.split(' ')
if len(words[-1]) < 3:
lb = ' '.join(words[0:-1])
truncated_sorted_lbs.append(lb)
ax1a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax2a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax3a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax4a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax1a.set_yscale('log')
ax2a.set_yscale('log')
ax3a.set_yscale('log')
ax4a.set_yscale('log')
ax1b = ax1a.twinx()
ax2b = ax2a.twinx()
ax3b = ax3a.twinx()
ax4b = ax4a.twinx()
ax1b.set_ylim(0., 1.)
ax2b.set_ylim(0., 1.)
ax3b.set_ylim(0., 1.)
ax4b.set_ylim(0., 1.)
ax1b.set_ylabel('Average precision')
ax2b.set_ylabel('Average precision')
ax3b.set_ylabel('Average precision')
ax4b.set_ylabel('Average precision')
ax1b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax2b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax3b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax4b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax1a.xaxis.set_ticks(np.arange(K))
ax1a.xaxis.set_ticklabels(truncated_sorted_lbs[0:K], rotation=90, fontsize=fontsize)
ax1a.xaxis.tick_bottom()
ax1a.set_ylabel("Number of audio clips")
ax2a.xaxis.set_ticks(np.arange(K, 2*K))
ax2a.xaxis.set_ticklabels(truncated_sorted_lbs[K:2*K], rotation=90, fontsize=fontsize)
ax2a.xaxis.tick_bottom()
# ax2a.tick_params(left='off', which='both')
ax2a.set_ylabel("Number of audio clips")
ax3a.xaxis.set_ticks(np.arange(2*K, 3*K))
ax3a.xaxis.set_ticklabels(truncated_sorted_lbs[2*K:3*K], rotation=90, fontsize=fontsize)
ax3a.xaxis.tick_bottom()
ax3a.set_ylabel("Number of audio clips")
ax4a.xaxis.set_ticks(np.arange(3*K, N))
ax4a.xaxis.set_ticklabels(truncated_sorted_lbs[3*K:], rotation=90, fontsize=fontsize)
ax4a.xaxis.tick_bottom()
# ax4a.tick_params(left='off', which='both')
ax4a.set_ylabel("Number of audio clips")
ax1a.spines['right'].set_visible(False)
ax1b.spines['right'].set_visible(False)
ax2a.spines['left'].set_visible(False)
ax2b.spines['left'].set_visible(False)
ax2a.spines['right'].set_visible(False)
ax2b.spines['right'].set_visible(False)
ax3a.spines['left'].set_visible(False)
ax3b.spines['left'].set_visible(False)
ax3a.spines['right'].set_visible(False)
ax3b.spines['right'].set_visible(False)
ax4a.spines['left'].set_visible(False)
ax4b.spines['left'].set_visible(False)
plt.subplots_adjust(hspace = 0.8)
return ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b
def _scatter_4_rows(x, ax, ax2, ax3, ax4, s, c, marker='.', alpha=1.):
N = len(x)
ax.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax2.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax3.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax4.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
def _plot_4_rows(x, ax, ax2, ax3, ax4, c, linewidth=1.0, alpha=1.0, label=""):
N = len(x)
ax.plot(x, c=c, linewidth=linewidth, alpha=alpha)
ax2.plot(x, c=c, linewidth=linewidth, alpha=alpha)
ax3.plot(x, c=c, linewidth=linewidth, alpha=alpha)
line, = ax4.plot(x, c=c, linewidth=linewidth, alpha=alpha, label=label)
return line
def plot_long_fig(args):
# Arguments & parameters
workspace = args.workspace
# Paths
stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
save_out_path = 'results/long_fig.pdf'
create_folder(os.path.dirname(save_out_path))
# Stats
stats = cPickle.load(open(stat_path, 'rb'))
N = len(config.labels)
sorted_indexes = stats['sorted_indexes_for_plot']
sorted_labels = np.array(config.labels)[sorted_indexes]
audio_clips_per_class = stats['official_balanced_trainig_samples'] + stats['official_unbalanced_training_samples']
audio_clips_per_class = audio_clips_per_class[sorted_indexes]
(ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b) = prepare_plot_long_4_rows(sorted_labels)
# plot the same data on both axes
ax1a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax2a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax3a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax4a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
maps_avg_instances = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
maps_avg_instances = maps_avg_instances[sorted_indexes]
maps_cnn13 = stats['cnn13_system_iteration60k']['eval']['average_precision']
maps_cnn13 = maps_cnn13[sorted_indexes]
maps_mobilenetv1 = stats['mobilenetv1_system_iteration56k']['eval']['average_precision']
maps_mobilenetv1 = maps_mobilenetv1[sorted_indexes]
maps_logmel_wavegram_cnn = _load_metrics0_classwise('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
maps_logmel_wavegram_cnn = maps_logmel_wavegram_cnn[sorted_indexes]
_scatter_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, s=5, c='k')
_scatter_4_rows(maps_cnn13, ax1b, ax2b, ax3b, ax4b, s=5, c='r')
_scatter_4_rows(maps_mobilenetv1, ax1b, ax2b, ax3b, ax4b, s=5, c='b')
_scatter_4_rows(maps_logmel_wavegram_cnn, ax1b, ax2b, ax3b, ax4b, s=5, c='g')
linewidth = 0.7
line0te = _plot_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, c='k', linewidth=linewidth, label='AP with averaging instances (baseline)')
line1te = _plot_4_rows(maps_cnn13, ax1b, ax2b, ax3b, ax4b, c='r', linewidth=linewidth, label='AP with CNN14')
line2te = _plot_4_rows(maps_mobilenetv1, ax1b, ax2b, ax3b, ax4b, c='b', linewidth=linewidth, label='AP with MobileNetV1')
line3te = _plot_4_rows(maps_logmel_wavegram_cnn, ax1b, ax2b, ax3b, ax4b, c='g', linewidth=linewidth, label='AP with Wavegram-Logmel-CNN')
label_quality = stats['label_quality']
sorted_rate = np.array(label_quality)[sorted_indexes]
for k in range(len(sorted_rate)):
if sorted_rate[k] and sorted_rate[k] == 1:
sorted_rate[k] = 0.99
ax1b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
ax2b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
ax3b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
line_label_quality = ax4b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+', label='Label quality')
ax1b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
ax2b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
ax3b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
ax4b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
plt.legend(handles=[line0te, line1te, line2te, line3te, line_label_quality], fontsize=6, loc=1)
plt.savefig(save_out_path)
print('Save fig to {}'.format(save_out_path))
def plot_flops(args):
# Arguments & parameters
workspace = args.workspace
# Paths
save_out_path = 'results_map/flops.pdf'
create_folder(os.path.dirname(save_out_path))
plt.figure(figsize=(5, 5))
fig, ax = plt.subplots(1, 1)
model_types = np.array(['Cnn6', 'Cnn10', 'Cnn14', 'ResNet22', 'ResNet38', 'ResNet54',
'MobileNetV1', 'MobileNetV2', 'DaiNet', 'LeeNet', 'LeeNet18',
'Res1dNet30', 'Res1dNet44', 'Wavegram-CNN', 'Wavegram-\nLogmel-CNN'])
flops = np.array([21.986, 21.986, 42.220, 30.081, 48.962, 54.563, 3.614, 2.810,
30.395, 4.741, 26.369, 32.688, 61.833, 44.234, 53.510])
mAPs = np.array([0.343, 0.380, 0.431, 0.430, 0.434, 0.429, 0.389, 0.383, 0.295,
0.266, 0.336, 0.365, 0.355, 0.389, 0.439])
    sorted_indexes = np.argsort(flops)  # currently unused; models are plotted in the order listed above
ax.scatter(flops, mAPs)
shift = [[1, 0.002], [1, -0.006], [-1, -0.014], [-2, 0.006], [-7, 0.006],
[1, -0.01], [0.5, 0.004], [-1, -0.014], [1, -0.007], [0.8, -0.008],
[1, -0.007], [1, 0.002], [-6, -0.015], [1, -0.008], [0.8, 0]]
for i, model_type in enumerate(model_types):
ax.annotate(model_type, (flops[i] + shift[i][0], mAPs[i] + shift[i][1]))
ax.plot(flops[[0, 1, 2]], mAPs[[0, 1, 2]])
ax.plot(flops[[3, 4, 5]], mAPs[[3, 4, 5]])
ax.plot(flops[[6, 7]], mAPs[[6, 7]])
ax.plot(flops[[9, 10]], mAPs[[9, 10]])
ax.plot(flops[[11, 12]], mAPs[[11, 12]])
ax.plot(flops[[13, 14]], mAPs[[13, 14]])
ax.set_xlim(0, 70)
ax.set_ylim(0.2, 0.5)
ax.set_xlabel('Multi-adds (million)')
ax.set_ylabel('mAP')
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.savefig(save_out_path)
print('Write out figure to {}'.format(save_out_path))
def spearman(args):
# Arguments & parameters
workspace = args.workspace
# Paths
stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
# Stats
stats = cPickle.load(open(stat_path, 'rb'))
label_quality = np.array([qu if qu else 0.5 for qu in stats['label_quality']])
training_samples = np.array(stats['official_balanced_trainig_samples']) + \
np.array(stats['official_unbalanced_training_samples'])
mAP = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
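    # Spearman rank correlation between per-class AP of the baseline system and (i) the number
    # of official training clips and (ii) the estimated label quality.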
    import scipy.stats
samples_spearman = scipy.stats.spearmanr(training_samples, mAP)[0]
quality_spearman = scipy.stats.spearmanr(label_quality, mAP)[0]
print('Training samples spearman: {:.3f}'.format(samples_spearman))
print('Quality spearman: {:.3f}'.format(quality_spearman))
def print_results(args):
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
#
(mAP, mAUC, dprime) = _load_metrics0_classwise2('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics0_classwise2('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
# partial
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
# Sample rate
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
# Mel bins
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='mode')
parser_plot = subparsers.add_parser('plot')
parser_plot.add_argument('--dataset_dir', type=str, required=True)
parser_plot.add_argument('--workspace', type=str, required=True)
parser_plot.add_argument('--select', type=str, required=True)
parser_plot = subparsers.add_parser('plot_for_paper')
parser_plot.add_argument('--dataset_dir', type=str, required=True)
parser_plot.add_argument('--workspace', type=str, required=True)
parser_plot.add_argument('--select', type=str, required=True)
parser_plot = subparsers.add_parser('plot_for_paper2')
parser_plot.add_argument('--dataset_dir', type=str, required=True)
    parser_plot.add_argument('--workspace', type=str, required=True)
    parser_table_values = subparsers.add_parser('table_values')
    parser_table_values.add_argument('--dataset_dir', type=str, required=True)
    parser_table_values.add_argument('--workspace', type=str, required=True)
    parser_table_values.add_argument('--select', type=str, required=True)
parser_values = subparsers.add_parser('plot_class_iteration')
parser_values.add_argument('--workspace', type=str, required=True)
parser_values.add_argument('--select', type=str, required=True)
parser_summary_stats = subparsers.add_parser('summary_stats')
parser_summary_stats.add_argument('--workspace', type=str, required=True)
parser_plot_long = subparsers.add_parser('plot_long_fig')
parser_plot_long.add_argument('--workspace', type=str, required=True)
parser_plot_flops = subparsers.add_parser('plot_flops')
parser_plot_flops.add_argument('--workspace', type=str, required=True)
parser_spearman = subparsers.add_parser('spearman')
parser_spearman.add_argument('--workspace', type=str, required=True)
parser_print = subparsers.add_parser('print')
parser_print.add_argument('--workspace', type=str, required=True)
args = parser.parse_args()
if args.mode == 'plot':
plot(args)
elif args.mode == 'plot_for_paper':
plot_for_paper(args)
elif args.mode == 'plot_for_paper2':
plot_for_paper2(args)
elif args.mode == 'table_values':
table_values(args)
elif args.mode == 'plot_class_iteration':
plot_class_iteration(args)
elif args.mode == 'summary_stats':
summary_stats(args)
elif args.mode == 'plot_long_fig':
plot_long_fig(args)
elif args.mode == 'plot_flops':
plot_flops(args)
elif args.mode == 'spearman':
spearman(args)
elif args.mode == 'print':
print_results(args)
else:
        raise Exception('Incorrect arguments!')
GL-AT | GL-AT-master/utils/create_indexes.py | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import create_folder, get_sub_filepaths
import config
def create_indexes(args):
"""Create indexes a for dataloader to read for training. When users have
a new task and their own data, they need to create similar indexes. The
indexes contain meta information of "where to find the data for training".
"""
# Arguments & parameters
waveforms_hdf5_path = args.waveforms_hdf5_path
indexes_hdf5_path = args.indexes_hdf5_path
# Paths
create_folder(os.path.dirname(indexes_hdf5_path))
with h5py.File(waveforms_hdf5_path, 'r') as hr:
with h5py.File(indexes_hdf5_path, 'w') as hw:
audios_num = len(hr['audio_name'])
hw.create_dataset('audio_name', data=hr['audio_name'][:], dtype='S20')
hw.create_dataset('target', data=hr['target'][:], dtype=np.bool)
hw.create_dataset('hdf5_path', data=[waveforms_hdf5_path.encode()] * audios_num, dtype='S200')
hw.create_dataset('index_in_hdf5', data=np.arange(audios_num), dtype=np.int32)
print('Write to {}'.format(indexes_hdf5_path))
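# Example invocation (illustrative paths only):
# python3 utils/create_indexes.py create_indexes \
#     --waveforms_hdf5_path=$WORKSPACE/hdf5s/waveforms/balanced_train.h5 \
#     --indexes_hdf5_path=$WORKSPACE/hdf5s/indexes/balanced_train.h5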
def combine_full_indexes(args):
"""Combine all balanced and unbalanced indexes hdf5s to a single hdf5. This
combined indexes hdf5 is used for training with full data (~20k balanced
audio clips + ~1.9m unbalanced audio clips).
"""
# Arguments & parameters
indexes_hdf5s_dir = args.indexes_hdf5s_dir
full_indexes_hdf5_path = args.full_indexes_hdf5_path
classes_num = config.classes_num
# Paths
paths = get_sub_filepaths(indexes_hdf5s_dir)
paths = [path for path in paths if (
'train' in path and 'full_train' not in path and 'mini' not in path)]
print('Total {} hdf5 to combine.'.format(len(paths)))
with h5py.File(full_indexes_hdf5_path, 'w') as full_hf:
full_hf.create_dataset(
name='audio_name',
shape=(0,),
maxshape=(None,),
dtype='S20')
full_hf.create_dataset(
name='target',
shape=(0, classes_num),
maxshape=(None, classes_num),
dtype=np.bool)
full_hf.create_dataset(
name='hdf5_path',
shape=(0,),
maxshape=(None,),
dtype='S200')
full_hf.create_dataset(
name='index_in_hdf5',
shape=(0,),
maxshape=(None,),
dtype=np.int32)
for path in paths:
with h5py.File(path, 'r') as part_hf:
print(path)
n = len(full_hf['audio_name'][:])
new_n = n + len(part_hf['audio_name'][:])
full_hf['audio_name'].resize((new_n,))
full_hf['audio_name'][n : new_n] = part_hf['audio_name'][:]
full_hf['target'].resize((new_n, classes_num))
full_hf['target'][n : new_n] = part_hf['target'][:]
full_hf['hdf5_path'].resize((new_n,))
full_hf['hdf5_path'][n : new_n] = part_hf['hdf5_path'][:]
full_hf['index_in_hdf5'].resize((new_n,))
full_hf['index_in_hdf5'][n : new_n] = part_hf['index_in_hdf5'][:]
print('Write combined full hdf5 to {}'.format(full_indexes_hdf5_path))
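# Example invocation (illustrative paths only):
# python3 utils/create_indexes.py combine_full_indexes \
#     --indexes_hdf5s_dir=$WORKSPACE/hdf5s/indexes \
#     --full_indexes_hdf5_path=$WORKSPACE/hdf5s/indexes/full_train.h5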
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
parser_create_indexes = subparsers.add_parser('create_indexes')
parser_create_indexes.add_argument('--waveforms_hdf5_path', type=str, required=True, help='Path of packed waveforms hdf5.')
parser_create_indexes.add_argument('--indexes_hdf5_path', type=str, required=True, help='Path to write out indexes hdf5.')
parser_combine_full_indexes = subparsers.add_parser('combine_full_indexes')
parser_combine_full_indexes.add_argument('--indexes_hdf5s_dir', type=str, required=True, help='Directory containing indexes hdf5s to be combined.')
parser_combine_full_indexes.add_argument('--full_indexes_hdf5_path', type=str, required=True, help='Path to write out full indexes hdf5 file.')
args = parser.parse_args()
if args.mode == 'create_indexes':
create_indexes(args)
elif args.mode == 'combine_full_indexes':
combine_full_indexes(args)
else:
raise Exception('Incorrect arguments!') | 4,498 | 34.706349 | 151 | py |
GL-AT | GL-AT-master/pytorch/inference.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def audio_tagging(args):
"""Inference audio tagging result of an audio clip.
"""
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
sample_rate = config.sample_rate
classes_num = config.classes_num
labels = config.labels
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]
"""(classes_num,)"""
sorted_indexes = np.argsort(clipwise_output)[::-1]
# Print audio tagging top probabilities
for k in range(10):
print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
clipwise_output[sorted_indexes[k]]))
# Print embedding
if 'embedding' in batch_output_dict.keys():
embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]
print('embedding: {}'.format(embedding.shape))
return clipwise_output, labels
def sound_event_detection(args):
"""Inference sound event detection result of an audio clip.
"""
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
sample_rate = config.sample_rate
classes_num = config.classes_num
labels = config.labels
frames_per_second = sample_rate // hop_size
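    # With the default 32 kHz sample rate and 320-sample hop this evaluates to 100 output
    # frames per second, which is used below to convert frame indices into seconds.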
# Paths
fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path)))
create_folder(os.path.dirname(fig_path))
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
print('Sound event detection result (time_steps x classes_num): {}'.format(
framewise_output.shape))
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size,
hop_length=hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0 : top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
plt.savefig(fig_path)
print('Save sound event detection visualization to {}'.format(fig_path))
return framewise_output, labels
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Audio tagging and sound event detection inference.')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
parser_at.add_argument('--model_type', type=str, required=True)
parser_at.add_argument('--checkpoint_path', type=str, required=True)
parser_at.add_argument('--audio_path', type=str, required=True)
parser_at.add_argument('--cuda', action='store_true', default=False)
parser_sed = subparsers.add_parser('sound_event_detection')
parser_sed.add_argument('--window_size', type=int, default=1024)
parser_sed.add_argument('--hop_size', type=int, default=320)
parser_sed.add_argument('--mel_bins', type=int, default=64)
parser_sed.add_argument('--fmin', type=int, default=50)
parser_sed.add_argument('--fmax', type=int, default=14000)
parser_sed.add_argument('--model_type', type=str, required=True)
parser_sed.add_argument('--checkpoint_path', type=str, required=True)
parser_sed.add_argument('--audio_path', type=str, required=True)
parser_sed.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'audio_tagging':
audio_tagging(args)
elif args.mode == 'sound_event_detection':
sound_event_detection(args)
else:
        raise Exception('Incorrect arguments!')
GL-AT | GL-AT-master/pytorch/main.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
from sklearn import metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import (create_folder, get_filename, create_logging, Mixup,
StatisticsContainer)
from models import *
from pytorch_utils import (move_data_to_device, count_parameters, count_flops,
do_mixup)
from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler,
AlternateTrainSampler, EvaluateSampler, collate_fn)
from evaluate import Evaluator
import config
from losses import get_loss_func
def get_train_sampler(balanced):
"""Get train sampler.
Args:
balanced: str
augmentation: str
train_indexes_hdf5_path: str
black_list_csv: str
batch_size: int
Returns:
train_sampler: object
train_collector: object
"""
if balanced == 'none':
_Sampler = TrainSampler
elif balanced == 'balanced':
_Sampler = BalancedTrainSampler
elif balanced == 'alternate':
_Sampler = AlternateTrainSampler
def train(args):
"""Train AudioSet tagging model.
    Args:
      workspace: str
      data_type: 'balanced_train' | 'full_train'
      window_size: int
      hop_size: int
      mel_bins: int
      fmin: int
      fmax: int
      model_type: str
      loss_type: 'clip_bce'
      balanced: 'none' | 'balanced' | 'alternate'
      augmentation: 'none' | 'mixup'
      batch_size: int
      N: int
      length: int
      learning_rate: float
      resume_iteration: int
      early_stop: int
      cuda: bool
    """
    # Arguments & parameters
workspace = args.workspace
data_type = args.data_type
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
loss_type = args.loss_type
balanced = args.balanced
augmentation = args.augmentation
batch_size = args.batch_size
learning_rate = args.learning_rate
resume_iteration = args.resume_iteration
early_stop = args.early_stop
N = args.N
length = args.length
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
filename = args.filename
num_workers = 8
sample_rate = config.sample_rate
clip_samples = config.clip_samples
classes_num = config.classes_num
loss_func = get_loss_func(loss_type)
# Paths
black_list_csv = None
train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
'{}.h5'.format(data_type))
eval_bal_indexes_hdf5_path = os.path.join(workspace,
'hdf5s', 'indexes', 'balanced_train.h5')
eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
'eval.h5')
checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'N={},length={}'.format(N,length))
create_folder(checkpoints_dir)
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'N={},length={}'.format(N,length),
'statistics.pkl')
create_folder(os.path.dirname(statistics_path))
logs_dir = os.path.join(workspace, 'logs', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'N={},length={}'.format(N,length))
create_logging(logs_dir, filemode='w')
logging.info(args)
if 'cuda' in str(device):
logging.info('Using GPU.')
device = 'cuda'
else:
logging.info('Using CPU.')
device = 'cpu'
# Model
Model_G = eval(model_type)
model_G = Model_G(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
model_G.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Wavegram_Logmel_Cnn14_mAP=0.439.pth')['model'])
# model_G.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Cnn10_mAP=0.380.pth')['model'])
# model_G.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/ResNet38_mAP=0.434.pth')['model'])
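    # model_G is the pretrained global audio tagging model; it is kept frozen (only
    # model.parameters() are optimized below) and its clipwise output and feature map are fed
    # to the trainable '_local' model as additional inputs.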
Model = eval(model_type+'_local')
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num, N=N, length=length)
params_num = count_parameters(model)
# flops_num = count_flops(model, clip_samples)
logging.info('Parameters num: {}'.format(params_num))
# logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9))
# Dataset will be used by DataLoader later. Dataset takes a meta as input
# and return a waveform and a target.
dataset = AudioSetDataset(clip_samples=clip_samples, classes_num=classes_num)
# Train sampler
if balanced == 'none':
Sampler = TrainSampler
elif balanced == 'balanced':
Sampler = BalancedTrainSampler
elif balanced == 'alternate':
Sampler = AlternateTrainSampler
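    # With mixup the sampler yields 2 * batch_size clips per step so that pairs of clips can be
    # mixed by do_mixup (assumed to mix adjacent clips in the batch), leaving an effective
    # training batch of batch_size mixed examples.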
train_sampler = Sampler(
indexes_hdf5_path=train_indexes_hdf5_path,
batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size,
black_list_csv=black_list_csv)
# Evaluate sampler
eval_bal_sampler = EvaluateSampler(
indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size)
eval_test_sampler = EvaluateSampler(
indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=train_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=eval_bal_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
eval_test_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=eval_test_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
if 'mixup' in augmentation:
mixup_augmenter = Mixup(mixup_alpha=1.)
# Evaluator
evaluator = Evaluator(model=model,model_G=model_G)
# Statistics
statistics_container = StatisticsContainer(statistics_path)
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0., amsgrad=True)
train_bgn_time = time.time()
# Resume training
if resume_iteration > 0:
resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.pth'.format(resume_iteration))
logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
checkpoint = torch.load(resume_checkpoint_path)
model.load_state_dict(checkpoint['model'])
train_sampler.load_state_dict(checkpoint['sampler'])
statistics_container.load_state_dict(resume_iteration)
iteration = checkpoint['iteration']
else:
iteration = 0
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
model_G.to(device)
time1 = time.time()
for batch_data_dict in train_loader:
"""batch_data_dict: {
'audio_name': (batch_size [*2 if mixup],),
'waveform': (batch_size [*2 if mixup], clip_samples),
'target': (batch_size [*2 if mixup], classes_num),
(ifexist) 'mixup_lambda': (batch_size * 2,)}
"""
# Evaluate
if (iteration % 2000 == 0 and iteration > resume_iteration):
train_fin_time = time.time()
bal_statistics = evaluator.evaluate(eval_bal_loader)
test_statistics = evaluator.evaluate(eval_test_loader)
logging.info('Validate bal mAP: {:.3f}'.format(
np.mean(bal_statistics['average_precision'])))
logging.info('Validate test mAP: {:.3f}'.format(
np.mean(test_statistics['average_precision'])))
statistics_container.append(iteration, bal_statistics, data_type='bal')
statistics_container.append(iteration, test_statistics, data_type='test')
statistics_container.dump()
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s'
''.format(iteration, train_time, validate_time))
logging.info('------------------------------------')
train_bgn_time = time.time()
# Save model
if iteration % 2000 == 0:
checkpoint = {
'iteration': iteration,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'sampler': train_sampler.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
# Mixup lambda
if 'mixup' in augmentation:
batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda(
batch_size=len(batch_data_dict['waveform']))
# Move data to device
for key in batch_data_dict.keys():
batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)
model_G.eval()
if 'mixup' in augmentation:
batch_output_dict_G = model_G(batch_data_dict['waveform'],
batch_data_dict['mixup_lambda'])
"""{'clipwise_output': (batch_size, classes_num), ...}"""
else:
batch_output_dict_G = model_G(batch_data_dict['waveform'], None)
"""{'clipwise_output': (batch_size, classes_num), ...}"""
# Forward
model.train()
if 'mixup' in augmentation:
batch_output_dict = model(batch_data_dict['waveform'], batch_output_dict_G['clipwise_output'], batch_output_dict_G['feature_map'],
batch_data_dict['mixup_lambda'])
"""{'clipwise_output': (batch_size, classes_num), ...}"""
batch_target_dict = {'target': do_mixup(batch_data_dict['target'],
batch_data_dict['mixup_lambda'])}
"""{'target': (batch_size, classes_num)}"""
else:
batch_output_dict = model(batch_data_dict['waveform'], batch_output_dict_G['clipwise_output'], batch_output_dict_G['feature_map'], None)
"""{'clipwise_output': (batch_size, classes_num), ...}"""
batch_target_dict = {'target': batch_data_dict['target']}
"""{'target': (batch_size, classes_num)}"""
# Loss
loss = loss_func(batch_output_dict, batch_target_dict)
# Backward
loss.backward()
# print(loss)
optimizer.step()
optimizer.zero_grad()
if iteration % 400 == 0:
            print('--- Iteration: {}, train time: {:.3f} s / 400 iterations ---'\
.format(iteration, time.time() - time1))
time1 = time.time()
iteration += 1
# Stop learning
if iteration == early_stop:
break
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train an AudioSet tagging model.')
subparsers = parser.add_subparsers(dest='mode')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--workspace', type=str, required=True)
parser_train.add_argument('--data_type', type=str, default='full_train', choices=['balanced_train', 'full_train'])
parser_train.add_argument('--window_size', type=int, default=1024)
parser_train.add_argument('--hop_size', type=int, default=320)
parser_train.add_argument('--mel_bins', type=int, default=64)
parser_train.add_argument('--fmin', type=int, default=50)
parser_train.add_argument('--fmax', type=int, default=14000)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce'])
parser_train.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate'])
parser_train.add_argument('--augmentation', type=str, default='mixup', choices=['none', 'mixup'])
parser_train.add_argument('--batch_size', type=int, default=32)
parser_train.add_argument('--N', type=int, default=5)
parser_train.add_argument('--length', type=int, default=2)
parser_train.add_argument('--learning_rate', type=float, default=1e-3)
parser_train.add_argument('--resume_iteration', type=int, default=0)
parser_train.add_argument('--early_stop', type=int, default=1000000)
parser_train.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
else:
        raise Exception('Incorrect arguments!')
GL-AT | GL-AT-master/pytorch/losses.py | import torch
import torch.nn.functional as F
def clip_bce(output_dict, target_dict):
"""Binary crossentropy loss.
"""
return F.binary_cross_entropy(
output_dict['local_prob'], target_dict['target'])
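# Note: F.binary_cross_entropy expects probabilities in [0, 1]; the 'local_prob' produced by
# the models in this repo is therefore assumed to already be passed through a sigmoid.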
def get_loss_func(loss_type):
if loss_type == 'clip_bce':
return clip_bce | 308 | 21.071429 | 57 | py |
GL-AT | GL-AT-master/pytorch/test.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def audio_tagging(args):
"""Inference audio tagging result of an audio clip.
"""
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
sample_rate = config.sample_rate
classes_num = config.classes_num
labels = config.labels
# Model
checkpoint_path = '/data/dean/panns/audioset_tagging_cnn/pytorch/Cnn10_mAP=0.380.pth'
checkpoint_path_2 = '/data/dean/audioset_tagging_cnn/workspaces/checkpoints/main/sample_rate=32000,window_size=1024,hop_size=320,mel_bins=64,fmin=50,fmax=14000/data_type=full_train/Cnn10/loss_type=clip_bce/balanced=balanced/augmentation=none/batch_size=32/N=5,length=2/198000_iterations.pth'
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
Model_2 = eval(model_type+'_local')
model_2 = Model_2(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num, N=5, length=2)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
checkpoint_2 = torch.load(checkpoint_path_2, map_location=device)
model_2.load_state_dict(checkpoint_2['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
model_2 = torch.nn.DataParallel(model_2)
if 'cuda' in str(device):
model.to(device)
model_2.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform_ = waveform
# stft_ = librosa.core.stft(y=waveform,n_fft=window_size,hop_length=hop_size).T
# melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
# mel_spec = np.dot(np.abs(stft_)**2,melW)
# logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
# logmel = logmel.astype(np.float32)
# logmel = np.transpose(logmel,(1,0))
# plt.imshow(logmel,cmap=plt.cm.jet)
# plt.axis('off')
# fig = plt.gcf()
# height,width=logmel.shape
# fig.set_size_inches(width/40.,height/40.)
# plt.gca().xaxis.set_major_locator(plt.NullLocator())
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
# plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
# plt.margins(0,0)
# plt.savefig('waveform.png',dpi=200,pad_inches=0)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
model_2.eval()
batch_output_dict_2 = model(waveform, None)
batch_output_dict = model_2(waveform, batch_output_dict_2['clipwise_output'], batch_output_dict_2['feature_map'], None)
clipwise_output = batch_output_dict['prob'].data.cpu().numpy()[0]
"""(classes_num,)"""
sorted_indexes = np.argsort(clipwise_output)[::-1]
# Print audio tagging top probabilities
for k in range(10):
print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
clipwise_output[sorted_indexes[k]]))
    # Plot and save the log-mel spectrogram of five 2-second (64000-sample) segments of the clip.
    segments = [(109395, 173395, 'waveform1.png'), (34976, 98976, 'waveform2.png'),
        (146604, 210604, 'waveform3.png'), (49860, 113860, 'waveform4.png'),
        (5209, 69209, 'waveform5.png')]
    for (seg_bgn, seg_fin, fig_name) in segments:
        segment = waveform_[seg_bgn : seg_fin]
        stft_ = librosa.core.stft(y=segment, n_fft=window_size, hop_length=hop_size).T
        melW = librosa.filters.mel(sr=sample_rate, n_fft=window_size, n_mels=mel_bins, fmin=fmin, fmax=fmax).T
        mel_spec = np.dot(np.abs(stft_) ** 2, melW)
        logmel = librosa.core.power_to_db(mel_spec, ref=1.0, amin=1e-10, top_db=None)
        logmel = logmel.astype(np.float32)
        logmel = np.transpose(logmel, (1, 0))
        plt.imshow(logmel, cmap=plt.cm.jet)
        plt.axis('off')
        fig = plt.gcf()
        height, width = logmel.shape
        fig.set_size_inches(width / 40., height / 40.)
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
        plt.margins(0, 0)
        plt.savefig(fig_name, dpi=200, pad_inches=0)
return clipwise_output, labels
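# Hedged usage sketch (not part of the original script): audio_tagging() expects the
# argparse namespace built in __main__ below, so it can also be driven directly from
# Python. The audio path and the 'Cnn10' model_type are placeholders/assumptions.
#
#   import argparse
#   args = argparse.Namespace(window_size=1024, hop_size=320, mel_bins=64, fmin=50,
#                             fmax=14000, model_type='Cnn10', audio_path='example.wav',
#                             cuda=True)
#   clipwise_output, labels = audio_tagging(args)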
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
parser_at.add_argument('--model_type', type=str, required=True)
parser_at.add_argument('--audio_path', type=str, required=True)
parser_at.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'audio_tagging':
audio_tagging(args)
else:
raise Exception('Error argument!') | 9,038 | 40.847222 | 295 | py |
GL-AT | GL-AT-master/pytorch/evaluate.py | from sklearn import metrics
from pytorch_utils import forward
class Evaluator(object):
def __init__(self, model, model_G):
"""Evaluator.
Args:
          model: object
          model_G: object
"""
self.model = model
self.model_G = model_G
def evaluate(self, data_loader):
"""Forward evaluation data and calculate statistics.
Args:
data_loader: object
Returns:
statistics: dict,
{'average_precision': (classes_num,), 'auc': (classes_num,)}
"""
# Forward
output_dict = forward(
model=self.model,
model_G=self.model_G,
generator=data_loader,
return_target=True)
clipwise_output = output_dict['prob'] # (audios_num, classes_num)
target = output_dict['target'] # (audios_num, classes_num)
average_precision = metrics.average_precision_score(
target, clipwise_output, average=None)
auc = metrics.roc_auc_score(target, clipwise_output, average=None)
statistics = {'average_precision': average_precision, 'auc': auc}
return statistics | 1,176 | 25.75 | 76 | py |
GL-AT | GL-AT-master/pytorch/finetune_template.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
torch.backends.cudnn.benchmark=True
torch.manual_seed(0)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import get_filename
from models import *
import config
class Transfer_Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, freeze_base):
"""Classifier for a new task using pretrained Cnn14 as a sub module.
"""
super(Transfer_Cnn14, self).__init__()
audioset_classes_num = 527
self.base = Cnn14(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, audioset_classes_num)
# Transfer to another task layer
self.fc_transfer = nn.Linear(2048, classes_num, bias=True)
if freeze_base:
# Freeze AudioSet pretrained layers
for param in self.base.parameters():
param.requires_grad = False
self.init_weights()
def init_weights(self):
init_layer(self.fc_transfer)
def load_from_pretrain(self, pretrained_checkpoint_path):
checkpoint = torch.load(pretrained_checkpoint_path)
self.base.load_state_dict(checkpoint['model'])
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, data_length)
"""
output_dict = self.base(input, mixup_lambda)
embedding = output_dict['embedding']
clipwise_output = torch.log_softmax(self.fc_transfer(embedding), dim=-1)
output_dict['clipwise_output'] = clipwise_output
return output_dict
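# Hedged usage sketch (assumption, not from the original template): constructing the
# transfer model, loading an AudioSet-pretrained checkpoint into the Cnn14 base, and
# running a dummy batch. The checkpoint filename and classes_num=10 are placeholders,
# and the exact output shapes depend on the Cnn14 implementation this template expects.
#
#   model = Transfer_Cnn14(sample_rate=32000, window_size=1024, hop_size=320,
#                          mel_bins=64, fmin=50, fmax=14000, classes_num=10,
#                          freeze_base=True)
#   model.load_from_pretrain('Cnn14_pretrained.pth')   # placeholder path
#   out = model(torch.rand(2, 32000))                  # out['clipwise_output']: (2, 10)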
def train(args):
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
pretrained_checkpoint_path = args.pretrained_checkpoint_path
freeze_base = args.freeze_base
device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
sample_rate = config.sample_rate
classes_num = config.classes_num
pretrain = True if pretrained_checkpoint_path else False
# Model
Model = eval(model_type)
model = Model(sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
classes_num, freeze_base)
# Load pretrained model
if pretrain:
logging.info('Load pretrained model from {}'.format(pretrained_checkpoint_path))
model.load_from_pretrain(pretrained_checkpoint_path)
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in device:
model.to(device)
print('Load pretrained model successfully!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
# Train
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--window_size', type=int, required=True)
parser_train.add_argument('--hop_size', type=int, required=True)
parser_train.add_argument('--mel_bins', type=int, required=True)
parser_train.add_argument('--fmin', type=int, required=True)
parser_train.add_argument('--fmax', type=int, required=True)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--pretrained_checkpoint_path', type=str)
parser_train.add_argument('--freeze_base', action='store_true', default=False)
parser_train.add_argument('--cuda', action='store_true', default=False)
# Parse arguments
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
else:
raise Exception('Error argument!') | 3,979 | 30.587302 | 88 | py |
GL-AT | GL-AT-master/pytorch/models.py | import os
import sys
import math
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from pytorch_utils import do_mixup, interpolate, pad_framewise_output
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class Cnn10(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(Cnn10, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
feature_map = x
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'feature_map': feature_map}
return output_dict
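# Hedged shape sketch (assumption): with the AudioSet configuration, forwarding two
# 10-second 32 kHz clips through Cnn10 gives
#   out['clipwise_output']: (2, 527)
#   out['feature_map']:     (2, 512, time_frames)   # channels kept, mel axis averaged out
#
#   model = Cnn10(32000, 1024, 320, 64, 50, 14000, 527)
#   out = model(torch.rand(2, 320000))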
class Cnn10_local(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, N, length):
super(Cnn10_local, self).__init__()
self.N = N
self.length = length
self.length_all = 10
self.duration = int(sample_rate*self.length)
self.duration_all = int(sample_rate*self.length_all)
self.local_net = Cnn10(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num)
self.local_net.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Cnn10_mAP=0.380.pth')['model'])
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
for name, module in self.local_net._modules.items():
if name == 'fc1':
self.fc1 = module
if name == 'fc_audioset':
self.fc_audioset = module
def forward(self, input, global_prob, feature_map, mixup_lambda=None):
"""
Input: (batch_size, data_length)""" #64, 1, 1001, 64
x = input #64, 320000
sorted, indices = torch.sort(global_prob, dim=1, descending=True)
indices = indices[:,:self.N]#bs,N
feature_map = feature_map.transpose(1, 2)
feature_map = F.dropout(feature_map, p=0.5, training=self.training)
embedding_L = F.relu_(self.fc1(feature_map))
embedding_L = F.dropout(embedding_L, p=0.5, training=self.training)
frame_prob = torch.sigmoid(self.fc_audioset(embedding_L))
# frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)#bs,T,527
frame_prob = frame_prob.transpose(1,2)#bs,527,T
maps = torch.zeros(frame_prob.size(0),self.N,frame_prob.size(2)).cuda()#bs,N,T
for i in range(indices.size(0)):
maps[i] = torch.index_select(frame_prob[i],0,indices[i])
local_regions = self.region_select(maps,x)
local_ = local_regions.view(local_regions.size(0)*local_regions.size(1),local_regions.size(2))
local_prob = self.local_net(local_,mixup_lambda)['clipwise_output']
local_prob = local_prob.view(local_regions.size(0),local_regions.size(1),-1)
(local_prob1, _) = torch.max(local_prob, dim=1)
local_prob2 = torch.mean(local_prob, dim=1)
local_prob = 0.5*(local_prob1 + local_prob2)
# local_prob = torch.clamp(local_prob, 1e-7, 1 - 1e-7) # bs,527
prob = torch.cat([global_prob[:,None,:],local_prob[:,None,:]],1)
(prob1, _) = torch.max(prob, dim=1)
prob2 = torch.mean(prob,dim=1)
prob = 0.5*(prob1 + prob2)
# prob = torch.clamp(prob, 1e-7, 1 - 1e-7)
output_dict = {'local_prob': local_prob, 'prob': prob}
return output_dict
def region_select(self, maps, x):
        local_regions = torch.zeros(x.size(0),self.N, self.duration).cuda()#bs,N,duration
over_range = int(self.duration*maps.size(-1)/self.duration_all)//4
for i in range(maps.size(0)):
for j in range(maps.size(1)):
map_ = maps[i,j]
(_,max_index)=torch.max(map_,dim=0)
index_ = max_index
max_index = int(self.duration_all*max_index/len(map_))
index_l = max_index-self.duration//2
index_r = max_index+self.duration//2
if index_r >= self.duration_all:
local_regions[i,j,:] = x[i,self.duration_all-self.duration:self.duration_all]
# print(self.duration_all-self.duration,self.duration_all)
maps[i,:,-2*over_range:]=0.
elif index_l < 0:
local_regions[i,j,:] = x[i,:self.duration]
# print(0,self.duration)
maps[i,:,:2*over_range]=0.
else:
local_regions[i,j,:] = x[i,index_l:index_r]
# print(index_l,index_r)
maps[i,:,index_-over_range:index_+over_range]=0.
return local_regions
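# Worked example (hedged, illustration only): suppose sample_rate=32000, length=2 and
# length_all=10, so self.duration = 64000 and self.duration_all = 320000 samples. If the
# framewise map of a selected class peaks at frame t of T, the centre sample is
# int(320000 * t / T); region_select cuts the 64000-sample window around it (clamped to
# the clip boundaries) and then zeroes the frames near that peak, so the remaining
# classes in the top-N tend to pick different regions of the same clip.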
def _resnet_conv3x3(in_planes, out_planes):
#3x3 convolution with padding
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, groups=1, bias=False, dilation=1)
def _resnet_conv1x1(in_planes, out_planes):
#1x1 convolution
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False)
class _ResnetBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(_ResnetBasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('_ResnetBasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in _ResnetBasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.stride = stride
self.conv1 = _resnet_conv3x3(inplanes, planes)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = _resnet_conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self.init_weights()
def init_weights(self):
init_layer(self.conv1)
init_bn(self.bn1)
init_layer(self.conv2)
init_bn(self.bn2)
nn.init.constant_(self.bn2.weight, 0)
def forward(self, x):
identity = x
if self.stride == 2:
out = F.avg_pool2d(x, kernel_size=(2, 2))
else:
out = x
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
out = F.dropout(out, p=0.1, training=self.training)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class _ResnetBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(_ResnetBottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.stride = stride
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = _resnet_conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = _resnet_conv3x3(width, width)
self.bn2 = norm_layer(width)
self.conv3 = _resnet_conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.init_weights()
def init_weights(self):
init_layer(self.conv1)
init_bn(self.bn1)
init_layer(self.conv2)
init_bn(self.bn2)
init_layer(self.conv3)
init_bn(self.bn3)
nn.init.constant_(self.bn3.weight, 0)
def forward(self, x):
identity = x
if self.stride == 2:
x = F.avg_pool2d(x, kernel_size=(2, 2))
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = F.dropout(out, p=0.1, training=self.training)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class _ResNet(nn.Module):
def __init__(self, block, layers, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(_ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1:
downsample = nn.Sequential(
_resnet_conv1x1(self.inplanes, planes * block.expansion),
norm_layer(planes * block.expansion),
)
init_layer(downsample[0])
init_bn(downsample[1])
elif stride == 2:
downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2),
_resnet_conv1x1(self.inplanes, planes * block.expansion),
norm_layer(planes * block.expansion),
)
init_layer(downsample[1])
init_bn(downsample[2])
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResNet38(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(ResNet38, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
# self.conv_block2 = ConvBlock(in_channels=64, out_channels=64)
self.resnet = _ResNet(block=_ResnetBasicBlock, layers=[3, 4, 6, 3], zero_init_residual=True)
self.conv_block_after1 = ConvBlock(in_channels=512, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training, inplace=True)
x = self.resnet(x)
x = F.avg_pool2d(x, kernel_size=(2, 2))
x = F.dropout(x, p=0.2, training=self.training, inplace=True)
x = self.conv_block_after1(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training, inplace=True)
x = torch.mean(x, dim=3)
feature_map=x
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'feature_map': feature_map}
return output_dict
class ResNet38_local(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, N, length):
super(ResNet38_local, self).__init__()
self.N = N
self.length = length
self.length_all = 10
self.duration = int(sample_rate*self.length)
self.duration_all = int(sample_rate*self.length_all)
self.local_net = ResNet38(sample_rate, window_size, hop_size, mel_bins, fmin, fmax, classes_num)
self.local_net.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/ResNet38_mAP=0.434.pth')['model'])
self.fc1 = nn.Linear(2048, 2048)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
for name, module in self.local_net._modules.items():
if name == 'fc1':
self.fc1 = module
if name == 'fc_audioset':
self.fc_audioset = module
def forward(self, input, global_prob, feature_map, mixup_lambda=None):
"""
Input: (batch_size, data_length)""" #64, 1, 1001, 64
x = input #64, 320000
sorted, indices = torch.sort(global_prob, dim=1, descending=True)
indices = indices[:,:self.N]#bs,N
feature_map = feature_map.transpose(1, 2)
feature_map = F.dropout(feature_map, p=0.5, training=self.training)
embedding_L = F.relu_(self.fc1(feature_map))
embedding_L = F.dropout(embedding_L, p=0.5, training=self.training)
frame_prob = torch.sigmoid(self.fc_audioset(embedding_L))
# frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)#bs,T,527
frame_prob = frame_prob.transpose(1,2)#bs,527,T
maps = torch.zeros(frame_prob.size(0),self.N,frame_prob.size(2)).cuda()#bs,N,T
for i in range(indices.size(0)):
maps[i] = torch.index_select(frame_prob[i],0,indices[i])
local_regions = self.region_select(maps,x)
local_ = local_regions.view(local_regions.size(0)*local_regions.size(1),local_regions.size(2))
local_prob = self.local_net(local_,mixup_lambda)['clipwise_output']
local_prob = local_prob.view(local_regions.size(0),local_regions.size(1),-1)
(local_prob1, _) = torch.max(local_prob, dim=1)
local_prob2 = torch.mean(local_prob, dim=1)
local_prob = 0.5*(local_prob1 + local_prob2)
# local_prob = torch.clamp(local_prob, 1e-7, 1 - 1e-7) # bs,527
prob = torch.cat([global_prob[:,None,:],local_prob[:,None,:]],1)
(prob1, _) = torch.max(prob, dim=1)
prob2 = torch.mean(prob,dim=1)
prob = 0.5*(prob1 + prob2)
# prob = torch.clamp(prob, 1e-7, 1 - 1e-7)
output_dict = {'local_prob': local_prob, 'prob': prob}
return output_dict
def region_select(self, maps, x):
        local_regions = torch.zeros(x.size(0),self.N, self.duration).cuda()#bs,N,duration
over_range = int(self.duration*maps.size(-1)/self.duration_all)//4
for i in range(maps.size(0)):
for j in range(maps.size(1)):
map_ = maps[i,j]
(_,max_index)=torch.max(map_,dim=0)
index_ = max_index
max_index = int(self.duration_all*max_index/len(map_))
index_l = max_index-self.duration//2
index_r = max_index+self.duration//2
if index_r >= self.duration_all:
local_regions[i,j,:] = x[i,self.duration_all-self.duration:self.duration_all]
# print(self.duration_all-self.duration,self.duration_all)
maps[i,:,-2*over_range:]=0.
elif index_l < 0:
local_regions[i,j,:] = x[i,:self.duration]
# print(0,self.duration)
maps[i,:,:2*over_range]=0.
else:
local_regions[i,j,:] = x[i,index_l:index_r]
# print(index_l,index_r)
maps[i,:,index_-over_range:index_+over_range]=0.
return local_regions
class ConvPreWavBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvPreWavBlock, self).__init__()
self.conv1 = nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=3, stride=1,
padding=1, bias=False)
self.conv2 = nn.Conv1d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=3, stride=1, dilation=2,
padding=2, bias=False)
self.bn1 = nn.BatchNorm1d(out_channels)
self.bn2 = nn.BatchNorm1d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
x = F.max_pool1d(x, kernel_size=pool_size)
return x
class Wavegram_Logmel_Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(Wavegram_Logmel_Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.pre_conv0 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=11, stride=5, padding=5, bias=False)
self.pre_bn0 = nn.BatchNorm1d(64)
self.pre_block1 = ConvPreWavBlock(64, 64)
self.pre_block2 = ConvPreWavBlock(64, 128)
self.pre_block3 = ConvPreWavBlock(128, 128)
self.pre_block4 = ConvBlock(in_channels=4, out_channels=64)
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=128, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_layer(self.pre_conv0)
init_bn(self.pre_bn0)
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
a1 = F.relu_(self.pre_bn0(self.pre_conv0(input[:, None, :])))
a1 = self.pre_block1(a1, pool_size=4)
a1 = self.pre_block2(a1, pool_size=4)
a1 = self.pre_block3(a1, pool_size=4)
a1 = a1.reshape((a1.shape[0], -1, 32, a1.shape[-1])).transpose(2, 3)
a1 = self.pre_block4(a1, pool_size=(2, 1))
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# if self.training:
# x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
a1 = do_mixup(a1, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = torch.cat((x, a1), dim=1)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
feature_map = x
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'feature_map': feature_map}
return output_dict
class Wavegram_Logmel_Cnn14_local(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, N, length):
super(Wavegram_Logmel_Cnn14_local, self).__init__()
self.N = N
self.length = length
self.length_all = 10
self.duration = int(sample_rate*self.length)
self.duration_all = int(sample_rate*self.length_all)
self.local_net = Wavegram_Logmel_Cnn14(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num)
self.local_net.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Wavegram_Logmel_Cnn14_mAP=0.439.pth')['model'])
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
for name, module in self.local_net._modules.items():
if name == 'fc1':
self.fc1 = module
if name == 'fc_audioset':
self.fc_audioset = module
def forward(self, input, global_prob, feature_map, mixup_lambda=None):
"""
Input: (batch_size, data_length)""" #64, 1, 1001, 64
x = input #64, 320000
sorted, indices = torch.sort(global_prob, dim=1, descending=True)
indices = indices[:,:self.N]#bs,N
feature_map = feature_map.transpose(1, 2)
feature_map = F.dropout(feature_map, p=0.5, training=self.training)
embedding_L = F.relu_(self.fc1(feature_map))
embedding_L = F.dropout(embedding_L, p=0.5, training=self.training)
frame_prob = torch.sigmoid(self.fc_audioset(embedding_L))
# frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)#bs,T,527
frame_prob = frame_prob.transpose(1,2)#bs,527,T
maps = torch.zeros(frame_prob.size(0),self.N,frame_prob.size(2)).cuda()#bs,N,T
for i in range(indices.size(0)):
maps[i] = torch.index_select(frame_prob[i],0,indices[i])
local_regions = self.region_select(maps,x)
local_ = local_regions.view(local_regions.size(0)*local_regions.size(1),local_regions.size(2))
local_prob = self.local_net(local_,mixup_lambda)['clipwise_output']
local_prob = local_prob.view(local_regions.size(0),local_regions.size(1),-1)
(local_prob1, _) = torch.max(local_prob, dim=1)
local_prob2 = torch.mean(local_prob, dim=1)
local_prob = 0.5*(local_prob1 + local_prob2)
# local_prob = torch.clamp(local_prob, 1e-7, 1 - 1e-7) # bs,527
prob = torch.cat([global_prob[:,None,:],local_prob[:,None,:]],1)
(prob1, _) = torch.max(prob, dim=1)
prob2 = torch.mean(prob,dim=1)
prob = 0.5*(prob1 + prob2)
# prob = torch.clamp(prob, 1e-7, 1 - 1e-7)
output_dict = {'local_prob': local_prob, 'prob': prob}
return output_dict
def region_select(self, maps, x):
        local_regions = torch.zeros(x.size(0),self.N, self.duration).cuda()#bs,N,duration
over_range = int(self.duration*maps.size(-1)/self.duration_all)//4
for i in range(maps.size(0)):
for j in range(maps.size(1)):
map_ = maps[i,j]
(_,max_index)=torch.max(map_,dim=0)
index_ = max_index
max_index = int(self.duration_all*max_index/len(map_))
index_l = max_index-self.duration//2
index_r = max_index+self.duration//2
if index_r >= self.duration_all:
local_regions[i,j,:] = x[i,self.duration_all-self.duration:self.duration_all]
# print(self.duration_all-self.duration,self.duration_all)
maps[i,:,-2*over_range:]=0.
elif index_l < 0:
local_regions[i,j,:] = x[i,:self.duration]
# print(0,self.duration)
maps[i,:,:2*over_range]=0.
else:
local_regions[i,j,:] = x[i,index_l:index_r]
# print(index_l,index_r)
maps[i,:,index_-over_range:index_+over_range]=0.
return local_regions
| 33,592 | 38.708038 | 144 | py |
GL-AT | GL-AT-master/pytorch/pytorch_utils.py | import numpy as np
import time
import torch
import torch.nn as nn
def move_data_to_device(x, device):
if 'float' in str(x.dtype):
x = torch.Tensor(x)
elif 'int' in str(x.dtype):
x = torch.LongTensor(x)
else:
return x
return x.to(device)
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
return out
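# Hedged example (assumption): with a doubled batch [x0, x1, x2, x3] and
# mixup_lambda = [l0, 1-l0, l1, 1-l1], do_mixup returns the two mixed examples
# [l0*x0 + (1-l0)*x1, l1*x2 + (1-l1)*x3].
#
#   x = torch.arange(4.).reshape(4, 1)           # [[0.], [1.], [2.], [3.]]
#   lam = torch.tensor([0.75, 0.25, 0.5, 0.5])
#   do_mixup(x, lam)                             # tensor([[0.25], [2.50]])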
def append_to_dict(dict, key, value):
if key in dict.keys():
dict[key].append(value)
else:
dict[key] = [value]
def forward(model, model_G, generator, return_input=False,
return_target=False):
"""Forward data to a model.
Args:
      model: object
      model_G: object
generator: object
return_input: bool
return_target: bool
Returns:
audio_name: (audios_num,)
clipwise_output: (audios_num, classes_num)
      (if exists) segmentwise_output: (audios_num, segments_num, classes_num)
      (if exists) framewise_output: (audios_num, frames_num, classes_num)
(optional) return_input: (audios_num, segment_samples)
(optional) return_target: (audios_num, classes_num)
"""
output_dict = {}
device = next(model.parameters()).device
time1 = time.time()
# Forward data to a model in mini-batches
for n, batch_data_dict in enumerate(generator):
# print(n)
batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)
with torch.no_grad():
model.eval()
model_G.eval()
batch_output_G = model_G(batch_waveform)
batch_output = model(batch_waveform,batch_output_G['clipwise_output'],batch_output_G['feature_map'])
append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])
append_to_dict(output_dict, 'prob',
batch_output['prob'].data.cpu().numpy())
if 'segmentwise_output' in batch_output.keys():
append_to_dict(output_dict, 'segmentwise_output',
batch_output['segmentwise_output'].data.cpu().numpy())
if 'framewise_output' in batch_output.keys():
append_to_dict(output_dict, 'framewise_output',
batch_output['framewise_output'].data.cpu().numpy())
if return_input:
append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])
if return_target:
if 'target' in batch_data_dict.keys():
append_to_dict(output_dict, 'target', batch_data_dict['target'])
if n % 1000 == 0:
print(' --- Inference time: {:.3f} s / 1000 iterations ---'.format(
time.time() - time1))
time1 = time.time()
for key in output_dict.keys():
output_dict[key] = np.concatenate(output_dict[key], axis=0)
return output_dict
def interpolate(x, ratio):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
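# Hedged example (assumption): each time step is repeated `ratio` times, e.g.
#
#   x = torch.tensor([[[1.], [2.]]])   # (batch=1, time_steps=2, classes_num=1)
#   interpolate(x, ratio=3)            # shape (1, 6, 1): values [1, 1, 1, 2, 2, 2]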
def pad_framewise_output(framewise_output, frames_num):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
"""tensor for padding"""
output = torch.cat((framewise_output, pad), dim=1)
"""(batch_size, frames_num, classes_num)"""
return output
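# Hedged example (assumption): if the CNN produced 98 frames but the input had 100 STFT
# frames, the last framewise prediction is repeated to fill the gap.
#
#   padded = pad_framewise_output(framewise_output, frames_num=100)
#   padded.shape   # (batch_size, 100, classes_num)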
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_flops(model, audio_length):
"""Count flops. Code modified from others' implementation.
"""
multiply_adds = True
list_conv2d=[]
def conv2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv2d.append(flops)
list_conv1d=[]
def conv1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_conv1d.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement() * 2)
list_pooling2d=[]
def pooling2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling2d.append(flops)
list_pooling1d=[]
def pooling1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0]
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_pooling2d.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, nn.Conv2d):
net.register_forward_hook(conv2d_hook)
elif isinstance(net, nn.Conv1d):
net.register_forward_hook(conv1d_hook)
elif isinstance(net, nn.Linear):
net.register_forward_hook(linear_hook)
elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
net.register_forward_hook(bn_hook)
elif isinstance(net, nn.ReLU):
net.register_forward_hook(relu_hook)
elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
net.register_forward_hook(pooling2d_hook)
elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
net.register_forward_hook(pooling1d_hook)
else:
print('Warning: flop of module {} is not counted!'.format(net))
return
for c in childrens:
foo(c)
# Register hook
foo(model)
    device = next(model.parameters()).device
input = torch.rand(1, audio_length).to(device)
out = model(input)
total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)
return total_flops | 8,446 | 32.387352 | 127 | py |
gbm-bench | gbm-bench-master/datasets.py | # MIT License
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import os
from enum import Enum
import pickle
from urllib.request import urlretrieve
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
import pandas as pd
import tqdm
pbar = None
def show_progress(block_num, block_size, total_size):
global pbar
if pbar is None:
pbar = tqdm.tqdm(total=total_size / 1024, unit='kB')
downloaded = block_num * block_size
if downloaded < total_size:
pbar.update(block_size / 1024)
else:
pbar.close()
pbar = None
def retrieve(url, filename=None):
return urlretrieve(url, filename, reporthook=show_progress)
class LearningTask(Enum):
REGRESSION = 1
CLASSIFICATION = 2
MULTICLASS_CLASSIFICATION = 3
class Data: # pylint: disable=too-few-public-methods,too-many-arguments
def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None,
qid_test=None):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.learning_task = learning_task
# For ranking task
self.qid_train = qid_train
self.qid_test = qid_test
def prepare_dataset(dataset_folder, dataset, nrows):
if not os.path.exists(dataset_folder):
os.makedirs(dataset_folder)
prepare_function = globals()["prepare_" + dataset]
return prepare_function(dataset_folder, nrows)
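# Hedged usage sketch (assumption): the dataset name is dispatched through globals(), so
# "higgs" resolves to prepare_higgs below. The folder is a placeholder path.
#
#   data = prepare_dataset("/tmp/gbm-datasets", "higgs", nrows=100000)
#   data.X_train.shape       # (80000, 28) with the 80/20 split used here
#   data.learning_task       # LearningTask.CLASSIFICATION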
def __prepare_airline(dataset_folder, nrows, regression=False): # pylint: disable=too-many-locals
url = 'http://kt.ijs.si/elena_ikonomovska/datasets/airline/airline_14col.data.bz2'
pkl_base_name = "airline"
if regression:
pkl_base_name += "-regression"
local_url = os.path.join(dataset_folder, os.path.basename(url))
pickle_url = os.path.join(dataset_folder,
pkl_base_name
+ ("" if nrows is None else "-" + str(nrows)) + ".pkl")
if os.path.exists(pickle_url):
return pickle.load(open(pickle_url, "rb"))
if not os.path.isfile(local_url):
retrieve(url, local_url)
cols = [
"Year", "Month", "DayofMonth", "DayofWeek", "CRSDepTime",
"CRSArrTime", "UniqueCarrier", "FlightNum", "ActualElapsedTime",
"Origin", "Dest", "Distance", "Diverted", "ArrDelay"
]
# load the data as int16
dtype = np.int16
dtype_columns = {
"Year": dtype, "Month": dtype, "DayofMonth": dtype, "DayofWeek": dtype,
"CRSDepTime": dtype, "CRSArrTime": dtype, "FlightNum": dtype,
"ActualElapsedTime": dtype, "Distance":
dtype,
"Diverted": dtype, "ArrDelay": dtype,
}
df = pd.read_csv(local_url,
names=cols, dtype=dtype_columns, nrows=nrows)
# Encode categoricals as numeric
for col in df.select_dtypes(['object']).columns:
df[col] = df[col].astype("category").cat.codes
# Turn into binary classification problem
if not regression:
df["ArrDelay"] = 1 * (df["ArrDelay"] > 0)
X = df[df.columns.difference(["ArrDelay"])].to_numpy(dtype=np.float32)
y = df["ArrDelay"].to_numpy(dtype=np.float32)
del df
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
if regression:
task = LearningTask.REGRESSION
else:
task = LearningTask.CLASSIFICATION
data = Data(X_train, X_test, y_train, y_test, task)
pickle.dump(data, open(pickle_url, "wb"), protocol=4)
return data
def prepare_airline(dataset_folder, nrows):
return __prepare_airline(dataset_folder, nrows, False)
def prepare_airline_regression(dataset_folder, nrows):
return __prepare_airline(dataset_folder, nrows, True)
def prepare_bosch(dataset_folder, nrows):
filename = "train_numeric.csv.zip"
local_url = os.path.join(dataset_folder, filename)
pickle_url = os.path.join(dataset_folder,
"bosch" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
if os.path.exists(pickle_url):
return pickle.load(open(pickle_url, "rb"))
os.system("kaggle competitions download -c bosch-production-line-performance -f " +
filename + " -p " + dataset_folder)
X = pd.read_csv(local_url, index_col=0, compression='zip', dtype=np.float32,
nrows=nrows)
y = X.iloc[:, -1].to_numpy(dtype=np.float32)
X.drop(X.columns[-1], axis=1, inplace=True)
X = X.to_numpy(dtype=np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
pickle.dump(data, open(pickle_url, "wb"), protocol=4)
return data
def prepare_fraud(dataset_folder, nrows):
if not os.path.exists(dataset_folder):
os.makedirs(dataset_folder)
filename = "creditcard.csv"
local_url = os.path.join(dataset_folder, filename)
pickle_url = os.path.join(dataset_folder,
"creditcard" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
if os.path.exists(pickle_url):
return pickle.load(open(pickle_url, "rb"))
os.system("kaggle datasets download mlg-ulb/creditcardfraud -f" +
filename + " -p " + dataset_folder)
df = pd.read_csv(local_url + ".zip", dtype=np.float32, nrows=nrows)
X = df[[col for col in df.columns if col.startswith('V')]].to_numpy(dtype=np.float32)
y = df['Class'].to_numpy(dtype=np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
pickle.dump(data, open(pickle_url, "wb"), protocol=4)
return data
def prepare_higgs(dataset_folder, nrows):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz'
local_url = os.path.join(dataset_folder, os.path.basename(url))
pickle_url = os.path.join(dataset_folder,
"higgs" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
if os.path.exists(pickle_url):
return pickle.load(open(pickle_url, "rb"))
if not os.path.isfile(local_url):
retrieve(url, local_url)
higgs = pd.read_csv(local_url, nrows=nrows)
X = higgs.iloc[:, 1:].to_numpy(dtype=np.float32)
y = higgs.iloc[:, 0].to_numpy(dtype=np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
pickle.dump(data, open(pickle_url, "wb"), protocol=4)
return data
def prepare_year(dataset_folder, nrows):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt' \
'.zip'
local_url = os.path.join(dataset_folder, os.path.basename(url))
pickle_url = os.path.join(dataset_folder,
"year" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
if os.path.exists(pickle_url):
return pickle.load(open(pickle_url, "rb"))
if not os.path.isfile(local_url):
retrieve(url, local_url)
year = pd.read_csv(local_url, nrows=nrows, header=None)
X = year.iloc[:, 1:].to_numpy(dtype=np.float32)
y = year.iloc[:, 0].to_numpy(dtype=np.float32)
if nrows is None:
# this dataset requires a specific train/test split,
# with the specified number of rows at the start belonging to the train set,
# and the rest being the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False,
train_size=463715,
test_size=51630)
else:
print(
"Warning: nrows is specified, not using predefined test/train split for "
"YearPredictionMSD.")
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
data = Data(X_train, X_test, y_train, y_test, LearningTask.REGRESSION)
pickle.dump(data, open(pickle_url, "wb"), protocol=4)
return data
def prepare_epsilon(dataset_folder, nrows):
url_train = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary' \
'/epsilon_normalized.bz2'
url_test = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary' \
'/epsilon_normalized.t.bz2'
pickle_url = os.path.join(dataset_folder,
"epsilon" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
local_url_train = os.path.join(dataset_folder, os.path.basename(url_train))
local_url_test = os.path.join(dataset_folder, os.path.basename(url_test))
if os.path.exists(pickle_url):
return pickle.load(open(pickle_url, "rb"))
if not os.path.isfile(local_url_train):
retrieve(url_train, local_url_train)
if not os.path.isfile(local_url_test):
retrieve(url_test, local_url_test)
X_train, y_train = datasets.load_svmlight_file(local_url_train,
dtype=np.float32)
X_test, y_test = datasets.load_svmlight_file(local_url_test,
dtype=np.float32)
X_train = X_train.toarray()
X_test = X_test.toarray()
y_train[y_train <= 0] = 0
y_test[y_test <= 0] = 0
if nrows is not None:
print("Warning: nrows is specified, not using predefined test/train split for epsilon.")
X_train = np.vstack((X_train, X_test))
y_train = np.append(y_train, y_test)
X_train = X_train[:nrows]
y_train = y_train[:nrows]
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, random_state=77,
test_size=0.2,
)
data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
pickle.dump(data, open(pickle_url, "wb"), protocol=4)
return data
def prepare_covtype(dataset_folder, nrows): # pylint: disable=unused-argument
X, y = datasets.fetch_covtype(return_X_y=True) # pylint: disable=unexpected-keyword-arg
if nrows is not None:
X = X[0:nrows]
y = y[0:nrows]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
return Data(X_train, X_test, y_train, y_test, LearningTask.MULTICLASS_CLASSIFICATION)
def prepare_newsgroups(dataset_folder, nrows): # pylint: disable=unused-argument
X, y = datasets.fetch_20newsgroups_vectorized(subset='all',return_X_y=True) # pylint: disable=unexpected-keyword-arg
if nrows is not None:
X = X[0:nrows]
y = y[0:nrows]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
return Data(X_train, X_test, y_train, y_test, LearningTask.MULTICLASS_CLASSIFICATION) | 12,989 | 40.238095 | 121 | py |
gbm-bench | gbm-bench-master/json2csv.py | #!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json
import os
import csv
TIMINGS = ["train_time", "test_time"]
METRICS = ["AUC", "Accuracy", "F1", "Precision", "Recall", "MeanAbsError", "MeanSquaredError",
"MedianAbsError"]
ALLMETRICS = TIMINGS + METRICS
def load_perf_data(json_file):
file = open(json_file, "r")
data = json.load(file)
file.close()
return data
def load_all_perf_data(files):
data = {}
for json_file in files:
dataset = os.path.basename(json_file)
dataset = dataset.replace(".json", "")
data[dataset] = load_perf_data(json_file)
return data
def get_all_datasets(data):
return data.keys()
def get_all_algos(data):
algos = {}
for dset in data.keys():
for algo in data[dset].keys():
algos[algo] = 1
return algos.keys()
def read_from_dict(hashmap, key, def_val="-na-"):
return hashmap[key] if key in hashmap else def_val
def combine_perf_data(data, datasets, algos):
all_data = {}
for dataset in datasets:
out = []
dset = read_from_dict(data, dataset, {})
for algo in algos:
algo_data = read_from_dict(dset, algo, {})
perf = [algo]
for timing in TIMINGS:
perf.append(read_from_dict(algo_data, timing))
metric_data = read_from_dict(algo_data, "accuracy", {})
for metric in METRICS:
perf.append(read_from_dict(metric_data, metric))
out.append(perf)
all_data[dataset] = out
return all_data
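# Hedged sketch (assumption) of the JSON layout this script expects, inferred from how
# combine_perf_data() indexes it; the algorithm name is only an example.
#
#   {"airline": {"xgb-gpu-hist": {"train_time": 42.1,
#                                 "test_time": 0.9,
#                                 "accuracy": {"AUC": 0.83, "Accuracy": 0.75}}}}
#
# Any timing or metric that is absent is written out as "-na-".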
def write_csv(all_data, datasets):
writer = csv.writer(sys.stdout)
header = ['dataset', 'algorithm'] + ALLMETRICS
writer.writerow(header)
for dataset in sorted(datasets):
for row in all_data[dataset]:
writer.writerow([dataset] + row)
def main():
data = load_perf_data(sys.argv[1])
datasets = get_all_datasets(data)
algos = get_all_algos(data)
table = combine_perf_data(data, datasets, algos)
write_csv(table, datasets)
if __name__ == '__main__':
main()
| 3,610 | 32.435185 | 94 | py |
gbm-bench | gbm-bench-master/metrics.py | # BSD License
#
# Copyright (c) 2016-present, Miguel Gonzalez-Fierro. All rights reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Miguel Gonzalez-Fierro nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import sklearn.metrics as sklm
from datasets import LearningTask
def get_metrics(data, pred):
if data.learning_task == LearningTask.REGRESSION:
return regression_metrics(data.y_test, pred)
if data.learning_task == LearningTask.CLASSIFICATION:
return classification_metrics(data.y_test, pred)
if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
return classification_metrics_multilabel(data.y_test, pred)
raise ValueError("No metrics defined for learning task: " + str(data.learning_task))
def evaluate_metrics(y_true, y_pred, metrics):
res = {}
for metric_name, metric in metrics.items():
res[metric_name] = float(metric(y_true, y_pred))
return res
def classification_metrics(y_true, y_prob, threshold=0.5):
y_pred = np.where(y_prob > threshold, 1, 0)
metrics = {
"Accuracy": sklm.accuracy_score,
"Log_Loss": lambda real, pred: sklm.log_loss(real, y_prob, eps=1e-5),
# yes, I'm using y_prob here!
"AUC": lambda real, pred: sklm.roc_auc_score(real, y_prob),
"Precision": sklm.precision_score,
"Recall": sklm.recall_score,
}
return evaluate_metrics(y_true, y_pred, metrics)
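# Hedged example (assumption): binary classification metrics from predicted
# probabilities with the default 0.5 threshold.
#
#   y_true = np.array([0, 1, 1, 0])
#   y_prob = np.array([0.2, 0.8, 0.4, 0.1])
#   classification_metrics(y_true, y_prob)
#   # {'Accuracy': 0.75, 'Log_Loss': ..., 'AUC': 1.0, 'Precision': 1.0, 'Recall': 0.5}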
def classification_metrics_multilabel(y_true, y_pred):
metrics = {
"Accuracy": sklm.accuracy_score,
"Precision": lambda real, pred: sklm.precision_score(real, pred,
average="weighted"),
"Recall": lambda real, pred: sklm.recall_score(real, pred,
average="weighted"),
"F1": lambda real, pred: sklm.f1_score(real, pred,
average="weighted"),
}
return evaluate_metrics(y_true, y_pred, metrics)
def regression_metrics(y_true, y_pred):
metrics = {
"MeanAbsError": sklm.mean_absolute_error,
"MeanSquaredError": sklm.mean_squared_error,
"MedianAbsError": sklm.median_absolute_error,
}
return evaluate_metrics(y_true, y_pred, metrics)
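# Quick sanity check (numbers invented, for illustration only): with
# y_true = [1.0, 2.0] and y_pred = [1.5, 2.0] the report is
# {'MeanAbsError': 0.25, 'MeanSquaredError': 0.125, 'MedianAbsError': 0.25}.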
| 3,749 | 42.604651 | 93 | py |
gbm-bench | gbm-bench-master/algorithms.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABC, abstractmethod
import time
import pandas as pd
import numpy as np
import dask.dataframe as dd
import dask.array as da
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import xgboost as xgb
try:
import catboost as cat
except ImportError:
cat = None
try:
import lightgbm as lgb
except (ImportError, OSError):
lgb = None
try:
import dask_xgboost as dxgb
except ImportError:
dxgb = None
try:
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier as skhgb
except ImportError:
skhgb = None
try:
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor as skhgb_r
except ImportError:
skhgb_r = None
try:
from sklearn.ensemble import GradientBoostingClassifier as skgb
except ImportError:
skgb = None
try:
from sklearn.ensemble import GradientBoostingRegressor as skgb_r
except ImportError:
skgb_r = None
try:
from sklearn.ensemble import RandomForestClassifier as skrf
except ImportError:
skrf = None
try:
from sklearn.ensemble import RandomForestRegressor as skrf_r
except ImportError:
skrf_r = None
try:
from cuml.ensemble import RandomForestClassifier as cumlrf
except ImportError:
cumlrf = None
try:
from cuml.ensemble import RandomForestRegressor as cumlrf_r
except ImportError:
cumlrf_r = None
from datasets import LearningTask
class Timer:
def __init__(self):
self.start = None
self.end = None
self.interval = None
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
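# Illustrative Timer usage (the model/X/y names below are placeholders, not part
# of this module): each algorithm wraps its training call in the context manager
# and reports t.interval as the training time in seconds (time.perf_counter based):
#
#   with Timer() as t:
#       model.fit(X, y)
#   train_time = t.interval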
class Algorithm(ABC):
@staticmethod
def create(name): # pylint: disable=too-many-return-statements
if name == 'xgb-gpu':
return XgbGPUHistAlgorithm()
if name == 'xgb-gpu-dask':
return XgbGPUHistDaskAlgorithm()
if name == 'xgb-gpu-dask-old':
return XgbGPUHistDaskOldAlgorithm()
if name == 'xgb-cpu':
return XgbCPUHistAlgorithm()
if name == 'lgbm-cpu':
return LgbmCPUAlgorithm()
if name == 'lgbm-gpu':
return LgbmGPUAlgorithm()
if name == 'cat-cpu':
return CatCPUAlgorithm()
if name == 'cat-gpu':
return CatGPUAlgorithm()
if name == 'skhgb':
return SkHistAlgorithm()
if name == 'skgb':
return SkGradientAlgorithm()
if name == 'skrf':
return SkRandomForestAlgorithm()
if name == 'cumlrf':
return CumlRfAlgorithm()
raise ValueError("Unknown algorithm: " + name)
def __init__(self):
self.model = None
@abstractmethod
def fit(self, data, args):
pass
@abstractmethod
def test(self, data):
pass
def __enter__(self):
pass
@abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
pass
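# Intended call pattern for the factory above (this mirrors benchmark() in
# runme.py; `data` and `args` stand for the prepared dataset and the parsed CLI
# arguments and are shown here only for illustration):
#
#   runner = Algorithm.create("xgb-cpu")
#   with runner:
#       train_time = runner.fit(data, args)
#       predictions = runner.test(data)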
# learning parameters shared by all algorithms, using the xgboost convention
shared_params = {"max_depth": 8, "learning_rate": 0.1,
"reg_lambda": 1}
class CumlRfAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = cumlrf_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = cumlrf(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
params.update({
"nthread": args.cpus})
if data.learning_task == LearningTask.REGRESSION:
params["objective"] = "reg:squarederror"
elif data.learning_task == LearningTask.CLASSIFICATION:
params["objective"] = "binary:logistic"
params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
params["objective"] = "multi:softmax"
params["num_class"] = np.max(data.y_test) + 1
params.update(args.extra)
return params
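    # Note on the imbalance term above (counts invented for illustration): with
    # 1,000,000 training rows of which 10,000 are positive, scale_pos_weight
    # becomes 1,000,000 / 10,000 = 100, i.e. the positive class is up-weighted 100x.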
def fit(self, data, args):
dtrain = xgb.DMatrix(data.X_train, data.y_train)
params = self.configure(data, args)
with Timer() as t:
self.model = xgb.train(params, dtrain, args.ntrees)
return t.interval
def test(self, data):
dtest = xgb.DMatrix(data.X_test, data.y_test)
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbGPUHistAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbGPUHistAlgorithm, self).configure(data, args)
params.update({"tree_method": "gpu_hist", "gpu_id": 0})
return params
class SkRandomForestAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = skrf_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = skrf(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class SkGradientAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = skgb_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = skgb(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class SkHistAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = skhgb_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = skhgb(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbGPUHistDaskAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbGPUHistDaskAlgorithm, self).configure(data, args)
params.update({"tree_method": "gpu_hist"})
del params['nthread'] # This is handled by dask
return params
def get_slices(self, n_slices, X, y):
n_rows_worker = int(np.ceil(len(y) / n_slices))
indices = []
count = 0
for _ in range(0, n_slices - 1):
indices.append(min(count + n_rows_worker, len(y)))
count += n_rows_worker
return np.split(X, indices), np.split(y, indices)
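    # e.g. 10 training rows split across 3 workers gives split indices [4, 8],
    # i.e. partitions of 4, 4 and 2 rows (sizes here are purely illustrative).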
def fit(self, data, args):
params = self.configure(data, args)
n_workers = None if args.gpus < 0 else args.gpus
cluster = LocalCUDACluster(n_workers=n_workers,
local_directory=args.root)
client = Client(cluster)
n_partitions = len(client.scheduler_info()['workers'])
X_sliced, y_sliced = self.get_slices(n_partitions,
data.X_train, data.y_train)
X = da.concatenate([da.from_array(sub_array) for sub_array in X_sliced])
X = X.rechunk((X_sliced[0].shape[0], data.X_train.shape[1]))
y = da.concatenate([da.from_array(sub_array) for sub_array in y_sliced])
y = y.rechunk(X.chunksize[0])
dtrain = xgb.dask.DaskDMatrix(client, X, y)
with Timer() as t:
output = xgb.dask.train(client, params, dtrain, num_boost_round=args.ntrees)
self.model = output['booster']
client.close()
cluster.close()
return t.interval
def test(self, data):
dtest = xgb.DMatrix(data.X_test, data.y_test)
self.model.set_param({'predictor': 'gpu_predictor'})
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbGPUHistDaskOldAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbGPUHistDaskOldAlgorithm, self).configure(data, args)
params.update({"tree_method": "gpu_hist", "nthread": 1})
return params
def fit(self, data, args):
params = self.configure(data, args)
cluster = LocalCUDACluster(n_workers=None if args.gpus < 0 else args.gpus,
local_directory=args.root)
client = Client(cluster)
partition_size = 1000
if isinstance(data.X_train, np.ndarray):
X = dd.from_array(data.X_train, partition_size)
y = dd.from_array(data.y_train, partition_size)
else:
X = dd.from_pandas(data.X_train, partition_size)
y = dd.from_pandas(data.y_train, partition_size)
X.columns = [str(i) for i in range(0, X.shape[1])]
with Timer() as t:
self.model = dxgb.train(client, params, X, y, num_boost_round=args.ntrees)
client.close()
return t.interval
def test(self, data):
if isinstance(data.X_test, np.ndarray):
data.X_test = pd.DataFrame(data=data.X_test, columns=np.arange(0,
data.X_test.shape[1]),
index=np.arange(0, data.X_test.shape[0]))
data.X_test.columns = [str(i) for i in range(0, data.X_test.shape[1])]
dtest = xgb.DMatrix(data.X_test, data.y_test)
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbCPUHistAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbCPUHistAlgorithm, self).configure(data, args)
params.update({"tree_method": "hist"})
return params
class LgbmAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
params.update({"max_leaves": 256,
"nthread": args.cpus})
if data.learning_task == LearningTask.REGRESSION:
params["objective"] = "regression"
elif data.learning_task == LearningTask.CLASSIFICATION:
params["objective"] = "binary"
params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
params["objective"] = "multiclass"
params["num_class"] = np.max(data.y_test) + 1
params.update(args.extra)
return params
def fit(self, data, args):
dtrain = lgb.Dataset(data.X_train, data.y_train,
free_raw_data=False)
params = self.configure(data, args)
with Timer() as t:
self.model = lgb.train(params, dtrain, args.ntrees)
return t.interval
def test(self, data):
if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
prob = self.model.predict(data.X_test)
return np.argmax(prob, axis=1)
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
self.model.free_dataset()
del self.model
class LgbmCPUAlgorithm(LgbmAlgorithm):
pass
class LgbmGPUAlgorithm(LgbmAlgorithm):
def configure(self, data, args):
params = super(LgbmGPUAlgorithm, self).configure(data, args)
params.update({"device": "gpu"})
return params
class CatAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
params.update({
"thread_count": args.cpus})
if args.gpus >= 0:
params["devices"] = "0-" + str(args.gpus)
if data.learning_task == LearningTask.REGRESSION:
params["objective"] = "RMSE"
elif data.learning_task == LearningTask.CLASSIFICATION:
params["objective"] = "Logloss"
params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
params["objective"] = "MultiClassOneVsAll"
params["classes_count"] = np.max(data.y_test) + 1
params.update(args.extra)
return params
def fit(self, data, args):
dtrain = cat.Pool(data.X_train, data.y_train)
params = self.configure(data, args)
params["iterations"] = args.ntrees
self.model = cat.CatBoost(params)
with Timer() as t:
self.model.fit(dtrain)
return t.interval
def test(self, data):
dtest = cat.Pool(data.X_test)
if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
prob = self.model.predict(dtest)
return np.argmax(prob, axis=1)
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class CatCPUAlgorithm(CatAlgorithm):
def configure(self, data, args):
params = super(CatCPUAlgorithm, self).configure(data, args)
params.update({"task_type": "CPU"})
return params
class CatGPUAlgorithm(CatAlgorithm):
def configure(self, data, args):
params = super(CatGPUAlgorithm, self).configure(data, args)
params.update({"task_type": "GPU"})
return params
| 17,038 | 34.204545 | 97 | py |
gbm-bench | gbm-bench-master/runme.py | #!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import argparse
import json
import ast
import psutil
import algorithms
from metrics import get_metrics
from datasets import prepare_dataset
def get_number_processors(args):
if args.cpus == 0:
return psutil.cpu_count(logical=False)
return args.cpus
def print_sys_info(args):
try:
import xgboost # pylint: disable=import-outside-toplevel
print("Xgboost : %s" % xgboost.__version__)
except ImportError:
pass
try:
import lightgbm # pylint: disable=import-outside-toplevel
print("LightGBM: %s" % lightgbm.__version__)
except (ImportError, OSError):
pass
try:
import catboost # pylint: disable=import-outside-toplevel
print("Catboost: %s" % catboost.__version__)
except ImportError:
pass
print("System : %s" % sys.version)
print("#jobs : %d" % args.cpus)
def parse_args():
parser = argparse.ArgumentParser(
description="Benchmark xgboost/lightgbm/catboost on real datasets")
parser.add_argument("-dataset", default="all", type=str,
help="The dataset to be used for benchmarking. 'all' for all datasets.")
parser.add_argument("-root", default="/opt/gbm-datasets",
type=str, help="The root datasets folder")
parser.add_argument("-algorithm", default="all", type=str,
help=("Comma-separated list of algorithms to run; "
"'all' run all"))
parser.add_argument("-gpus", default=-1, type=int,
help=("#GPUs to use for the benchmarks; "
"ignored when not supported. Default is to use all."))
parser.add_argument("-cpus", default=0, type=int,
help=("#CPUs to use for the benchmarks; "
"0 means psutil.cpu_count(logical=False)"))
parser.add_argument("-output", default=sys.path[0] + "/results.json", type=str,
help="Output json file with runtime/accuracy stats")
parser.add_argument("-ntrees", default=500, type=int,
help=("Number of trees. Default is as specified in "
"the respective dataset configuration"))
parser.add_argument("-nrows", default=None, type=int,
help=(
"Subset of rows in the datasets to use. Useful for test running "
"benchmarks on small amounts of data. WARNING: Some datasets will "
"give incorrect accuracy results if nrows is specified as they have "
"predefined train/test splits."))
parser.add_argument("-warmup", action="store_true",
help=("Whether to run a small benchmark (fraud) as a warmup"))
parser.add_argument("-verbose", action="store_true", help="Produce verbose output")
parser.add_argument("-extra", default='{}', help="Extra arguments as a python dictionary")
args = parser.parse_args()
# default value for output json file
if not args.output:
args.output = "%s.json" % args.dataset
return args
# benchmarks a single dataset
def benchmark(args, dataset_folder, dataset):
data = prepare_dataset(dataset_folder, dataset, args.nrows)
results = {}
# "all" runs all algorithms
if args.algorithm == "all":
args.algorithm = "xgb-gpu,xgb-cpu,xgb-gpu-dask,lgbm-cpu,lgbm-gpu,cat-cpu,cat-gpu"
for alg in args.algorithm.split(","):
print("Running '%s' ..." % alg)
runner = algorithms.Algorithm.create(alg)
with runner:
train_time = runner.fit(data, args)
pred = runner.test(data)
results[alg] = {
"train_time": train_time,
"accuracy": get_metrics(data, pred),
}
return results
def main():
args = parse_args()
args.cpus = get_number_processors(args)
args.extra = ast.literal_eval(args.extra)
print_sys_info(args)
if args.warmup:
benchmark(args, os.path.join(args.root, "fraud"), "fraud")
if args.dataset == 'all':
args.dataset = 'airline,bosch,fraud,higgs,year,epsilon,covtype,newsgroups'
results = {}
for dataset in args.dataset.split(","):
folder = os.path.join(args.root, dataset)
results.update({dataset: benchmark(args, folder, dataset)})
print(json.dumps({dataset: results[dataset]}, indent=2, sort_keys=True))
output = json.dumps(results, indent=2, sort_keys=True)
    with open(args.output, "w") as output_file:
        output_file.write(output + "\n")
print("Results written to file '%s'" % args.output)
if __name__ == "__main__":
main()
| 6,334 | 42.095238 | 97 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/__init__.py | 0 | 0 | 0 | py |
|
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/loaders.py | import os
import pandas as pd
import arff
import numpy as np
from functools import reduce
import sqlite3
import logging
from libs.planet_kaggle import (to_multi_label_dict, get_file_count, enrich_with_feature_encoding,
featurise_images, generate_validation_files)
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
_FRAUD_PATH = 'fraud_detection', 'credit_card_fraud_kaggle', 'creditcard.csv'
_IOT_PATH = 'iot', 'sensor_stream_berkeley', 'sensor.arff'
_AIRLINE_PATH = 'airline', 'airline_14col.data'
_FOOTBALL_PATH = 'football', 'database.sqlite'
_BCI_PATH = 'bci', 'data.npz'
_HIGGS_PATH = 'higgs', 'HIGGS.csv'
_KAGGLE_ROOT = 'planet'
_PLANET_KAGGLE_LABEL_CSV = 'train_v2.csv'
_PLANET_KAGGLE_TRAIN_DIR = 'train-jpg'
_PLANET_KAGGLE_VAL_DIR = 'validate-jpg'
def _get_datapath():
try:
datapath = os.environ['MOUNT_POINT']
except KeyError:
logger.info("MOUNT_POINT not found in environment. Defaulting to /fileshare")
datapath = '/fileshare'
return datapath
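# Path handling sketch: each *_PATH constant above is a tuple of path components
# that gets folded onto the mount point, e.g. with the default mount point
# reduce(os.path.join, ('higgs', 'HIGGS.csv'), '/fileshare') evaluates to
# '/fileshare/higgs/HIGGS.csv'.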
def load_fraud():
""" Loads the credit card fraud data
The datasets contains transactions made by credit cards in September 2013 by european cardholders.
This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions.
The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
It contains only numerical input variables which are the result of a PCA transformation.
Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about
the data.
Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed
with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first
transaction in the dataset.
The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-senstive learning.
Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve
(AUPRC).
Confusion matrix accuracy is not meaningful for unbalanced classification.
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group
(http://mlg.ulb.ac.be) of ULB (Universite Libre de Bruxelles) on big data mining and fraud detection. More details
on current and past projects on related topics are available on http://mlg.ulb.ac.be/BruFence
and http://mlg.ulb.ac.be/ARTML
Please cite: Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with
Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
Returns
-------
pandas DataFrame
"""
return pd.read_csv(reduce(os.path.join, _FRAUD_PATH, _get_datapath()))
def load_iot():
""" Loads iot data
Sensor stream contains information (temperature, humidity, light, and sensor voltage) collected from 54 sensors deployed
in Intel Berkeley Research Lab. The whole stream contains consecutive information recorded over a 2 months
period (1 reading per 1-3 minutes). I used the sensor ID as the class label, so the learning task of the stream is
to correctly identify the sensor ID (1 out of 54 sensors) purely based on the sensor data and the corresponding recording
time.
While the data stream flow over time, so does the concepts underlying the stream. For example, the lighting during
the working hours is generally stronger than the night, and the temperature of specific sensors (conference room)
may regularly rise during the meetings.
Returns
-------
pandas DataFrame
"""
dataset = arff.load(open(reduce(os.path.join, _IOT_PATH, _get_datapath())))
columns = [i[0] for i in dataset['attributes']]
return pd.DataFrame(dataset['data'], columns=columns)
def load_airline():
""" Loads airline data
The dataset consists of a large amount of records, containing flight arrival and departure details for all the
commercial flights within the USA, from October 1987 to April 2008. Its size is around 116 million records and
5.76 GB of memory.
There are 13 attributes, each represented in a separate column: Year (1987-2008), Month (1-12), Day of Month (1-31),
Day of Week (1:Monday - 7:Sunday), CRS Departure Time (local time as hhmm), CRS Arrival Time (local time as hhmm),
Unique Carrier, Flight Number, Actual Elapsed Time (in min), Origin, Destination, Distance (in miles), and Diverted
(1=yes, 0=no).
The target attribute is Arrival Delay, it is a positive or negative value measured in minutes.
Link to the source: http://kt.ijs.si/elena_ikonomovska/data.html
Returns
-------
pandas DataFrame
"""
cols = ['Year', 'Month', 'DayofMonth', 'DayofWeek', 'CRSDepTime', 'CRSArrTime', 'UniqueCarrier', 'FlightNum', 'ActualElapsedTime', 'Origin', 'Dest', 'Distance', 'Diverted', 'ArrDelay']
return pd.read_csv(reduce(os.path.join, _AIRLINE_PATH, _get_datapath()), names=cols)
def load_football():
""" Loads football data
Dataset of football stats. +25,000 matches, +10,000 players from 11 European Countries with their lead championship
Seasons 2008 to 2016. It also contains players attributes sourced from EA Sports' FIFA video game series,
including the weekly updates, team line up with squad formation (X, Y coordinates), betting odds from up to 10
providers and detailed match events (goal types, possession, corner, cross, fouls, cards etc...) for +10,000 matches.
The meaning of the columns can be found here: http://www.football-data.co.uk/notes.txt
Number of attributes in each table (size of the dataframe):
countries (11, 2)
matches (25979, 115)
leagues (11, 3)
teams (299, 5)
players (183978, 42)
Link to the source: https://www.kaggle.com/hugomathien/soccer
Returns
-------
list of pandas DataFrame
"""
database_path = reduce(os.path.join, _FOOTBALL_PATH, _get_datapath())
with sqlite3.connect(database_path) as con:
countries = pd.read_sql_query("SELECT * from Country", con)
matches = pd.read_sql_query("SELECT * from Match", con)
leagues = pd.read_sql_query("SELECT * from League", con)
teams = pd.read_sql_query("SELECT * from Team", con)
players = pd.read_sql("SELECT * FROM Player_Attributes;", con)
return countries, matches, leagues, teams, players
def load_bci():
""" Loads BCI data
Contains measurements from 64 EEG sensors on the scalp of a single participant.
The purpose of the recording is to determine from the electrical brain activity when the participant is paying attention.
Returns
-------
A tuple containing four numpy arrays
train features
train labels
test features
test labels
"""
npzfile = np.load(reduce(os.path.join, _BCI_PATH, _get_datapath()))
return npzfile['train_X'], npzfile['train_y'], npzfile['test_X'], npzfile['test_y']
def load_higgs():
""" Loads HIGGS data
Dataset of atomic particles measurements. The total size of the data is 11 millions of observations.
It can be used in a classification problem to distinguish between a signal process which produces Higgs
bosons and a background process which does not.
The data has been produced using Monte Carlo simulations. The first 21 features (columns 2-22) are kinematic
properties measured by the particle detectors in the accelerator. The last seven features are functions of
the first 21 features; these are high-level features derived by physicists to help discriminate between the
two classes. The first column is the class label (1 for signal, 0 for background), followed by the 28
features (21 low-level features then 7 high-level features): lepton pT, lepton eta, lepton phi,
missing energy magnitude, missing energy phi, jet 1 pt, jet 1 eta, jet 1 phi, jet 1 b-tag, jet 2 pt, jet 2 eta,
jet 2 phi, jet 2 b-tag, jet 3 pt, jet 3 eta, jet 3 phi, jet 3 b-tag, jet 4 pt, jet 4 eta, jet 4 phi,
jet 4 b-tag, m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb.
Link to the source: https://archive.ics.uci.edu/ml/datasets/HIGGS
Returns
-------
pandas DataFrame
"""
cols = ['boson','lepton_pT','lepton_eta','lepton_phi','missing_energy_magnitude','missing_energy_phi','jet_1_pt','jet_1_eta','jet_1_phi','jet_1_b-tag','jet_2_pt','jet_2_eta','jet_2_phi','jet_2_b-tag','jet_3_pt','jet_3_eta','jet_3_phi','jet_3_b-tag','jet_4_pt','jet_4_eta','jet_4_phi','jet_4_b-tag','m_jj','m_jjj','m_lv','m_jlv','m_bb','m_wbb','m_wwbb']
return pd.read_csv(reduce(os.path.join, _HIGGS_PATH, _get_datapath()), names=cols)
def load_planet_kaggle():
""" Loads Planet Kaggle data
Dataset of satellite images of the Amazon. The objective of this dataset is to label satellite image chips
with atmospheric conditions and various classes of land cover/land use. Resulting algorithms will help the
global community better understand where, how, and why deforestation happens all over the world. The images
use the GeoTiff format and each contain four bands of data: red, green, blue, and near infrared.
To treat the images we used transfer learning with the CNN ResNet50. The images are featurized with this
deep neural network. Once the features are generated we can use a boosted tree to classify them.
Link to the source: https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/data
Returns
-------
A tuple containing four numpy arrays
train_features
y_train
validation_features
y_val
"""
csv_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_LABEL_CSV), _get_datapath())
train_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_TRAIN_DIR), _get_datapath())
val_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_VAL_DIR), _get_datapath())
assert os.path.isfile(csv_path)
assert os.path.exists(train_path)
if not os.path.exists(val_path): os.mkdir(val_path)
if not os.listdir(val_path):
logger.info('Validation folder is empty, moving files...')
generate_validation_files(train_path, val_path)
logger.info('Reading in labels')
labels_df = pd.read_csv(csv_path).pipe(enrich_with_feature_encoding)
multi_label_dict = to_multi_label_dict(labels_df)
nb_train_samples = get_file_count(os.path.join(train_path, '*.jpg'))
nb_validation_samples = get_file_count(os.path.join(val_path, '*.jpg'))
logger.debug('Number of training files {}'.format(nb_train_samples))
logger.debug('Number of validation files {}'.format(nb_validation_samples))
logger.debug('Loading model')
model = ResNet50(include_top=False)
train_features, train_names = featurise_images(model,
train_path,
'train_{}',
range(nb_train_samples),
desc='Featurising training images')
validation_features, validation_names = featurise_images(model,
val_path,
'train_{}',
range(nb_train_samples, nb_train_samples+nb_validation_samples),
desc='Featurising validation images')
# Prepare data
y_train = np.array([multi_label_dict[name] for name in train_names])
y_val = np.array([multi_label_dict[name] for name in validation_names])
return train_features, y_train, validation_features, y_val
| 12,263 | 48.853659 | 356 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/timer.py | #code based on https://github.com/miguelgfierro/codebase/
from timeit import default_timer
class Timer(object):
"""Timer class.
Examples:
>>> big_num = 100000
>>> t = Timer()
>>> t.start()
>>> for i in range(big_num):
>>> r = 1
>>> t.stop()
>>> print(t.interval)
0.0946876304844
>>> with Timer() as t:
>>> for i in range(big_num):
>>> r = 1
>>> print(t.interval)
0.0766928562442
>>> try:
>>> with Timer() as t:
>>> for i in range(big_num):
>>> r = 1
>>> raise(Exception("Get out!"))
>>> finally:
>>> print(t.interval)
0.0757778924471
"""
def __init__(self):
self._timer = default_timer
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
def start(self):
"""Start the timer."""
self.start = self._timer()
def stop(self):
"""Stop the timer. Calculate the interval in seconds."""
self.end = self._timer()
self.interval = self.end - self.start
| 1,215 | 23.32 | 64 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/conversion.py | import pandas as pd
def _get_nominal_integer_dict(nominal_vals):
"""Convert nominal values in integers, starting at 0.
Parameters:
nominal_vals (pd.Series): A series.
Returns:
        d (dict): A dictionary with numeric values.
"""
d = {}
for val in nominal_vals:
if val not in d:
current_max = max(d.values()) if len(d) > 0 else -1
d[val] = current_max+1
return d
def _convert_to_integer(srs, d):
"""Convert series to integer, given a dictionary.
Parameters:
srs (pd.Series): A series.
d (dict): A dictionary mapping values to integers
Returns:
        srs (pd.Series): A series with numeric values.
"""
return srs.map(lambda x: d[x])
def convert_cols_categorical_to_numeric(df, col_list=None):
"""Convert categorical columns to numeric and leave numeric columns
as they are. You can force to convert a numerical column if it is
included in col_list
Parameters:
df (pd.DataFrame): Dataframe.
col_list (list): List of columns.
Returns:
        ret (pd.DataFrame): A dataframe with numeric values.
Examples:
>>> df = pd.DataFrame({'letters':['a','b','c'],'numbers':[1,2,3]})
>>> df_numeric = convert_cols_categorical_to_numeric(df)
>>> print(df_numeric)
letters numbers
0 0 1
1 1 2
2 2 3
"""
if col_list is None: col_list = []
ret = pd.DataFrame()
for column_name in df.columns:
column = df[column_name]
if column.dtype == 'object' or column_name in col_list:
col_dict = _get_nominal_integer_dict(column)
ret[column_name] = _convert_to_integer(column, col_dict)
else:
ret[column_name] = column
return ret
def convert_related_cols_categorical_to_numeric(df, col_list):
"""Convert categorical columns, that are related between each other,
to numeric and leave numeric columns
as they are.
Parameters:
df (pd.DataFrame): Dataframe.
col_list (list): List of columns.
Returns:
        ret (pd.DataFrame): A dataframe with numeric values.
Examples:
>>> df = pd.DataFrame({'letters':['a','b','c'],'letters2':['c','d','e'],'numbers':[1,2,3]})
>>> df_numeric = convert_related_cols_categorical_to_numeric(df, col_list=['letters','letters2'])
>>> print(df_numeric)
letters letters2 numbers
0 0 2 1
1 1 3 2
2 2 4 3
"""
ret = pd.DataFrame()
values=None
for c in col_list:
values = pd.concat([values,df[c]], axis=0)
values = pd.Series(values.unique())
col_dict = _get_nominal_integer_dict(values)
for column_name in df.columns:
column = df[column_name]
if column_name in col_list:
ret[column_name] = _convert_to_integer(column, col_dict)
else:
ret[column_name] = column
return ret
| 3,069 | 30.979167 | 105 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/utils.py | import os
import multiprocessing
def get_number_processors():
try:
num = os.cpu_count()
    except AttributeError:  # os.cpu_count() is unavailable on very old Python versions
num = multiprocessing.cpu_count()
return num
| 174 | 13.583333 | 41 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/metrics.py | #Original source: https://github.com/miguelgfierro/codebase/blob/master/python/machine_learning/metrics.py
import numpy as np
from sklearn.metrics import roc_auc_score,accuracy_score, precision_score, recall_score, f1_score
def classification_metrics_binary(y_true, y_pred):
m_acc = accuracy_score(y_true, y_pred)
m_f1 = f1_score(y_true, y_pred)
m_precision = precision_score(y_true, y_pred)
m_recall = recall_score(y_true, y_pred)
report = {'Accuracy':m_acc, 'Precision':m_precision, 'Recall':m_recall, 'F1':m_f1}
return report
def classification_metrics_binary_prob(y_true, y_prob):
m_auc = roc_auc_score(y_true, y_prob)
report = {'AUC':m_auc}
return report
def classification_metrics_multilabel(y_true, y_pred, labels):
m_acc = accuracy_score(y_true, y_pred)
m_f1 = f1_score(y_true, y_pred, labels, average='weighted')
m_precision = precision_score(y_true, y_pred, labels, average='weighted')
m_recall = recall_score(y_true, y_pred, labels, average='weighted')
report = {'Accuracy':m_acc, 'Precision':m_precision, 'Recall':m_recall, 'F1':m_f1}
return report
def binarize_prediction(y, threshold=0.5):
y_pred = np.where(y > threshold, 1, 0)
return y_pred
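# Illustrative self-check (not part of the original library): running this module
# directly exercises the helpers on a tiny, made-up example without affecting imports.
if __name__ == '__main__':
    y_true = np.array([0, 1, 0, 1])
    y_prob = np.array([0.1, 0.8, 0.6, 0.7])
    y_pred = binarize_prediction(y_prob) # -> array([0, 1, 1, 1])
    print(classification_metrics_binary(y_true, y_pred)) # Accuracy 0.75, F1 0.8
    print(classification_metrics_binary_prob(y_true, y_prob)) # AUC 1.0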
| 1,233 | 36.393939 | 106 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/__init__.py | 0 | 0 | 0 | py |
|
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/football.py | #code from https://www.kaggle.com/airback/match-outcome-prediction-in-football
import numpy as np
import pandas as pd
def get_fifa_stats(match, player_stats):
''' Aggregates fifa stats for a given match. '''
#Define variables
match_id = match.match_api_id
date = match['date']
players = ['home_player_1', 'home_player_2', 'home_player_3', "home_player_4", "home_player_5",
"home_player_6", "home_player_7", "home_player_8", "home_player_9", "home_player_10",
"home_player_11", "away_player_1", "away_player_2", "away_player_3", "away_player_4",
"away_player_5", "away_player_6", "away_player_7", "away_player_8", "away_player_9",
"away_player_10", "away_player_11"]
player_stats_new = pd.DataFrame()
names = []
#Loop through all players
for player in players:
#Get player ID
player_id = match[player]
#Get player stats
stats = player_stats[player_stats.player_api_id == player_id]
#Identify current stats
current_stats = stats[stats.date < date].sort_values(by = 'date', ascending = False)[:1]
if np.isnan(player_id) == True:
overall_rating = pd.Series(0)
else:
current_stats.reset_index(inplace = True, drop = True)
overall_rating = pd.Series(current_stats.loc[0, "overall_rating"])
#Rename stat
name = "{}_overall_rating".format(player)
names.append(name)
#Aggregate stats
player_stats_new = pd.concat([player_stats_new, overall_rating], axis = 1)
player_stats_new.columns = names
player_stats_new['match_api_id'] = match_id
player_stats_new.reset_index(inplace = True, drop = True)
#Return player stats
    return player_stats_new.iloc[0]  # positional access; DataFrame.ix has been removed from pandas
def get_fifa_data(matches, player_stats):
''' Gets fifa data for all matches. '''
#Apply get_fifa_stats for each match
fifa_data = matches.apply(lambda x :get_fifa_stats(x, player_stats), axis = 1)
return fifa_data
def get_match_label(match):
''' Derives a label for a given match. '''
#Define variables
home_goals = match['home_team_goal']
away_goals = match['away_team_goal']
label = pd.DataFrame()
label.loc[0,'match_api_id'] = match['match_api_id']
#Identify match label
if home_goals > away_goals:
label.loc[0,'label'] = "Win"
if home_goals == away_goals:
label.loc[0,'label'] = "Draw"
if home_goals < away_goals:
label.loc[0,'label'] = "Defeat"
#Return label
return label.loc[0]
def get_overall_fifa_rankings(fifa, get_overall = False):
''' Get overall fifa rankings from fifa data. '''
temp_data = fifa
#Check if only overall player stats are desired
if get_overall == True:
#Get overall stats
data = temp_data.loc[:,(fifa.columns.str.contains('overall_rating'))]
data.loc[:,'match_api_id'] = temp_data.loc[:,'match_api_id']
else:
#Get all stats except for stat date
cols = fifa.loc[:,(fifa.columns.str.contains('date_stat'))]
temp_data = fifa.drop(cols.columns, axis = 1)
data = temp_data
#Return data
return data
def get_last_matches(matches, date, team, x = 10):
''' Get the last x matches of a given team. '''
#Filter team matches from matches
team_matches = matches[(matches['home_team_api_id'] == team) | (matches['away_team_api_id'] == team)]
#Filter x last matches from team matches
last_matches = team_matches[team_matches.date < date].sort_values(by = 'date', ascending = False).iloc[0:x,:]
#Return last matches
return last_matches
def get_last_matches_against_eachother(matches, date, home_team, away_team, x = 10):
''' Get the last x matches of two given teams. '''
#Find matches of both teams
home_matches = matches[(matches['home_team_api_id'] == home_team) & (matches['away_team_api_id'] == away_team)]
away_matches = matches[(matches['home_team_api_id'] == away_team) & (matches['away_team_api_id'] == home_team)]
total_matches = pd.concat([home_matches, away_matches])
#Get last x matches
try:
last_matches = total_matches[total_matches.date < date].sort_values(by = 'date', ascending = False).iloc[0:x,:]
except:
last_matches = total_matches[total_matches.date < date].sort_values(by = 'date', ascending = False).iloc[0:total_matches.shape[0],:]
#Check for error in data
if(last_matches.shape[0] > x):
print("Error in obtaining matches")
#Return data
return last_matches
def get_goals(matches, team):
    ''' Get the goals of a specific team from a set of matches. '''
#Find home and away goals
home_goals = int(matches.home_team_goal[matches.home_team_api_id == team].sum())
away_goals = int(matches.away_team_goal[matches.away_team_api_id == team].sum())
total_goals = home_goals + away_goals
#Return total goals
return total_goals
def get_goals_conceided(matches, team):
    ''' Get the goals conceded of a specific team from a set of matches. '''
#Find home and away goals
home_goals = int(matches.home_team_goal[matches.away_team_api_id == team].sum())
away_goals = int(matches.away_team_goal[matches.home_team_api_id == team].sum())
total_goals = home_goals + away_goals
#Return total goals
return total_goals
def get_wins(matches, team):
    ''' Get the number of wins of a specific team from a set of matches. '''
#Find home and away wins
home_wins = int(matches.home_team_goal[(matches.home_team_api_id == team) & (matches.home_team_goal > matches.away_team_goal)].count())
away_wins = int(matches.away_team_goal[(matches.away_team_api_id == team) & (matches.away_team_goal > matches.home_team_goal)].count())
total_wins = home_wins + away_wins
#Return total wins
return total_wins
def get_match_features(match, matches, x = 10):
''' Create match specific features for a given match. '''
#Define variables
date = match.date
home_team = match.home_team_api_id
away_team = match.away_team_api_id
#Get last x matches of home and away team
matches_home_team = get_last_matches(matches, date, home_team, x = 10)
matches_away_team = get_last_matches(matches, date, away_team, x = 10)
#Get last x matches of both teams against each other
last_matches_against = get_last_matches_against_eachother(matches, date, home_team, away_team, x = 3)
#Create goal variables
home_goals = get_goals(matches_home_team, home_team)
away_goals = get_goals(matches_away_team, away_team)
home_goals_conceided = get_goals_conceided(matches_home_team, home_team)
away_goals_conceided = get_goals_conceided(matches_away_team, away_team)
#Define result data frame
result = pd.DataFrame()
#Define ID features
result.loc[0, 'match_api_id'] = match.match_api_id
result.loc[0, 'league_id'] = match.league_id
#Create match features
result.loc[0, 'home_team_goals_difference'] = home_goals - home_goals_conceided
result.loc[0, 'away_team_goals_difference'] = away_goals - away_goals_conceided
result.loc[0, 'games_won_home_team'] = get_wins(matches_home_team, home_team)
result.loc[0, 'games_won_away_team'] = get_wins(matches_away_team, away_team)
result.loc[0, 'games_against_won'] = get_wins(last_matches_against, home_team)
result.loc[0, 'games_against_lost'] = get_wins(last_matches_against, away_team)
#Add season
result.loc[0, 'season'] = int(match['season'].split('/')[0])
#Return match features
return result.loc[0]
def create_feables(matches, fifa, bookkeepers, get_overall = False, horizontal = True, x = 10, all_leagues = True, verbose = True):
''' Create and aggregate features and labels for all matches. '''
#Get fifa stats features
fifa_stats = get_overall_fifa_rankings(fifa, get_overall)
if verbose == True:
print("Generating match features...")
#Get match features for all matches
match_stats = matches.apply(lambda x: get_match_features(x, matches, x = 10), axis = 1)
#Create dummies for league ID feature
if all_leagues:
dummies = pd.get_dummies(match_stats['league_id']).rename(columns = lambda x: 'League_' + str(x))
match_stats = pd.concat([match_stats, dummies], axis = 1)
match_stats.drop(['league_id'], inplace = True, axis = 1)
if verbose == True:
print("Generating match labels...")
#Create match labels
labels = matches.apply(get_match_label, axis = 1)
if verbose == True:
print("Generating bookkeeper data...")
#Get bookkeeper quotas for all matches
bk_data = get_bookkeeper_data(matches, bookkeepers, horizontal = True)
bk_data.loc[:,'match_api_id'] = matches.loc[:,'match_api_id']
#Merges features and labels into one frame
features = pd.merge(match_stats, fifa_stats, on = 'match_api_id', how = 'left')
features = pd.merge(features, bk_data, on = 'match_api_id', how = 'left')
feables = pd.merge(features, labels, on = 'match_api_id', how = 'left')
#Drop NA values
feables.dropna(inplace = True)
#Return preprocessed data
return feables
def convert_odds_to_prob(match_odds):
''' Converts bookkeeper odds to probabilities. '''
#Define variables
match_id = match_odds.loc[:,'match_api_id']
bookkeeper = match_odds.loc[:,'bookkeeper']
win_odd = match_odds.loc[:,'Win']
draw_odd = match_odds.loc[:,'Draw']
loss_odd = match_odds.loc[:,'Defeat']
#Converts odds to prob
win_prob = 1 / win_odd
draw_prob = 1 / draw_odd
loss_prob = 1 / loss_odd
total_prob = win_prob + draw_prob + loss_prob
probs = pd.DataFrame()
#Define output format and scale probs by sum over all probs
probs.loc[:,'match_api_id'] = match_id
probs.loc[:,'bookkeeper'] = bookkeeper
probs.loc[:,'Win'] = win_prob / total_prob
probs.loc[:,'Draw'] = draw_prob / total_prob
probs.loc[:,'Defeat'] = loss_prob / total_prob
#Return probs and meta data
return probs
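# Worked example (odds invented): quoted odds of 1.8 / 3.5 / 4.5 for win/draw/defeat
# imply raw probabilities 1/1.8 ~ 0.556, 1/3.5 ~ 0.286 and 1/4.5 ~ 0.222; dividing by
# their sum (~1.063, the bookkeeper's margin) yields ~0.522 / 0.269 / 0.209.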
def get_bookkeeper_data(matches, bookkeepers, horizontal = True):
''' Aggregates bookkeeper data for all matches and bookkeepers. '''
bk_data = pd.DataFrame()
#Loop through bookkeepers
for bookkeeper in bookkeepers:
#Find columns containing data of bookkeeper
temp_data = matches.loc[:,(matches.columns.str.contains(bookkeeper))]
temp_data.loc[:, 'bookkeeper'] = str(bookkeeper)
temp_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']
#Rename odds columns and convert to numeric
cols = temp_data.columns.values
cols[:3] = ['Win','Draw','Defeat']
temp_data.columns = cols
temp_data.loc[:,'Win'] = pd.to_numeric(temp_data['Win'])
temp_data.loc[:,'Draw'] = pd.to_numeric(temp_data['Draw'])
temp_data.loc[:,'Defeat'] = pd.to_numeric(temp_data['Defeat'])
#Check if data should be aggregated horizontally
if(horizontal == True):
#Convert data to probs
temp_data = convert_odds_to_prob(temp_data)
temp_data.drop('match_api_id', axis = 1, inplace = True)
temp_data.drop('bookkeeper', axis = 1, inplace = True)
#Rename columns with bookkeeper names
win_name = bookkeeper + "_" + "Win"
draw_name = bookkeeper + "_" + "Draw"
defeat_name = bookkeeper + "_" + "Defeat"
temp_data.columns.values[:3] = [win_name, draw_name, defeat_name]
#Aggregate data
bk_data = pd.concat([bk_data, temp_data], axis = 1)
else:
#Aggregate vertically
bk_data = bk_data.append(temp_data, ignore_index = True)
#If horizontal add match api id to data
if(horizontal == True):
temp_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']
#Return bookkeeper data
return bk_data
def get_bookkeeper_probs(matches, bookkeepers, horizontal = False):
''' Get bookkeeper data and convert to probabilities for vertical aggregation. '''
#Get bookkeeper data
data = get_bookkeeper_data(matches, bookkeepers, horizontal = False)
#Convert odds to probabilities
probs = convert_odds_to_prob(data)
#Return data
return probs
| 12,833 | 35.985591 | 140 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/planet_kaggle.py | import os
import numpy as np
import glob
from tqdm import tqdm
import shutil
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
def labels_from(labels_df):
""" Extracts the unique labels from the labels dataframe
"""
# Build list with unique labels
label_list = []
for tag_str in labels_df.tags.values:
labels = tag_str.split(' ')
for label in labels:
if label not in label_list:
label_list.append(label)
return label_list
def enrich_with_feature_encoding(labels_df):
# Add onehot features for every label
for label in labels_from(labels_df):
labels_df[label] = labels_df['tags'].apply(lambda x: 1 if label in x.split(' ') else 0)
return labels_df
def to_multi_label_dict(enriched_labels_df):
df = enriched_labels_df.set_index('image_name').drop('tags', axis=1)
return dict((filename, encoded_array) for filename, encoded_array in zip(df.index, df.values))
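# Illustration (made-up row): for a labels dataframe row with image_name 'train_0'
# and tags 'clear primary', enrich_with_feature_encoding() adds one-hot columns
# ('clear' -> 1, 'primary' -> 1, every other label -> 0) and to_multi_label_dict()
# then maps 'train_0' to that 0/1 vector.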
def get_file_count(folderpath):
""" Returns the number of files in a folder
"""
return len(glob.glob(folderpath))
def threshold_prediction(pred_y, threshold=0.5):  # TODO: Needs to be tuned?
return pred_y > threshold
def read_images(filepath, filenames):
""" Read images in batches
"""
img_data = list()
for name in filenames:
img_path = os.path.join(filepath, name+'.jpg')
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
img_data.append(preprocess_input(x))
return np.concatenate(img_data)
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
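# Small illustration of the batching helper above:
#   list(chunks([0, 1, 2, 3, 4], 2)) -> [[0, 1], [2, 3], [4]]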
def featurise_images(model, filepath, nameformat, num_iter, batch_size=32, desc=None):
""" Use DL model to featurise images
"""
features = list()
img_names = list()
num_list = list(num_iter)
num_batches = np.ceil(len(num_list)/batch_size)
for num_chunk in tqdm(chunks(num_list, batch_size), total=num_batches, desc=desc):
filenames = [nameformat.format(index) for index in num_chunk]
batch_images = read_images(filepath, filenames)
img_names.extend(filenames)
features.extend(model.predict_on_batch(batch_images).squeeze())
return np.array(features), img_names
def generate_validation_files(train_path, val_path, num_train = 35000):
""" Creates the validation files from the train files.
"""
num_train_ini = get_file_count(os.path.join(train_path, '*.jpg'))
assert num_train_ini > num_train
    # note: the mv command uses shell brace expansion ({a..b}), so a bash-compatible shell is assumed
    order = 'mv ' + train_path + '/train_{' + str(num_train) + '..' + str(num_train_ini) + '}.jpg ' + val_path
os.system(order)
| 2,761 | 30.033708 | 110 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/notebook_memory_management.py | #Source: https://github.com/ianozsvald/ipython_memory_usage
"""Profile mem usage envelope of IPython commands and report interactively"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import time
import memory_profiler
from IPython import get_ipython
import threading
# keep a global accounting for the last known memory usage
# which is the reference point for the memory delta calculation
previous_call_memory_usage = memory_profiler.memory_usage()[0]
t1 = time.time() # will be set to current time later
keep_watching = True
watching_memory = True
input_cells = get_ipython().user_ns['In']
def start_watching_memory():
"""Register memory profiling tools to IPython instance."""
global watching_memory
watching_memory = True
ip = get_ipython()
ip.events.register("post_run_cell", watch_memory)
ip.events.register("pre_run_cell", pre_run_cell)
def stop_watching_memory():
"""Unregister memory profiling tools from IPython instance."""
global watching_memory
watching_memory = False
ip = get_ipython()
try:
ip.events.unregister("post_run_cell", watch_memory)
except ValueError:
pass
try:
ip.events.unregister("pre_run_cell", pre_run_cell)
except ValueError:
pass
def watch_memory():
# bring in the global memory usage value from the previous iteration
global previous_call_memory_usage, peak_memory_usage, keep_watching, \
watching_memory, input_cells
new_memory_usage = memory_profiler.memory_usage()[0]
memory_delta = new_memory_usage - previous_call_memory_usage
keep_watching = False
# calculate time delta using global t1 (from the pre-run event) and current
# time
time_delta_secs = time.time() - t1
num_commands = len(input_cells) - 1
cmd = "In [{}]".format(num_commands)
# convert the results into a pretty string
output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
"{time_delta:0.2f}s, total RAM usage "
"{memory_usage:0.2f} MiB")
output = output_template.format(time_delta=time_delta_secs,
cmd=cmd,
memory_delta=memory_delta,
memory_usage=new_memory_usage)
if watching_memory:
print(str(output))
previous_call_memory_usage = new_memory_usage
def pre_run_cell():
"""Capture current time before we execute the current command"""
global t1
t1 = time.time()
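# Typical notebook usage (illustrative): import this module in a cell and call
# start_watching_memory(); every subsequently executed cell then prints a line of
# the form
#   In [3] used 12.3456 MiB RAM in 0.52s, total RAM usage 245.67 MiB
# and stop_watching_memory() unregisters the callbacks again.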
| 2,794 | 35.298701 | 106 | py |
gbm-bench | gbm-bench-master/3rdparty/codebase/python/machine_learning/metrics.py | from sklearn.metrics import (confusion_matrix, accuracy_score, roc_auc_score, f1_score, log_loss, precision_score,
recall_score, mean_squared_error, mean_absolute_error, r2_score)
import numpy as np
def classification_metrics_binary(y_true, y_pred):
"""Returns a report with different metrics for a binary classification problem.
    - Accuracy: Number of correct predictions made as a ratio of all predictions. Useful when there is an equal number
of observations in each class and all predictions and prediction errors are equally important.
- Confusion matrix: C_ij where observations are known to be in group i but predicted to be in group j. In binary
classification true negatives is C_00, false negatives is C_10, true positives is C_11 and false positives is C_01.
- Precision: Number of true positives divided by the number of true and false positives. It is the ability of the
classifier not to label as positive a sample that is negative.
- Recall: Number of true positives divided by the number of true positives and false negatives. It is the ability
of the classifier to find all the positive samples.
High Precision and low Recall will return few positive results but most of them will be correct.
High Recall and low Precision will return many positive results but most of them will be incorrect.
- F1 Score: 2*((precision*recall)/(precision+recall)). It measures the balance between precision and recall.
Args:
y_true (list or array): True labels.
y_pred (list or array): Predicted labels (binary).
Returns:
report (dict): Dictionary with metrics.
Examples:
>>> from collections import OrderedDict
>>> y_true = [0,1,0,0,1]
>>> y_pred = [0,1,0,1,1]
>>> result = classification_metrics_binary(y_true, y_pred)
>>> OrderedDict(sorted(result.items()))
OrderedDict([('Accuracy', 0.8), ('Confusion Matrix', array([[2, 1],
[0, 2]])), ('F1', 0.8), ('Precision', 0.6666666666666666), ('Recall', 1.0)])
"""
m_acc = accuracy_score(y_true, y_pred)
m_f1 = f1_score(y_true, y_pred)
m_precision = precision_score(y_true, y_pred)
m_recall = recall_score(y_true, y_pred)
m_conf = confusion_matrix(y_true, y_pred)
report = {'Accuracy': m_acc, 'Precision': m_precision, 'Recall': m_recall, 'F1': m_f1, 'Confusion Matrix': m_conf}
return report
def classification_metrics_multilabel(y_true, y_pred, labels):
"""Returns a report with different metrics for a multilabel classification problem.
    - Accuracy: Number of correct predictions made as a ratio of all predictions. Useful when there is an equal number
of observations in each class and all predictions and prediction errors are equally important.
- Confusion matrix: C_ij where observations are known to be in group i but predicted to be in group j. In multilabel
classification true predictions are in the diagonal and false predictions outside the diagonal.
- Precision: Number of true positives divided by the number of true and false positives. It is the ability of the
classifier not to label as positive a sample that is negative.
- Recall: Number of true positives divided by the number of true positives and false negatives. It is the ability
of the classifier to find all the positive samples.
High Precision and low Recall will return few positive results but most of them will be correct.
High Recall and low Precision will return many positive results but most of them will be incorrect.
- F1 Score: 2*((precision*recall)/(precision+recall)). It measures the balance between precision and recall.
Args:
y_true (list or array): True labels.
y_pred (list or array): Predicted labels.
labels (list): Label index or name.
Returns:
report (dict): Dictionary with metrics.
Examples:
>>> from collections import OrderedDict
>>> y_true = [0,1,2,0,1]
>>> y_pred = [0,1,0,1,1]
>>> result = classification_metrics_multilabel(y_true, y_pred, [0,1,2])
>>> OrderedDict(sorted(result.items()))
OrderedDict([('Accuracy', 0.6), ('Confusion Matrix', array([[1, 1, 0],
[0, 2, 0],
[1, 0, 0]])), ('F1', 0.52), ('Precision', 0.4666666666666666), ('Recall', 0.6)])
"""
m_acc = accuracy_score(y_true, y_pred)
m_f1 = f1_score(y_true, y_pred, labels, average='weighted')
m_precision = precision_score(y_true, y_pred, labels, average='weighted')
m_recall = recall_score(y_true, y_pred, labels, average='weighted')
m_conf = confusion_matrix(y_true, y_pred, labels)
report = {'Accuracy': m_acc, 'Precision': m_precision, 'Recall': m_recall, 'F1': m_f1, 'Confusion Matrix': m_conf}
return report
def classification_metrics_binary_prob(y_true, y_prob):
"""Returns a report with different metrics for a binary classification problem.
- AUC: The Area Under the Curve represents the ability to discriminate between positive and negative classes. An
    area of 1 represents perfect scoring and an area of 0.5 means random guessing.
- Log loss: Also called logistic regression loss or cross-entropy loss. It quantifies the performance by
penalizing false classifications. Minimizing the Log Loss is equivalent to minimizing the squared error but using
    probabilistic predictions. Log loss heavily penalizes classifiers that are confident about incorrect classifications.
Args:
y_true (list or array): True labels.
y_prob (list or array): Predicted labels (probability).
Returns:
report (dict): Dictionary with metrics.
Examples:
>>> from collections import OrderedDict
>>> y_true = [0,1,0,0,1]
>>> y_prob = [0.2,0.7,0.4,0.3,0.2]
>>> result = classification_metrics_binary_prob(y_true, y_prob)
>>> OrderedDict(sorted(result.items()))
OrderedDict([('AUC', 0.5833333333333333), ('Log loss', 0.6113513950783531)])
>>> y_prob = [0.2,0.7,0.4,0.3,0.3]
>>> result = classification_metrics_binary_prob(y_true, y_prob)
>>> OrderedDict(sorted(result.items()))
OrderedDict([('AUC', 0.75), ('Log loss', 0.5302583734567203)])
"""
m_auc = roc_auc_score(y_true, y_prob)
m_logloss = log_loss(y_true, y_prob)
report = {'AUC': m_auc, 'Log loss': m_logloss}
return report
def regression_metrics(y_true, y_pred):
"""Returns a report with different metrics for a regression problem.
- Mean Squared Error: MSE is a risk metric corresponding to the expected value of the squared (quadratic) error.
It has the disadvantage of heavily weighting outliers.
- Mean Absolute Error: MAE is a risk metric corresponding to the expected value of the absolute error or L1 loss.
Not as sensitive to outliers.
    - R Square: R2 is a statistical measure of how close the data are to the fitted regression line. Its best possible
score is 1.0 and it can be negative (because the model can be arbitrarily worse). A score of 0 means that the
variables are not linearly correlated.
- Root Mean Squared Error: RMSE is the square root of MSE. It also gives a relatively high weight to large errors.
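    As implemented on the arrays y_true and y_pred: MSE = mean((y_true - y_pred)^2),
    MAE = mean(|y_true - y_pred|) and RMSE = sqrt(MSE).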
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
Returns:
report (dict): Dictionary with metrics.
Examples:
>>> from collections import OrderedDict
>>> y_true = [5,1,0,7,1]
>>> y_pred = [6,0.7,0.4,10,20]
>>> result = regression_metrics(y_true, y_pred)
>>> OrderedDict(sorted(result.items()))
OrderedDict([('MAE', 4.74), ('MSE', 74.25), ('R2', -9.088315217391303), ('RMSE', 8.616843969807043)])
>>> y_true = [5,1,0,7,1]
>>> y_pred = [6,0.7,0.4,10,2]
>>> result = regression_metrics(y_true, y_pred)
>>> OrderedDict(sorted(result.items()))
OrderedDict([('MAE', 1.1400000000000001), ('MSE', 2.25), ('R2', 0.6942934782608696), ('RMSE', 1.5)])
"""
mse = mean_squared_error(y_true, y_pred)
mae = mean_absolute_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)
report = {'MSE': mse, 'MAE': mae, 'R2': r2, 'RMSE': np.sqrt(mse)}
return report
def precision_at_k(y_true, y_pred, k=None):
"""Precision at K.
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): precision at k (max=1, min=0)
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> precision_at_k(y_true, y_pred, k=3)
1.0
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> precision_at_k(y_true, y_pred, k=3)
0.3333333333333333
"""
predictions = y_pred[:k]
num_hit = len(set(predictions).intersection(set(y_true)))
return float(num_hit) / len(predictions)
def recall_at_k(y_true, y_pred, k=None):
"""Recall at K.
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): recall at k (max=1, min=0)
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> recall_at_k(y_true, y_pred, k=3)
0.6
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> recall_at_k(y_true, y_pred, k=3)
0.2
"""
predictions = y_pred[:k]
num_hit = len(set(predictions).intersection(set(y_true)))
return float(num_hit) / len(y_true)
def discounted_cumulative_gain(y_true, y_pred, k=None):
"""Discounted Cumulative Gain (DCG).
Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain
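    As implemented here: DCG@k = sum_{i=1..k} rel_i / log2(i + 1), where rel_i is the true
    relevance of the item ranked i-th by the predicted scores.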
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): DCG
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> discounted_cumulative_gain(y_true, y_pred, k=3)
5.130929753571458
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> discounted_cumulative_gain(y_true, y_pred, k=3)
6.0
"""
order = np.argsort(y_pred)[::-1]
y_true = np.take(y_true, order[:k])
return (y_true / np.log2(np.arange(y_true.shape[0]) + 2)).sum()
def exponential_discounted_cumulative_gain(y_true, y_pred, k=None):
"""Exponential Discounted Cumulative Gain (eDCG).
Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain
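    As implemented here: eDCG@k = sum_{i=1..k} (2^rel_i - 1) / log2(i + 1), which weights
    highly relevant items more strongly than the linear DCG.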
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): eDCG
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
19.130929753571458
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
32.0
"""
order = np.argsort(y_pred)[::-1]
y_true = np.take(y_true, order[:k])
return ((2 ** y_true - 1) / np.log2(np.arange(y_true.shape[0]) + 2)).sum()
def normalized_discounted_cumulative_gain(y_true, y_pred, k=None):
"""Normalized Discounted Cumulative Gain (nDCG).
Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain
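    As implemented here: nDCG@k = DCG@k of the predicted ranking divided by DCG@k of the
    ideal ranking (the true values sorted in descending order).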
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): nDCG (max=1, min=0)
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> normalized_discounted_cumulative_gain(y_true, y_pred, k=3)
0.4599812921368268
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> normalized_discounted_cumulative_gain(y_true, y_pred, k=3)
0.537892328558952
"""
return discounted_cumulative_gain(y_true, y_pred, k) / discounted_cumulative_gain(y_true, y_true, k)
def normalized_exponential_discounted_cumulative_gain(y_true, y_pred, k=None):
"""Normalized Exponential Discounted Cumulative Gain (neDCG).
Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): neDCG (max=1, min=0)
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> normalized_exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
0.1292116839006246
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> normalized_exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
0.21950735175253772
"""
    return exponential_discounted_cumulative_gain(y_true, y_pred, k)/exponential_discounted_cumulative_gain(y_true, y_true, k)
| 13,258 | 44.098639 | 120 | py |
dataqa | dataqa-master/subtract_continuum.py | #!/usr/bin/env python
"""
This script does continuum subtraction on line cubes.
- It creates a new fits file in: '/data*/apertif/<obs_id>/<beam>/line/cubes/HI_image_cube_contsub.fits'
Parameter
obs_id : int
Observation number which should be assessed
Example:
python subtract_continuum.py obs_id
"""
import os
import sys
import apercal.libs.lib as lib
import subprocess
import socket
import argparse
import time
# ---------------------------------------
# Create and parse argument list
# ---------------------------------------
parser = argparse.ArgumentParser(
description='Create mosaic image from pipeline continuum images')
# main argument: Observation number
parser.add_argument("obs_id", type=str,
help='Observation Number / Scan Number / TASK-ID')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal')
args = parser.parse_args()
# ---------------------------------------
# Check what host the user is on
# ---------------------------------------
host_name = socket.gethostname()
if host_name != "happili-01" and not args.trigger_mode:
print("WARNING: You are not working on happili-01.")
print("WARNING: The script will not process all beams")
print("Please switch to happili-01")
elif args.trigger_mode:
print(
"--> Running continuum QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
# ---------------------------------------
# Basic parameters
# ---------------------------------------
# get the observation number
data_dir = args.obs_id
print("Running continuum subtraction for {0:s}".format(data_dir))
start_time = time.time()
# ---------------------------------------
# subtract continuum from line cubes
# ---------------------------------------
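# For every beam that contains an HI image cube this function:
#   1. converts the FITS cube to miriad format (fits op=xyin),
#   2. fits and subtracts a first-order polynomial continuum with miriad contsub
#      (using channels 1-800 as continuum channels),
#   3. writes the result back to FITS (fits op=xyout) and removes the intermediate
#      miriad datasets.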
def subtract_cont(data, beams):
for i in beams:
if os.path.exists('/'+str(data)+'/apertif/'+str(data_dir)+'/'+str(i)+'/line/cubes/HI_image_cube.fits'):
print("Processing beam {0}".format(i))
fits = lib.miriad('fits')
fits.in_ = '/'+str(data)+'/apertif/'+str(data_dir) + \
'/'+str(i)+'/line/cubes/HI_image_cube.fits'
fits.out = '/'+str(data)+'/apertif/'+str(data_dir) + \
'/'+str(i)+'/line/cubes/HI_image_cube.mir'
fits.op = 'xyin'
fits.go()
contsub = lib.miriad('contsub')
contsub.in_ = '/'+str(data)+'/apertif/'+str(data_dir) + \
'/'+str(i)+'/line/cubes/HI_image_cube.mir'
contsub.mode = 'poly,1'
contsub.contchan = '"(1,800)"'
contsub.out = '/'+str(data)+'/apertif/'+str(data_dir) + \
'/'+str(i)+'/line/cubes/HI_image_cube_contsub.mir'
contsub.go()
fits.in_ = '/'+str(data)+'/apertif/'+str(data_dir) + \
'/'+str(i)+'/line/cubes/HI_image_cube_contsub.mir'
fits.out = '/'+str(data)+'/apertif/'+str(data_dir) + \
'/'+str(i)+'/line/cubes/HI_image_cube_contsub.fits'
fits.op = 'xyout'
fits.go()
os.system('rm -rf /'+str(data)+'/apertif/'+str(data_dir) +
'/'+str(i)+'/line/cubes/HI_image_cube_contsub.mir')
os.system('rm -rf /'+str(data)+'/apertif/'+str(data_dir) +
'/'+str(i)+'/line/cubes/HI_image_cube.mir')
# else print('File not found:/'+str(data)+'/apertif/'+str(data_dir)+'/'+str(i)+'/line/cubes/HI_image_cube.fits')
# find beams
if os.path.exists('/data/apertif/'+str(data_dir)) or args.trigger_mode:
beams_1 = os.listdir('/data/apertif/'+str(data_dir))
    subtract_cont('data', beams_1)
if os.path.exists('/data2/apertif/'+str(data_dir)) and not args.trigger_mode:
beams_2 = os.listdir('/data2/apertif/'+str(data_dir))
    subtract_cont('data2', beams_2)
if os.path.exists('/data3/apertif/'+str(data_dir)) and not args.trigger_mode:
beams_3 = os.listdir('/data3/apertif/'+str(data_dir))
    subtract_cont('data3', beams_3)
if os.path.exists('/data4/apertif/'+str(data_dir)) and not args.trigger_mode:
beams_4 = os.listdir('/data4/apertif/'+str(data_dir))
    subtract_cont('data4', beams_4)
print("Finished continuum subtraction ({0:.0f}s)".format(
time.time() - start_time))
| 4,482 | 34.023438 | 126 | py |
dataqa | dataqa-master/create_report.py | #!/usr/bin/python2.7
"""
Script to create an html overview
# NOTE:
In triggered QA crosscal and selfcal plots are distributed over the nodes.
Preflag plots are also distributed over the nodes.
An option exists to combine the QA from different happilis if run on happili-01
You can specify the name of the target, fluxcal, polcal and OSA which will be saved
in a text file. If this information is available some pages will use it to display
further information.
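Example call (the obs id and source names below are illustrative placeholders):
    python create_report.py 190505048 --target=LH_WSRT --fluxcal=3C147 --polcal=3C286 --osa="A. Observer"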
"""
import os
import sys
from astropy.table import Table
import logging
import glob
import time
import argparse
import socket
from apercal.libs import lib
from report import html_report as hp
from report import html_report_dir as hpd
from report.merge_ccal_scal_plots import run_merge_plots
from report.pipeline_run_time import get_pipeline_run_time
from report.make_nptabel_summary import make_nptabel_csv
from line.cube_stats import combine_cube_stats
from continuum.continuum_tables import merge_continuum_image_properties_table
from cb_plots import make_cb_plots_for_report
from crosscal.dish_delay_plot import get_dish_delay_plots
from scandata import get_default_imagepath
def main():
start_time = time.time()
# Create and parse argument list
# ++++++++++++++++++++++++++++++
parser = argparse.ArgumentParser(
description='Create overview for QA')
# 1st argument: Observation number
parser.add_argument("obs_id", type=str,
help='Observation Number')
parser.add_argument("--target", type=str, default='',
help='Name of the target')
parser.add_argument("--fluxcal", type=str, default='',
help='Name of the flux calibrator')
parser.add_argument("--polcal", type=str, default='',
help='Name of the polarisation calibrator')
parser.add_argument("--osa", type=str, default='',
help='Name of the OSA')
parser.add_argument("-p", "--path", type=str,
help='Path to QA output')
parser.add_argument("-b", "--basedir", type=str,
help='Base directory where the obs id is')
parser.add_argument("--tank", action="store_true", default=False,
help='Create the report on new volume')
parser.add_argument("-a", "--add_osa_report", action="store_true", default=False,
help='Add only the osa report to the webpage')
parser.add_argument("-c", "--combine", action="store_true", default=False,
                        help='(Deprecated) Set to create a combined report from all happilis on happili-01. It will overwrite the report on happili-01')
parser.add_argument("--no_merge", action="store_true", default=False,
                        help='Set to skip merging the selfcal and crosscal plots')
parser.add_argument("--do_not_read_timing", action="store_true", default=False,
                        help='Set to avoid reading timing information. Only makes sense if the script is run multiple times or for debugging')
parser.add_argument("--page_only", action="store_true", default=False,
                        help='Set to only create the webpages themselves')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal.')
parser.add_argument("--single_node", action="store_true", default=False,
                        help='Set it to run QA on a single node and get the same result as when running it like the OSA. Note, this is different from trigger mode.')
args = parser.parse_args()
obs_id = args.obs_id
qa_dir = args.path
base_dir = args.basedir
do_combine = args.combine
add_osa_report = args.add_osa_report
# directory where the output will be of pybdsf will be stored
if qa_dir is None:
if base_dir is not None:
qa_dir = get_default_imagepath(obs_id, basedir=base_dir)
else:
qa_dir = get_default_imagepath(obs_id)
# check that path exists
if not os.path.exists(qa_dir):
print(
"Directory {0:s} does not exist and will be created".format(qa_dir))
os.makedirs(qa_dir)
# change the base directory from /data to /tank
if args.tank and "/data" in qa_dir:
print("Switching to /tank")
qa_dir = qa_dir.replace("/data", "/tank")
# check the mode to run the validation
qa_report_dir = "{0:s}report".format(
qa_dir)
# check that this directory exists (just in case)
if not os.path.exists(qa_report_dir):
print("Directory {0:s} does not exist and will be created".format(
qa_report_dir))
os.makedirs(qa_report_dir)
lib.setup_logger(
'debug', logfile='{0:s}/create_report.log'.format(qa_report_dir))
logger = logging.getLogger(__name__)
# if osa report should be added, check it is available
if add_osa_report:
# name of the osa report for this observation
osa_report = os.path.join(
qa_report_dir, "OSA_Report/{}_OSA_report.ecsv".format(obs_id))
# check that the file is actually there
if not os.path.exists(osa_report):
logger.error("No OSA report found. Abort")
return -1
else:
osa_report = ''
# Saving observation information if they do not exist yet
# =======================================================
table_name = "{0}_obs.ecsv".format(obs_id)
table_name_with_path = os.path.join(qa_dir, table_name)
if not os.path.exists(table_name_with_path):
obs_info = Table([
[obs_id],
[args.target],
[args.fluxcal],
[''],
[args.polcal],
[''],
[args.osa]], names=(
'Obs_ID', 'Target', 'Flux_Calibrator', 'Flux_Calibrator_Obs_IDs', 'Pol_Calibrator', 'Pol_Calibrator_Obs_IDs', 'OSA'))
try:
obs_info.write(
table_name_with_path, format='ascii.ecsv', overwrite=True)
except Exception as e:
logger.warning("Saving observation information in {0} failed.".format(
table_name_with_path))
logger.exception(e)
else:
logger.info(
("Saving observation information in {0} ... Done.".format(table_name_with_path)))
else:
logger.info(
("Observation information already exists. Reading {0}.".format(table_name_with_path)))
obs_info = Table.read(table_name_with_path, format="ascii.ecsv")
# check on which happili we are:
host_name = socket.gethostname()
if args.trigger_mode:
logger.info(
"--> Running report QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
elif args.single_node:
logger.info(
"--> Running report QA in single-node mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
elif do_combine:
logger.info("Combining QAs from different happilis")
if host_name != "happili-01":
logger.warning("You are not working on happili-01.")
logger.warning("Cannot combine QA from different happilis")
do_combine = False
elif host_name != "happili-01" and not args.trigger_mode:
logger.warning("You are not working on happili-01.")
logger.warning("The script will not process all beams")
logger.warning("Please switch to happili-01")
apercal_log_file = "/data/apertif/{0:s}/apercal.log".format(
obs_id)
# logging.basicConfig(filename='{0:s}/create_report.log'.format(qa_dir), level=logging.DEBUG,
# format='%(asctime)s - %(levelname)s: %(message)s')
# getting timing measurment for apercal only in trigger mode
# if not add_osa_report and not args.do_not_read_timing:
if args.trigger_mode or args.single_node:
try:
get_pipeline_run_time(obs_id, trigger_mode=args.trigger_mode)
except Exception as e:
logger.exception(e)
# the subpages to be created
subpages = ['observing_log', 'summary', 'beamweights', 'inspection_plots', 'preflag', 'crosscal',
'selfcal', 'continuum', 'polarisation', 'line', 'mosaic', 'apercal_log']
logger.info("#### Create report directory structure")
# copy the js and css files
js_file_name = "{0:s}/report_fct.js".format(
hp.__file__.split("/html_report.py")[0])
css_file_name = "{0:s}/report_style.css".format(
hp.__file__.split("/html_report.py")[0])
# for copying osa_files:
osa_nb_file = "{0:s}/OSA_report.ipynb".format(
hp.__file__.split("/html_report.py")[0])
osa_py_file = "{0:s}/osa_functions.py".format(
hp.__file__.split("/html_report.py")[0])
osa_files = [osa_nb_file, osa_py_file]
# Check that directory of the qa exists
if not os.path.exists(qa_dir):
logger.error(
"Directory {0:s} does not exists. Abort".format(qa_report_dir))
return -1
else:
# do things that should only happen on happili-01 when the OSA runs this function
if not args.trigger_mode and not args.page_only:
if host_name == "happili-01" or args.single_node:
# go through some of the subpages and process numpy files
for page in subpages:
# exclude non-apercal modules (and mosaic)
if page != "apercal_log" or page != "inspection_plots" or page != "summary" or page != "mosaic":
# just run it on preflag for now
if page == "preflag" or page == "crosscal" or page == "convert" or page == "selfcal" or page == "continuum":
# get information from numpy files
try:
logger.info(
"## Getting summary table for {}".format(page))
make_nptabel_csv(
obs_id, page, qa_dir, output_path=os.path.join(qa_dir, page))
except Exception as e:
logger.warning(
"## Getting summary table for {} failed".format(page))
logger.exception(e)
else:
logger.info(
"## Getting summary table for {} ... Done".format(page))
# merge plots
if not args.no_merge and not args.single_node:
try:
logger.info(
"## Merging selfcal and crosscal plots")
run_merge_plots(
qa_dir, do_ccal=True, do_scal=True, run_parallel=True, n_cores=5)
except Exception as e:
logger.warning(
"## Merging selfcal and crosscal plots ... Failed")
logger.exception(e)
else:
logger.info(
"## Merging selfcal and crosscal plots ... Done")
# merge the continuum image properties
if page == 'continuum':
try:
merge_continuum_image_properties_table(
obs_id, qa_dir, single_node=args.single_node)
except Exception as e:
logger.warning(
"Merging continuum image properties ... Failed")
logger.exception(e)
else:
logger.info(
"Merging continuum image properties ... Done")
# get line statistics
if page == 'line':
try:
combine_cube_stats(
obs_id, qa_dir, single_node=args.single_node)
except Exception as e:
logger.warning(
"Getting cube statistics ... Failed")
logger.exception(e)
else:
logger.info(
"Getting cube statistics ... Done")
# create dish delay plot
try:
logger.info("Getting dish delay plot")
get_dish_delay_plots(
obs_id, obs_info['Flux_Calibrator'][0], basedir=args.basedir)
except Exception as e:
logger.warning("Getting dish delay plot ... Failed")
logger.exception(e)
else:
logger.info("Getting dish delay plot ... Done")
# create compound beam plots
try:
logger.info("Getting compound beam plots")
make_cb_plots_for_report(obs_id, qa_dir)
except Exception as e:
logger.warning("Getting compound beam plots ... Failed")
logger.exception(e)
else:
logger.info("Getting compound beam plots ... Done")
# Create directory structure for the report
if not add_osa_report:
logger.info("#### Creating directory structrure")
try:
hpd.create_report_dirs(
obs_id, qa_dir, subpages, css_file=css_file_name, js_file=js_file_name, trigger_mode=args.trigger_mode, single_node=args.single_node, do_combine=do_combine, obs_info=obs_info, osa_files=osa_files)
except Exception as e:
logger.error(e)
else:
logger.info("#### Creating directory structrure ... Done")
logger.info("#### Creating report")
try:
hp.create_main_html(qa_report_dir, obs_id, subpages,
css_file=css_file_name, js_file=js_file_name, obs_info=obs_info, osa_report=osa_report)
except Exception as e:
logger.error(e)
logger.info("#### Report. Done ({0:.0f}s)".format(
time.time()-start_time))
if __name__ == "__main__":
main()
| 14,735 | 40.982906 | 212 | py |
dataqa | dataqa-master/scandata.py | import os
import numpy as np
import logging
"""
Define object classes for holding data related to scans
The key thing to specify an object is the scan of the target field
Also need name of fluxcal (for cross-cal solutions)
Want to add functionality for pol-cal for pol solutions (secondary)
This specifies the location of all data, assuming setup of automatic pipeline
(/tank/apertif, distributed across happili nodes)
"""
def get_default_imagepath(scan, basedir=None):
"""
Get the default path for saving images
Args:
scan (int): scan (or task id), e.g. 190303084
basedir (str): based directory of the scan, default /data/apertif/
Returns:
str: Path for storing images
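    Example (illustrative scan number):
        >>> get_default_imagepath(190303084)
        '/data/apertif/190303084/qa/'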
"""
if basedir is not None:
return os.path.join(basedir, '{scan}/qa/'.format(scan=scan))
else:
return '/data/apertif/{scan}/qa/'.format(scan=scan)
class ScanData(object):
def __init__(self, scan, sourcename, basedir=None, trigger_mode=False):
"""
Initialize with scan (taskid) and source name
and place holders for phase and amplitude
Args:
scan (int): scan number, e.g. 190303083
sourcename (str): name of source, e.g. "3C48"
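        Example (illustrative values; expects the standard /data/apertif layout):
            sd = ScanData(190303083, '3C48')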
"""
self.scan = scan
self.sourcename = sourcename
self.imagepathsuffix = ""
self.trigger_mode = trigger_mode
# check if fluxcal is given as 3CXXX.MS or 3CXXX
# Fix to not include .MS no matter what
if self.sourcename[0:2] != '3C':
logging.warning("Fluxcal doesnt' start with 3C - are you sure?")
elif self.sourcename[-2:] == 'MS':
self.sourcename = self.sourcename[:-3]
# also get a directory list and beamlist
self.dirlist = []
self.beamlist = []
# first check what happili node on
# if not happili-01, print a warning and only search locally
hostname = os.uname()[1]
paths = []
# in case it runs on triggered mode, it should only look into
# the apertif dir of this node
if self.trigger_mode:
logging.info(
"--> Running in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(hostname))
path = '/data/apertif/{}'.format(self.scan)
paths = [path]
elif hostname != 'happili-01' and not trigger_mode:
logging.info(
'Not on happili-01, only search local {} for data'.format(hostname))
if basedir is not None:
path = os.path.join(basedir, "{}".format(self.scan))
else:
path = '/data/apertif/{}'.format(self.scan)
paths = [path]
elif hostname == 'happili-01' and basedir is not None:
path = os.path.join(basedir, "{}".format(self.scan))
paths = [path]
else:
# On happili-01, so search all nodes
# ignoring happili-05 - may have to fix this eventually
logging.info(
"Running on {0:s}. Search for data from all nodes".format(hostname))
paths += ['/data/apertif/{}'.format(self.scan)]
paths += ['/data2/apertif/{}'.format(self.scan)]
paths += ['/data3/apertif/{}'.format(self.scan)]
paths += ['/data4/apertif/{}'.format(self.scan)]
for path in paths:
allfiles = os.listdir(path)
for f in allfiles:
full_path = os.path.join(path, f)
if os.path.isdir(full_path) and len(f) == 2 and unicode(f, 'utf-8').isnumeric():
self.dirlist.append(full_path)
# create a list of all directories with full path.
# This should be all beams - there should be no other directories
# f is a string, so add to beam list to also track info about beams
self.beamlist.append(f)
# Initialize phase & amp arrays - common to all types of
self.phase = np.empty(len(self.dirlist), dtype=np.ndarray)
self.amp = np.empty(len(self.dirlist), dtype=np.ndarray)
def get_default_imagepath(self, scan):
"""
Wrapper around get_default_imagepath, this can be overridden in scal, ccal with a suffix
"""
return os.path.join(get_default_imagepath(scan), self.imagepathsuffix)
def create_imagepath(self, imagepath):
"""
Create the image path. If imagepath is None, return a default one (and create it).
Args:
imagepath (str): path where images should be stored (e.g. "/data/dijkema/190303084" or None)
Returns:
str: image path that was created. Will be equal to input imagepath, or a generated path
"""
if not imagepath:
imagepath = self.get_default_imagepath(self.scan)
if not os.path.exists(imagepath):
logging.info("{} doesn't exist, creating".format(imagepath))
os.makedirs(imagepath)
return imagepath
| 5,043 | 39.677419 | 120 | py |
dataqa | dataqa-master/make_mosaic_image.py | #!/usr/bin/env python
"""
This script creates a mosaic file from apertif continuum images.
- This script takes the obs_id as an argument,
- looks for continuum images from the pipeline (fits format),
- copies the images into a temporary directory,
- creates a mosaic image in /data/apertif/obs_id/mosaic/
- and deletes the temporary directory.
Parameter
obs_id : int
Observation number which should be assessed
Example:
python make_mosaic_image.py obs_id
"""
import os
import sys
import apercal.libs.lib as lib
import subprocess
import socket
import argparse
#------------------------------------------
#data_dir = '190311152'
#myusername = os.environ['USER']
# Check what host the user is on
#-------------------------------------
host_name = socket.gethostname()
if host_name != "happili-01":
print("WARNING: You are not working on happili-01.")
print("WARNING: The script will not process all beams")
print("Please switch to happili-01")
# Create and parse argument list
#---------------------------------------
parser = argparse.ArgumentParser(
description='Create mosaic image from pipeline continuum images')
# main argument: Observation number
parser.add_argument("obs_id", type=str,
help='Observation Number / Scan Number / TASK-ID')
args = parser.parse_args()
# Basic parameters
#-----------------------------------------
# get the observation number
data_dir = args.obs_id
print(data_dir)
#-------------------------------------------
# search for continuum files in standard pipeline directories
# and copy the files into a temporary directory
# if there is no fits file in the continuum folder, there will be an error message, but the script will continue running
if not os.path.exists('/data/apertif/'+str(data_dir)+'/mosaic/'):
os.mkdir('/data/apertif/'+str(data_dir)+'/mosaic/')
if not os.path.exists('/data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/'):
os.mkdir('/data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/')
beams_1 = os.listdir('/data/apertif/'+str(data_dir))
beams_2 = os.listdir('/data2/apertif/'+str(data_dir))
beams_3 = os.listdir('/data3/apertif/'+str(data_dir))
beams_4 = os.listdir('/data4/apertif/'+str(data_dir))
#print(beams_1)
def copy_data(beams, n, data):
for i in beams:
        if os.path.isdir('/'+str(data)+'/apertif/'+str(data_dir)+'/'+str(i)) and n in i:
os.system('cp -r /'+str(data)+'/apertif/'+str(data_dir)+'/'+str(i)+'/continuum/image_mf_*.fits /data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/image_mf_'+str(i)+'.fits')
copy_data(beams_1, '0', 'data')
copy_data(beams_2, '1', 'data2')
copy_data(beams_3, '2', 'data3')
copy_data(beams_4, '3', 'data4')
#--------------------------------------
# convert fits continuum images into miriad files
# (this may not be needed)
items = os.listdir('/data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/')
def convert_fits():
for i in range(len(items)):
fits = lib.miriad('fits')
fits.in_ = '/data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/'+str(items[i])
fits.out= '/data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/'+str(items[i][:-5])+'.mir'
fits.op = 'xyin'
fits.go()
convert_fits()
#-----------------------------------
# create mosaic image with linmos
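# linmos forms a linear mosaic of all per-beam miriad images matched by the wildcard below
# and writes out a single combined image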
linmos = lib.miriad('linmos')
linmos.in_ = '/data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/image_mf_*.mir'
linmos.out = '/data/apertif/'+str(data_dir)+'/mosaic/'+str(data_dir)+'_mosaic_image'
linmos.go()
#-----------------------------------
# convert mosaic miriad image into fits file
fits = lib.miriad('fits')
fits.in_ = '/data/apertif/'+str(data_dir)+'/mosaic/'+str(data_dir)+'_mosaic_image'
fits.op = 'xyout'
fits.out = '/data/apertif/'+str(data_dir)+'/mosaic/'+str(data_dir)+'_mosaic_image.fits'
fits.go()
#-------------------------------------
# clean up temporary files
os.system('rm -rf /data/apertif/'+str(data_dir)+'/mosaic/cont_tmp/')
print("DONE") | 3,896 | 28.300752 | 173 | py |
dataqa | dataqa-master/run_cube_stats.py | import numpy as np
import sys
import os
import argparse
import glob
import socket
import time
import logging
from apercal.libs import lib
from dataqa.scandata import get_default_imagepath
from dataqa.line.cube_stats import get_cube_stats
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
if __name__ == '__main__':
start_time = time.time()
# Create and parse argument list
# ++++++++++++++++++++++++++++++
parser = argparse.ArgumentParser(description='Run line QA')
# main argument: Observation number
parser.add_argument("obs_id", type=str,
help='Observation Number / Scan Number / TASK-ID')
parser.add_argument("-b", "--basedir", type=str,
help='Data directory without taskid')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal')
args = parser.parse_args()
# get taskid/obs_id/scan
obs_id = args.obs_id
# get the QA directory for this observation
qa_dir = get_default_imagepath(obs_id, basedir=args.basedir)
# get the line QA directory for this observation
qa_line_dir = "{0:s}line".format(qa_dir)
if not os.path.exists(qa_line_dir):
print("Creating directory {0:s}".format(qa_line_dir))
os.mkdir(qa_line_dir)
# Create logging file
lib.setup_logger(
'debug', logfile='{0:s}/get_cube_stats.log'.format(qa_line_dir))
logger = logging.getLogger(__name__)
# check host name
host_name = socket.gethostname()
# get data directories depending on the host name
if args.trigger_mode:
logger.info(
"--> Running line QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id)]
elif host_name != "happili-01" and not args.trigger_mode:
logger.warning("You are not working on happili-01.")
logger.warning("The script will not process all beams")
logger.warning("Please switch to happili-01")
data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id)]
else:
data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id), '/data2/apertif/{0:s}'.format(
obs_id), '/data3/apertif/{0:s}'.format(obs_id), '/data4/apertif/{0:s}'.format(obs_id)]
# run the function to get the cube statistics
# +++++++++++++++++++++++++++++++++++++++++++
try:
get_cube_stats(qa_line_dir, data_base_dir_list)
except Exception as e:
logger.exception(e)
logger.info("Getting cube statistics. Done ({0:.0f}s)".format(
time.time()-start_time))
| 2,849 | 33.337349 | 125 | py |
dataqa | dataqa-master/run_merge_plots.py | from scandata import get_default_imagepath
import argparse
import time
import logging
import os
import glob
import socket
import numpy as np
from PIL import Image
from apercal.libs import lib
from time import time
import pymp
from report.merge_ccal_scal_plots import run_merge_plots
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Merge crosscal and selfcal QA plots')
# 1st argument: File name
parser.add_argument("scan", help='Scan of target field')
parser.add_argument("--do_ccal", action="store_true", default=False,
help='Set to enable merging of only the crosscal plots')
parser.add_argument("--do_scal", action="store_true", default=False,
help='Set to enable merging of only the selfcal plots')
parser.add_argument("--run_parallel", action="store_true", default=False,
help='Set to run the script in parallel')
parser.add_argument('-b', '--basedir', default=None,
help='Data directory')
    parser.add_argument('--n_cores', type=int, default=5,
                        help='Number of cores to use when merging plots in parallel')
args = parser.parse_args()
# get the QA directory
qa_dir = get_default_imagepath(args.scan, basedir=args.basedir)
# start logging
# Create logging file
lib.setup_logger(
'info', logfile=os.path.join(qa_dir, 'merge_plots.log'))
logger = logging.getLogger(__name__)
start_time = time()
logger.info("#### Merging plots ...")
try:
run_merge_plots(
qa_dir, do_ccal=args.do_ccal, do_scal=args.do_scal, run_parallel=args.run_parallel, n_cores=args.n_cores)
except Exception as e:
logger.warning("#### Merging plots ... Failed ({0:.0f}s)".format(
time()-start_time))
logger.exception(e)
else:
logger.info("#### Merging plots ... Done ({0:.0f}s)".format(
time()-start_time))
| 1,935 | 29.730159 | 117 | py |
dataqa | dataqa-master/run_qa.py | """
This script contains functionality to run all QA automatically after being triggered at the end of apercal
"""
import os
import time
import numpy as np
import logging
import socket
from apercal.libs import lib
from apercal.subs import calmodels as subs_calmodels
from astropy.table import Table
from dataqa.scandata import get_default_imagepath
def run_triggered_qa(targets, fluxcals, polcals, steps=None, basedir=None, osa=''):
"""Function to run all QA steps.
Function is called as
return_msg = run_triggered_qa(
tdict['target'], tdict['cal1'], tdict['cal2'])
With the first three variables defined (the same way as autocal) as
targets = (190505048, 'LH_WSRT', array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
fluxcals = [(190505017, '3C147_9', 9), (190505016, '3C147_8', 8), (190505015, '3C147_7', 7), (190505014, '3C147_6', 6), (190505013, '3C147_5', 5),
(190505012, '3C147_4', 4), (190505011, '3C147_3', 3), (190505010, '3C147_2', 2), (190505009, '3C147_1', 1), (190505008, '3C147_0', 0)]
polcals = [(190506001, '3C286_0', 0), (190506002, '3C286_1', 1), (190506003, '3C286_2', 2), (190506004, '3C286_3', 3), (190506005, '3C286_4', 4),
(190506006, '3C286_5', 5), (190506007, '3C286_6', 6), (190506008, '3C286_7', 7), (190506009, '3C286_8', 8), (190506010, '3C286_9', 9)]
If steps is not provided then all steps except mosaic will be performed:
steps = ['preflag', 'crosscal', 'selfcal',
'continuum', 'line', 'mosaic', 'report']
For all steps including mosaic:
steps = ['preflag', 'crosscal', 'selfcal',
'continuum', 'line', 'mosaic', 'report']
It is possible to select a certain step:
steps = ['mosaic']
test call can look like this:
from dataqa.run_qa import run_triggered_qa
run_triggered_qa((190505048, 'LH_WSRT', [0]), [(190505048, '3C147_10', 10)], [(190505048, '3C286_10', 10)], steps=['report'])
"""
# for time measurement
start_time = time.time()
# Process input parameters
# (same as in start_apercal_pipeline)
# ========================
(taskid_target, name_target, beamlist_target) = targets
if fluxcals:
name_fluxcal = str(fluxcals[0][1]).strip().split('_')[0].upper()
else:
name_fluxcal = ''
if polcals:
name_polcal = str(polcals[0][1]).strip().split('_')[0].upper()
else:
name_polcal = ''
if steps is None:
# steps = ['preflag', 'crosscal', 'selfcal',
# 'continuum', 'line', 'mosaic', 'report']
# steps = ['inspection_plots', 'beamweights', 'preflag', 'crosscal', 'selfcal',
# 'continuum', 'line', 'report']
        # due to an issue with the beamweights script, this step is currently not performed
steps = ['inspection_plots', 'preflag', 'crosscal', 'selfcal',
'continuum', 'line', 'report']
# Set up
# ======
# Get host name
host_name = socket.gethostname()
# QA directory
if basedir is not None:
qa_dir = get_default_imagepath(taskid_target, basedir=basedir)
else:
qa_dir = get_default_imagepath(taskid_target)
basedir = "/data/apertif"
# check that path exists
if not os.path.exists(qa_dir):
print(
"Directory {0:s} does not exist and will be created".format(qa_dir))
try:
os.mkdir(qa_dir)
except Exception as e:
print(e)
# start log file
# logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
# filename='{0:s}{1:s}_triggered_qa.log'.format(qa_dir, host_name), level=logging.DEBUG)
lib.setup_logger(
'debug', logfile='{0:s}{1:s}_triggered_qa.log'.format(qa_dir, host_name))
logger = logging.getLogger(__name__)
logger.info("#######################")
logger.info("Input parameters:")
logger.info("target={0:s}".format(str(targets)))
logger.info("fluxcals={0:s}".format(str(fluxcals)))
logger.info("polcals={0:s}".format(str(polcals)))
logger.info("#######################")
logger.info('#######################')
logger.info('#### Running all QA steps on {0:s}'.format(host_name))
logger.info('#######################')
# If both fluxcal and polcal polarized, remove polcal
# (taken from start_pipeline)
if subs_calmodels.is_polarised(name_polcal) and subs_calmodels.is_polarised(name_fluxcal):
name_polcal = ""
if (fluxcals and fluxcals != '') and (polcals and polcals != ''):
assert(len(fluxcals) == len(polcals))
# avoid symmetry bias, if there is only a polcal but no fluxcal, switch them
if fluxcals is None and polcals is not None:
logger.info(
"Only polcal was provided. Setting polcal {} to fluxcal".format(name_polcal))
fluxcals, polcals = polcals, fluxcals
name_fluxcal, name_polcal = name_polcal, name_fluxcal
name_polcal = ""
# Exchange polcal and fluxcal if specified in the wrong order
# (taken from start_pipeline)
# (except for how the names are switched)
elif not subs_calmodels.is_polarised(name_polcal) and name_polcal != '':
if subs_calmodels.is_polarised(name_fluxcal):
logger.debug("Switching polcal and fluxcal because " + name_polcal +
" is not polarised")
fluxcals, polcals = polcals, fluxcals
name_fluxcal, name_polcal = name_polcal, name_fluxcal
#name_polcal = str(polcals[0][1]).strip()
else:
logger.debug("Setting polcal to '' since " +
name_polcal + " is not polarised")
name_polcal = ""
elif name_polcal != '':
logger.debug("Polcal " + name_polcal + " is polarised, all good")
logger.info("## Observation of target: {0:s}, flux calibrator: {1:s}, polarisation calibrator: {2:s}".format(
name_target, name_fluxcal, name_polcal))
# Write information about the observation into a txt file for later
# This information is important for the OSA report
# =================================================================
# flux calibrator tid list
flux_cal_tid_list = [cal[0] for cal in fluxcals]
# pol calibrator tid list
if name_polcal != '':
pol_cal_tid_list = [cal[0] for cal in polcals]
else:
pol_cal_tid_list = []
summary_table = Table([
[taskid_target],
[name_target],
[name_fluxcal],
[str(flux_cal_tid_list).replace("]", "").replace("[", "")],
[name_polcal],
[str(pol_cal_tid_list).replace("]", "").replace("[", "")],
[osa]], names=(
'Obs_ID', 'Target', 'Flux_Calibrator', 'Flux_Calibrator_Obs_IDs', 'Pol_Calibrator', 'Pol_Calibrator_Obs_IDs', 'OSA'))
table_name = "{0}_obs.ecsv".format(taskid_target)
table_name_with_path = os.path.join(qa_dir, table_name)
try:
summary_table.write(
table_name_with_path, format='ascii.ecsv', overwrite=True)
except Exception as e:
logger.warning("Saving observation information in {0} failed.".format(
table_name_with_path))
logger.exception(e)
else:
logger.info(
("Saving observation information in {0} ... Done.".format(table_name_with_path)))
# Inspection Plots
# ================
if 'inspection_plots' in steps:
start_time_inspection_plot = time.time()
# for the target it is enough to do it only for happili-01
# as they do not depend on the beam
# for the flux and pol calibrator, they have to be run on every node
# get inspection plots for target
if host_name == "happili-01":
logger.info(
"#### Inspection plot QA for {}...".format(name_target))
try:
inspection_plot_msg = os.system(
'python /home/apercal/dataqa/run_inspection_plot.py {0:d} {1:s} --basedir={2}'.format(taskid_target, name_target, basedir))
logger.info(
"Getting inspection plots finished with msg {0}".format(inspection_plot_msg))
logger.info(
"#### Inspection plot QA {0}... Done ".format(name_target))
except Exception as e:
logger.warning(
"Inspection plot QA for {} failed.".format(name_target))
logger.exception(e)
# get inspection plot for flux calibrator
logger.info("#### Inspection plot QA for {}...".format(name_fluxcal))
for (taskid_cal, name_cal, beamnr_cal) in fluxcals:
try:
inspection_plot_msg = os.system(
'python /home/apercal/dataqa/run_inspection_plot.py {0:d} {1:s} -c --beam={2:d} --cal_id={3:d} --basedir={4}'.format(taskid_target, name_fluxcal, beamnr_cal, taskid_cal, basedir))
logger.info(
"Getting inspection plots finished with msg {0}".format(inspection_plot_msg))
logger.info("#### Inspection plot QA for {0} beam {1} ... Done".format(
name_fluxcal, beamnr_cal))
except Exception as e:
logger.warning(
"Inspection plot QA for {} beam {1} failed.".format(name_fluxcal, beamnr_cal))
logger.exception(e)
# get inspection plot for pol calibrator if it exists
if name_polcal != '':
logger.info(
"#### Inspection plot QA for {}...".format(name_polcal))
for (taskid_cal, name_cal, beamnr_cal) in polcals:
try:
inspection_plot_msg = os.system(
'python /home/apercal/dataqa/run_inspection_plot.py {0:d} {1:s} -c --beam={2:d} --cal_id={3:d} --basedir={4}'.format(taskid_target, name_polcal, beamnr_cal, taskid_cal, basedir))
logger.info(
"Getting inspection plots finished with msg {0}".format(inspection_plot_msg))
logger.info("#### Inspection plot QA for {0} beam {1} ... Done".format(
name_polcal, beamnr_cal))
except Exception as e:
logger.warning(
"Inspection plot QA for {} beam {1} failed.".format(name_polcal, beamnr_cal))
logger.exception(e)
logger.info("#### Inspection plot QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_inspection_plot))
else:
logger.warning("#### Did not perform inspection plot QA")
# Beamweights Plots
# =================
if 'beamweights' in steps:
start_time_beamweights = time.time()
# this needs to run on every node
logger.info(
"#### Beamweights QA for {}...".format(name_fluxcal))
try:
beamweights_msg = os.system(
'python /home/apercal/dataqa/run_beamweights_plots.py {0:d} {1:s} -t 20'.format(taskid_target, name_fluxcal))
logger.info(
"Getting Beamweightss finished with msg {0}".format(beamweights_msg))
except Exception as e:
logger.warning(
"Beamweights QA for {} failed.".format(name_fluxcal))
logger.exception(e)
else:
logger.info("#### Beamweights QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_beamweights))
else:
logger.warning("#### Did not perform Beamweights QA")
# Preflag QA
# ==========
if 'preflag' in steps and host_name == "happili-01":
logger.info("#### Running preflag QA ...")
start_time_preflag = time.time()
try:
preflag_msg = os.system(
'python /home/apercal/dataqa/run_preflag_qa.py {0:d} --basedir={1}'.format(taskid_target, basedir))
logger.info(
"Preflag QA finished with msg {0}".format(preflag_msg))
logger.info("#### Running preflag QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_preflag))
except Exception as e:
logger.warning("Preflag QA failed. Continue with next QA")
logger.exception(e)
# Disabled rfinder
# try:
# preflag_msg = os.system(
# 'python /home/apercal/dataqa/run_rfinder.py {0:d} {1:s} --trigger_mode'.format(taskid_target, name_fluxcal))
# logger.info(
# "Preflag QA finished with msg {0}".format(preflag_msg))
# logger.info("#### Running preflag QA ... Done (time {0:.1f}s)".format(
# time.time()-start_time_preflag))
# except Exception as e:
# logger.warning("Preflag QA failed. Continue with next QA")
# logger.exception(e)
else:
logger.warning("#### Did not perform preflag QA")
# Crosscal QA
# ===========
if 'crosscal' in steps and name_fluxcal != '':
logger.info('#### Running crosscal QA ...')
start_time_crosscal = time.time()
try:
crosscal_msg = os.system(
'python /home/apercal/dataqa/run_ccal_plots.py {0:d} "{1:s}" "{2:s}" --basedir={3} --trigger_mode'.format(taskid_target, name_fluxcal, name_polcal, basedir))
logger.info(
"Crosscal QA finished with msg {0}".format(crosscal_msg))
logger.info("#### Running crosscal QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_crosscal))
except Exception as e:
logger.warning("Crosscal QA failed. Continue with next QA")
logger.exception(e)
else:
logger.warning("#### Did not perform crosscal QA")
# Selfcal QA
# ==========
if 'selfcal' in steps:
logger.info('#### Running selfcal QA ...')
start_time_selfcal = time.time()
try:
selfcal_msg = os.system(
'python /home/apercal/dataqa/run_scal_plots.py {0:d} {1:s} --basedir={2} --trigger_mode'.format(taskid_target, name_target, basedir))
logger.info(
"Selfcal QA finished with msg {0}".format(selfcal_msg))
logger.info("#### Running selfcal QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_selfcal))
except Exception as e:
logger.warning("Selfcal QA failed. Continue with next QA")
logger.exception(e)
else:
logger.warning("#### Did not perform selfcal QA")
# Mosaic QA
# ==========
if 'mosaic' in steps and host_name == 'happili-01':
logger.info('#### Mosaic QA is currently not available ...')
# logger.info('#### Running mosaic QA ...')
# start_time_mosaic = time.time()
# try:
# # Create the mosaic
# logger.info('## Making the mosaic ...')
# start_time_make_mosaic = time.time()
# make_mosaic_msg = os.system(
# 'python /home/apercal/dataqa/make_mosaic_image.py {0:d}'.format(taskid_target))
# logger.info(
# "Making mosaic finished with msg {0}".format(make_mosaic_msg))
# logger.info("## Making the mosaic ... Done (time {0:.1f}s)".format(
# time.time()-start_time_make_mosaic))
# # Run the validation tool
# logger.info('## Run validation ...')
# start_time_mosaic_validation = time.time()
# mosaic_validation_msg = os.system(
# 'python /home/apercal/dataqa/run_continuum_validation.py {0:d} --for_mosaic'.format(taskid_target))
# logger.info(
# "Mosaic validation finished with msg {0}".format(mosaic_validation_msg))
# logger.info("## Run validation ... Done (time {0:.1f}s)".format(
# time.time()-start_time_mosaic_validation))
# logger.info("#### Running mosaic QA ... Done (time {0:.1f}s)".format(
# time.time()-start_time_mosaic))
# except Exception as e:
# logger.warning("Mosaic QA failed. Continue with next QA")
# logger.exception(e)
else:
logger.warning("#### Did not perform mosaic QA")
# Line QA
# =======
if 'line' in steps:
logger.info('#### Running line QA ...')
start_time_line = time.time()
try:
# Get cube statistic without continuum subtraction
logger.info(
'## Get cube statistic ...')
start_time_get_cube_stat = time.time()
cube_stat_msg = os.system(
'python /home/apercal/dataqa/run_cube_stats.py {0:d} --basedir={1} --trigger_mode'.format(taskid_target, basedir))
logger.info(
"Cube stat finished with msg {0}".format(cube_stat_msg))
logger.info("## Get cube statistic ... Done (time {0:.1f}s)".format(
time.time()-start_time_get_cube_stat))
# Subtract continuum
# logger.info('## Subtract continuum ...')
# start_time_subtract_continuum = time.time()
# subtract_cont_msg = os.system(
# 'python /home/apercal/dataqa/subtract_continuum.py {0:d} --trigger_mode'.format(taskid_target))
# logger.info(
# "Continuum subtraction finished with msg {0}".format(subtract_cont_msg))
# logger.info("## Subtract continuum ... Done (time {0:.1f}s)".format(
# time.time()-start_time_subtract_continuum))
# # Get cube statistic after continuum subtraction
# logger.info(
# '## Get cube statistic after continuum subtraction ...')
# start_time_get_cube_stat_cont = time.time()
# get_cube_stat_cont_msg = os.system(
# 'python /home/apercal/dataqa/run_cube_stats_cont.py {0:d} --trigger_mode'.format(taskid_target))
# logger.info(
# "Cube stat cont finished with msg {0}".format(get_cube_stat_cont_msg))
# logger.info("## Get cube statistic after continuum subtraction ... Done (time {0:.1f}s)".format(
# time.time()-start_time_get_cube_stat_cont))
logger.info("#### Running line QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_line))
except Exception as e:
logger.warning("Line QA failed. Continue with next QA")
logger.exception(e)
else:
logger.warning("#### Did not perform line QA")
# Continuum QA
# ============
if 'continuum' in steps:
logger.info('#### Running continuum QA ...')
start_time_continuum = time.time()
try:
continuum_msg = os.system(
'python /home/apercal/dataqa/run_continuum_validation.py {0:d} --basedir={1} --trigger_mode'.format(taskid_target, basedir))
logger.info(
"Continuum QA finished with msg {0}".format(continuum_msg))
logger.info("#### Running continuum QA ... Done (time {0:.1f}s)".format(
time.time()-start_time_continuum))
except Exception as e:
logger.warning("Continuum QA failed. Continue with next QA")
logger.exception(e)
else:
logger.warning("#### Did not perform continuum QA")
# Create report
# =============
if 'report' in steps:
# merge the crosscal and selfcal plots for the report
if host_name == 'happili-01':
logger.info('#### Merge crosscal and selfcal plots...')
start_time_merge = time.time()
try:
report_msg = os.system(
'python /home/apercal/dataqa/run_merge_plots.py {0:d} --basedir={1} --do_ccal --do_scal --run_parallel'.format(taskid_target, basedir))
logger.info(
"Merging finished with msg {0}".format(report_msg))
logger.info("#### Merge crosscal and selfcal plots ... Done (time {0:.1f}s)".format(
time.time()-start_time_merge))
except Exception as e:
logger.warning("Merge crosscal and selfcal plots failed.")
logger.exception(e)
# now create the report
logger.info('#### Create report ...')
start_time_report = time.time()
try:
report_msg = os.system(
'python /home/apercal/dataqa/create_report.py {0:d} --basedir={1} --trigger_mode'.format(taskid_target, basedir))
logger.info(
"Report finished with msg {0}".format(report_msg))
logger.info("#### Create report ... Done (time {0:.1f}s)".format(
time.time()-start_time_report))
except Exception as e:
logger.warning("Creating report failed.")
logger.exception(e)
else:
logger.warning("#### Did not create a report")
# Finish
# ======
logger.info('#######################')
logger.info(
'#### Running all QA steps on {0:s} ... Done (time {1:.1f}s)'.format(host_name, time.time()-start_time))
logger.info('#######################')
| 21,273 | 39.676864 | 202 | py |
dataqa | dataqa-master/run_continuum_validation.py | #!/usr/bin/env python
"""
This script contains functionality to run pybdsf on continuum data
for the QA
It can run either on a single image for the mosaic QA or on an observation
number alone for the continuum QA. In the latter case, it will go through
all the beams.
There will be a log file either in
/home/<user>/qa_science_demo_2019/<obs_id>/continuum/<beam>/pybdsf
or
/home/<user>/qa_science_demo_2019/<obs_id>/mosaic/pybdsf
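Example calls (the obs id is an illustrative placeholder):
    python run_continuum_validation.py 190505048
    python run_continuum_validation.py 190505048 --for_mosaic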
"""
import argparse
import numpy as np
import logging
# Need to do this before bdsf import, it sets the default matplotlib backend
import matplotlib
matplotlib.use('Agg')
import bdsf
import os
import time
import socket
from apercal.libs import lib
import sys
import glob
from dataqa.scandata import get_default_imagepath
from dataqa.continuum.qa_continuum import qa_continuum_run_validation
from dataqa.continuum.qa_continuum import qa_get_image_noise_dr_gaussianity
from dataqa.mosaic.qa_mosaic import qa_mosaic_run_validation
if __name__ == '__main__':
start_time = time.time()
# Create and parse argument list
# ++++++++++++++++++++++++++++++
parser = argparse.ArgumentParser(
description='Run validation for continuum or mosaic QA')
# main argument: Observation number
parser.add_argument("obs_id", type=str,
help='Observation Number / Scan Number / TASK-ID')
# Optional argument
parser.add_argument("--mosaic_name", type=str, default='',
                        help='Provide name of the mosaic image. This will run the validation only on this image.')
# Optional argument
parser.add_argument("--for_mosaic", action="store_true", default=False,
help='Set to run for mosaic QA.')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal.')
# parser.add_argument("--overwrite", action="store_true", default=True,
# help='Overwrite existing files')
# parser.add_argument("--beam", type=list, default=[],
# help='Specify a single beam or a list of beams to run pybdsf')
parser.add_argument("-p", "--path", type=str, default=None,
help='Directory to store the output in')
parser.add_argument("-b", "--basedir", type=str, default=None,
help='Data directory without taskid')
# parser.add_argument("--n_processes", type=int, default=1,
# help='Number of cores to use for processing')
args = parser.parse_args()
# Check what host the user is on
# ++++++++++++++++++++++++++++++
host_name = socket.gethostname()
if host_name != "happili-01" and not args.trigger_mode:
print("INFO: You are not working on happili-01.")
print("INFO: The script will not process all beams")
print("Please switch to happili-01")
# Basic parameters
# ++++++++++++++++
# get the observation number
obs_id = args.obs_id
# users home directory
# home_dir = os.path.expanduser('~')
# the mode in which the script runs
# mosaic: run on a file
if args.for_mosaic or args.mosaic_name != '':
run_mode = 'mosaic'
else:
run_mode = 'continuum'
# directory where the output will be of pybdsf will be stored
if args.path is None:
qa_dir = get_default_imagepath(obs_id, basedir=args.basedir)
# check that path exists
if not os.path.exists(qa_dir):
print(
"Directory {0:s} does not exist and will be created".format(qa_dir))
os.mkdir(qa_dir)
else:
qa_dir = args.path
# check the mode to run the validation
if run_mode == 'continuum':
qa_validation_dir = "{0:s}continuum".format(
qa_dir)
else:
qa_validation_dir = "{0:s}mosaic".format(
qa_dir)
# check that this directory exists (just in case)
if not os.path.exists(qa_validation_dir):
print("Directory {0:s} does not exist and will be created".format(
qa_validation_dir))
os.mkdir(qa_validation_dir)
# # 'create another directory to store the pybdsf output
# qa_validation_dir = '{0:s}/validation'.format(qa_validation_dir)
# if not os.path.exists(qa_validation_dir):
# print("Directory {0:s} does not exist and will be created".format(
# qa_validation_dir))
# os.mkdir(qa_validation_dir)
# Run validation depending on the chosen mode
# +++++++++++++++++++++++++++++++++++++++++++
# Create logging file
lib.setup_logger(
'debug', logfile='{0:s}/{1:s}_{2:s}_validation.log'.format(qa_validation_dir, obs_id, run_mode))
logger = logging.getLogger(__name__)
# logging.basicConfig(filename='{0:s}/{1:d}_{2:s}_pybdsf.log'.format(qa_validation_dir, obs_id, run_mode), level=logging.DEBUG,
# format='%(asctime)s - %(levelname)s: %(message)s')
# logger = logging.getLogger(__name__)
# run through continuum mode
if run_mode == 'continuum':
# base directory for data
if args.trigger_mode:
logger.info(
"--> Running continuum QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
data_basedir_list = ['/data/apertif/{0:s}'.format(obs_id)]
elif host_name != "happili-01":
data_basedir_list = ['/data/apertif/{0:s}'.format(obs_id)]
else:
data_basedir_list = ['/data/apertif/{0:s}'.format(obs_id), '/data2/apertif/{0:s}'.format(obs_id),
'/data3/apertif/{0:s}'.format(obs_id), '/data4/apertif/{0:s}'.format(obs_id)]
# run the continuum validation (with pybdsf)
try:
qa_continuum_run_validation(data_basedir_list, qa_validation_dir)
except Exception as e:
logger.error(e)
logger.error("Running continuum validation was not successful")
# # that it ran through
# if validation_run_status == 1:
# logger.info("Finished pybdsf and validation tool successfully.")
# else:
# logger.error(
# "Did not finish pybdsf and validation tool successfully. Check logfile")
# run through mosaic mode
else:
if args.mosaic_name == '':
mosaic_name = "/data/apertif/{0:s}/mosaic/{0:s}_mosaic_image.fits".format(
obs_id)
else:
mosaic_name = args.mosaic_name
# check that the file name exists
if os.path.exists(mosaic_name):
logger.info("Found image file {0:s}".format(mosaic_name))
else:
logger.error(
"Image {0:s} not found. Abort".format(mosaic_name))
# run the validation tool with pybdsf
try:
qa_mosaic_run_validation(mosaic_name, qa_validation_dir)
except Exception as e:
logger.error(e)
logger.error("Running continuum validation was not successful")
# if validation_run_status == 1:
# logger.info("Finished pybdsf and validation tool successfully")
# else:
# logger.error(
# "Did not finish pybdsf and validation tool successfully. Check logfile")
# Get additional QA information
#qa_get_image_noise_dr_gaussianity(mosaic_name, qa_validation_dir)
logger.info("Running validation for {0:s} done. (time {1:.0f}s)".format(
obs_id, time.time()-start_time))
| 7,717 | 35.065421 | 134 | py |
dataqa | dataqa-master/run_inspection_plot.py | """
Script to automatically retrieve inspection plots from ALTA
for the QA
Requires a scan number
Optionally takes a directory for writing plots
"""
import os
import socket
import argparse
from timeit import default_timer as timer
from scandata import get_default_imagepath
from inspection_plots.inspection_plots import get_inspection_plots
import time
from apercal.libs import lib
import logging
def main():
start = timer()
parser = argparse.ArgumentParser(description='Generate selfcal QA plots')
# 1st argument: File name
parser.add_argument("obs_id", help='ID of observation of target field')
parser.add_argument(
"src_name", help='Name of the calibrator or target of the plots')
parser.add_argument("-c", "--calibrator", action="store_true", default=False,
help='Set if a calibrator is used. Also requires beam and cal_id')
parser.add_argument("--beam", type=int, default=None,
help='If src_name is a calibrator set the beam number')
parser.add_argument("--cal_id", type=str, default=None,
help='Obs ID of the calibrator')
parser.add_argument('-p', '--path', default=None,
help='Destination for images')
parser.add_argument('-b', '--basedir', default=None,
help='Directory of obs id')
# this mode will make the script look only for the beams processed by Apercal on a given node
# parser.add_argument("--trigger_mode", action="store_true", default=False,
# help='Set it to run Autocal triggering mode automatically after Apercal.')
args = parser.parse_args()
# If no path is given change to default QA path
if args.path is None:
if args.basedir is not None:
output_path = get_default_imagepath(
args.obs_id, basedir=args.basedir)
else:
output_path = get_default_imagepath(args.obs_id)
# check that selfcal qa directory exists
qa_plot_dir = os.path.join(output_path, "inspection_plots")
if not os.path.exists(qa_plot_dir):
os.mkdir(qa_plot_dir)
else:
qa_plot_dir = args.path
    # create a directory with the src_name to put the plots in
if args.src_name is not None:
qa_plot_dir = os.path.join(qa_plot_dir, args.src_name)
if not os.path.exists(qa_plot_dir):
os.mkdir(qa_plot_dir)
# if it is a calibrator then put the plots into a beam directory
if args.calibrator:
if args.beam is None:
print("ERROR: Please specify beam of calibrator")
return -1
elif args.cal_id is None:
print("ERROR: Please specify id of calibrator")
return -1
else:
is_calibrator = True
qa_plot_dir = os.path.join(
qa_plot_dir, "{0:02d}".format(args.beam))
if not os.path.exists(qa_plot_dir):
os.mkdir(qa_plot_dir)
else:
is_calibrator = False
# Create log file
lib.setup_logger(
'info', logfile=os.path.join(qa_plot_dir, 'get_inspection_plot.log'))
logger = logging.getLogger(__name__)
# Run function to get plots
try:
logger.info("#### Getting inspection plots ...")
start_time_plots = time.time()
get_inspection_plots(args.obs_id, qa_plot_dir,
is_calibrator=is_calibrator, cal_id=args.cal_id)
except Exception as e:
logger.error(e)
logger.error("#### Getting inspection plots failed")
else:
logger.info("#### Getting inspection plots... Done ({0:.0f}s)".format(
time.time()-start_time_plots))
if __name__ == "__main__":
main()
| 3,753 | 32.221239 | 100 | py |
dataqa | dataqa-master/run_ccal_plots.py | #!/usr/bin/env python
"""
Script to automatically run crosscal plots
Requires a scan number
Optionally takes a directory for writing plots
"""
from crosscal import crosscal_plots
from scandata import get_default_imagepath
import argparse
from timeit import default_timer as timer
import logging
import os
from apercal.libs import lib
start = timer()
parser = argparse.ArgumentParser(description='Generate crosscal QA plots')
# 1st argument: File name
parser.add_argument("scan", help='Scan of target field')
parser.add_argument("fluxcal", help='Fluxcal name')
parser.add_argument("polcal", help='Polcal name')
parser.add_argument('-p', '--path', default=None,
help='Destination for images')
parser.add_argument('-b', '--basedir', default=None,
help='Data directory')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal.')
args = parser.parse_args()
# If no path is given change to default QA path
if args.path is None:
output_path = get_default_imagepath(args.scan, basedir=args.basedir)
# check that crosscal qa directory exists
output_path = "{0:s}crosscal/".format(output_path)
if not os.path.exists(output_path):
os.mkdir(output_path)
else:
output_path = args.path
# Create logging file
lib.setup_logger(
'debug', logfile='{0:s}run_ccal_plots.log'.format(output_path))
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG)
# Create crosscal plots
crosscal_plots.make_all_ccal_plots(
args.scan, args.fluxcal, args.polcal, output_path=output_path, basedir=args.basedir, trigger_mode=args.trigger_mode)
end = timer()
logger.info('Elapsed time to generate cross-calibration data QA inspection plots is {} minutes'.format(
(end - start)/60.))
#time in minutes
| 1,987 | 30.0625 | 120 | py |
dataqa | dataqa-master/run_beamweights_plots.py | # inspect_beamweights: Make plots of beam weights from any observation
# K.M.Hess 27/06/2019 ([email protected])
# adapted for dataQA by Robert Schulz
__author__ = "Tammo Jan Dijkema & Kelley M. Hess & Robert Schulz"
__date__ = "$23-jul-2019 16:00:00$"
__version__ = "0.3"
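# Example invocation (hypothetical task ID and calibrator; omit -b to loop over all beams found in the data directory):
#   python run_beamweights_plots.py 190711001 3C147 -b 1 -t 4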
from argparse import ArgumentParser, RawTextHelpFormatter
import casacore.tables as pt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scandata import get_default_imagepath
import os
import pymp
from apercal.libs import lib
import logging
import glob
import time
###################################################################
def parse_args():
parser = ArgumentParser(description="Plot beam weights for a given scan.",
formatter_class=RawTextHelpFormatter)
parser.add_argument('obs_id', type=str,
help='Specify the task ID number')
parser.add_argument('calibrator', type=str,
help='Specify the calibrator.')
parser.add_argument('-b', '--beam', type=int,
help='Specify the beam to plot. (default: %(default)s).')
parser.add_argument('--subband_step', default=10, type=int,
help='Take every subband_step-th subband. (default: %(default)s).')
parser.add_argument('--base_dir', type=str,
help='Specify the base directory (default: %(default)s).')
parser.add_argument("-p", "--path", type=str,
help='Path to QA output')
parser.add_argument("-t", "--threads", default=1, type=int,
help='Number of threads to use (default: %(default)s).')
args = parser.parse_args()
return args
def convert_weights(mat):
"""Converts 2x64 array to 11x11 array with apertif numbering"""
# test = np.chararray((11,11), itemsize=3)
converted_mat = np.zeros((11, 11), dtype=np.complex64)
for el_num in range(61):
for x_or_y_num, x_or_y_letter in enumerate(['X', 'Y']):
if x_or_y_letter == 'Y' and el_num == 60:
break
# test[give_coord(x_or_y_letter, el_num)] = "{:02d}".format(el_num) + x_or_y_letter
converted_mat[give_coord(x_or_y_letter, el_num)
] = mat[x_or_y_num, el_num]
return converted_mat
def give_coord(x_or_y, el_num):
"""Give the x and y position for an apertif antenna element
Args:
x_or_y (str): Polarization 'X' or 'Y'
        el_num (int): antenna element number from 0 to 60 (X) or 0 to 59 (Y)
Returns:
Tuple[int, int]: y and x coordinates
"""
if el_num < 55:
y_coord = (el_num % 11)
if y_coord % 2 == 0:
x_coord = (el_num // 11) * 2 + (x_or_y == 'Y')
else:
x_coord = (el_num // 11) * 2 + (x_or_y == 'X')
else:
y_coord = (el_num - 55) * 2 + (x_or_y == 'Y')
x_coord = 10
return y_coord, x_coord
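# Quick sanity check of the element-to-grid mapping (values follow directly from the code above):
#   give_coord('X', 0)  -> (0, 0)
#   give_coord('Y', 0)  -> (0, 1)
#   give_coord('X', 60) -> (10, 10)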
###################################################################
def main():
start_time = time.time()
args = parse_args()
obs_id = args.obs_id
flux_cal = args.calibrator
qa_dir = args.path
base_dir = args.base_dir
n_threads = args.threads
subband_step = args.subband_step
# set output directory
if qa_dir is None:
if base_dir is not None:
qa_dir = get_default_imagepath(obs_id, basedir=base_dir)
else:
qa_dir = get_default_imagepath(obs_id)
# check that path exists
if not os.path.exists(qa_dir):
print(
"Directory {0:s} does not exist and will be created".format(qa_dir))
os.makedirs(qa_dir)
data_dir = os.path.dirname(qa_dir).rsplit("qa")[0]
# check the mode to run the validation
qa_beamweights_dir = os.path.join(qa_dir, "beamweights")
# check that this directory exists (just in case)
if not os.path.exists(qa_beamweights_dir):
print("Directory {0:s} does not exist and will be created".format(
qa_beamweights_dir))
os.makedirs(qa_beamweights_dir)
lib.setup_logger(
'debug', logfile='{0:s}/create_beamweights.log'.format(qa_beamweights_dir))
logger = logging.getLogger(__name__)
logger.info("Getting beamweight plots for {}".format(flux_cal))
# get a list of beams if no beam was provided
if args.beam is None:
data_dir_beam_list = glob.glob(os.path.join(data_dir, "[0-3][0-9]"))
# check that there are beams
if len(data_dir_beam_list) == 0:
logger.warning("No beams found in {}".format(data_dir))
return None
else:
beam_list = [int(os.path.basename(beam))
for beam in data_dir_beam_list]
else:
beam_list = [args.beam]
# now go through the beams
for beam_nr in beam_list:
start_time_beam = time.time()
logger.info("Processing beam {}".format(beam_nr))
# check that the given calibrator exists
data_cal_dir = os.path.join(data_dir, "{0:02d}".format(beam_nr))
# calibrator file
cal_file = os.path.join(data_cal_dir, "raw/{}.MS".format(flux_cal))
# check that it exists
if not os.path.exists(cal_file):
logger.warning(
"Could not find calibrator {}. Continue with next beam".format(cal_file))
continue
else:
logger.info("Found calibrator {}".format(cal_file))
# set output directory for plots
qa_beamweights_beam_dir = os.path.join(
qa_beamweights_dir, "{0:02d}".format(beam_nr))
# check that this directory exists (just in case)
if not os.path.exists(qa_beamweights_beam_dir):
logger.info("Directory {0:s} does not exist and will be created".format(
qa_beamweights_beam_dir))
os.makedirs(qa_beamweights_beam_dir)
# Start with one measurement set to set up the size of the array
#
# cal = pt.table(
# "/data/hess/apertif/{}/{}/WSRTA{}_B000.MS/APERTIF_CALIBRATION".format(args.cal_date, args.taskid, args.taskid),
# ack=False)
cal = pt.table(os.path.join(
cal_file, "APERTIF_CALIBRATION"), ack=False)
num_beams = 40
num_subbands = pt.taql(
'select distinct SPECTRAL_WINDOW_ID FROM $cal').nrows()
num_antennas = pt.taql(
'select distinct ANTENNA_ID FROM $cal').nrows()
beamweights = np.zeros(
(num_beams, num_subbands, num_antennas, 11, 11), dtype=np.complex64)
logger.info("Number of subbands in {0} is {1}".format(
os.path.basename(cal_file), num_subbands))
# in case there are no subbands or antennas better check
if num_subbands != 0 and num_antennas != 0:
# Old implementation looped over beams (and I just picked a subband for simplicity, but this could be expanded to loop over subbands)
#
# plot_sub = 350
# for beam_nr in range(40):
# ms_name = "/data/hess/apertif/{}/{}/WSRTA{}_B0{:02}.MS/APERTIF_CALIBRATION".format(args.cal_date, args.taskid,
# args.taskid, beam_nr)
# print(ms_name)
# cal = pt.table(ms_name, ack=False)
# weights_gershape = cal.getcol('BEAM_FORMER_WEIGHTS').reshape((num_subbands, -1, 2, 64))
#
# for subband in range(num_subbands):
# for antenna in range(num_antennas):
# beamweights[beam_nr, subband, antenna] = convert_weights(weights_gershape[subband, antenna])
#
# print("BEAM NUMBER {}".format(beam_nr))
# # fig, axs = plt.subplots(3, 4, figsize=(15, 11))
# fig, axs = plt.subplots(3, 4, figsize=(10, 7))
# fig.suptitle("Beam {}; Subband {}".format(beam_nr, plot_sub), fontsize=14)
# for ax, plot_ant in zip(np.array(axs).flatten(), range(num_antennas)):
# ax.imshow(np.abs(beamweights[beam_nr, plot_sub, plot_ant]), cmap='plasma')
# ax.set_title("Antenna " + str(plot_ant))
# if plot_ant < 8:
# ax.set_xticklabels([])
# for i in range(61):
# x, y = give_coord('X', i)
# ax.text(x - 0.35, y + 0.18, 'X' + str(i), color='white', fontsize=5)
# x, y = give_coord('Y', i)
# ax.text(x - 0.35, y + 0.18, 'Y' + str(i), color='white', fontsize=5)
#
# plt.savefig('/data/hess/apertif/{}/{}_B0{:02}_S{:03}_weights.png'.format(args.cal_date, args.cal_date,
# beam_nr, plot_sub))
# plt.close()
# New implementation because I was just thinking of using a single beam and plotting a bunch of subbands. (quick and dirty solution)
# Beam is chosen by the user and saved in args.beam
# ms_name = "/home/hess/apertif/{}/{:02}/3C147.MS/APERTIF_CALIBRATION".format(
# args.taskid, beam_nr)
# cal = pt.table(ms_name, ack=False)
logger.info("Getting weights")
weights_gershape = cal.getcol(
'BEAM_FORMER_WEIGHTS').reshape((num_subbands, -1, 2, 64))
logger.info("Getting weights ... Done")
# parallelise it to plot faster
with pymp.Parallel(n_threads) as p:
# go throught the subband
for subband in p.range(0, num_subbands, subband_step):
for antenna in range(num_antennas):
beamweights[beam_nr, subband, antenna] = convert_weights(
weights_gershape[subband, antenna])
fig, axs = plt.subplots(3, 4, figsize=(10, 7))
fig.suptitle("Beam {}; Subband {}".format(
beam_nr, subband), fontsize=14)
for ax, plot_ant in zip(np.array(axs).flatten(), range(num_antennas)):
ax.imshow(
np.abs(beamweights[beam_nr, subband, plot_ant]), cmap='plasma')
ax.set_title("Antenna " + str(plot_ant))
if plot_ant < 8:
ax.set_xticklabels([])
for i in range(61):
x, y = give_coord('X', i)
ax.text(x - 0.35, y + 0.18, 'X' + str(i),
color='white', fontsize=5)
x, y = give_coord('Y', i)
ax.text(x - 0.35, y + 0.18, 'Y' + str(i),
color='white', fontsize=5)
plot_name = os.path.join(qa_beamweights_beam_dir, "{0}_{1}_B{2:02d}_S{3:03d}_weights.png".format(
obs_id, flux_cal, beam_nr, subband))
# plt.savefig('/home/hess/apertif/{}/{}_B0{:02}_S{:03}_weights.png'.format(args.taskid, args.cal_date,
# beam_nr, subband))
plt.savefig(plot_name, overwrite=True)
logger.info("Saving plot {}".format(plot_name))
plt.close('all')
logger.info("Processing beam {0} ... Done ({1:.0f}s)".format(
beam_nr, time.time()-start_time_beam))
else:
logger.warning(
"Found {0} subbands and {1} antennas for beam {2} in {3}".format(num_subbands, num_antennas, beam_nr, flux_cal))
logger.info("Getting beamweight plots for {0} ... Done ({1:.0f}s)".format(
flux_cal, time.time()-start_time))
if __name__ == '__main__':
main()
| 12,056 | 41.305263 | 145 | py |
dataqa | dataqa-master/run_cube_stats_cont.py | import numpy as np
import sys
import os
import argparse
import glob
import socket
import time
import logging
from apercal.libs import lib
from dataqa.scandata import get_default_imagepath
from dataqa.line.cube_stats_cont import get_cube_stats_cont
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
if __name__ == '__main__':
start_time = time.time()
# Create and parse argument list
# ++++++++++++++++++++++++++++++
    parser = argparse.ArgumentParser(description='Run cube statistics for line QA')
# main argument: Observation number
parser.add_argument("obs_id", type=str,
help='Observation Number / Scan Number / TASK-ID')
parser.add_argument("-b", "--basedir", type=str,
help='Data directory without taskid')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal')
args = parser.parse_args()
# get taskid/obs_id/scan
obs_id = args.obs_id
# get the QA directory for this observation
qa_dir = get_default_imagepath(obs_id, basedir=args.basedir)
# get the line QA directory for this observation
qa_line_dir = "{0:s}line".format(qa_dir)
if not os.path.exists(qa_line_dir):
print("Creating directory {0:s}".format(qa_line_dir))
os.mkdir(qa_line_dir)
# Create logging file
lib.setup_logger(
'debug', logfile='{0:s}/get_cube_stats.log'.format(qa_line_dir))
logger = logging.getLogger(__name__)
# check host name
host_name = socket.gethostname()
# get data directories depending on the host name
if args.trigger_mode:
logger.info(
"--> Running line QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id)]
elif host_name != "happili-01" and not args.trigger_mode:
logger.warning("You are not working on happili-01.")
logger.warning("The script will not process all beams")
logger.warning("Please switch to happili-01")
data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id)]
else:
data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id), '/data2/apertif/{0:s}'.format(
obs_id), '/data3/apertif/{0:s}'.format(obs_id), '/data4/apertif/{0:s}'.format(obs_id)]
# run the function to get the cube statistics
# +++++++++++++++++++++++++++++++++++++++++++
try:
get_cube_stats_cont(qa_line_dir, data_base_dir_list)
except Exception as e:
logger.error(e)
logger.info("Getting cube statistics. Done ({0:.0f}s)".format(
time.time()-start_time))
| 2,875 | 33.650602 | 125 | py |
dataqa | dataqa-master/run_osa_report_check.py | """
This script contains functionality to check the OSA report
"""
import os
import time
import numpy as np
import logging
import socket
import glob
import argparse
from astropy.table import Table
def osa_report_check(output_file=''):
"""Function to check the available OSA reports.
Basic check if the number of OSA reports match the number of checks
"""
# check that we are running on happili-01
host_name = socket.gethostname()
if host_name != "happili-01":
print("Wrong host. Please use happili-01. Abort")
raise RuntimeError("Wrong host")
# data directory
data_dir = "/data/apertif"
# the osa report backup path is fixed
osa_report_path = "/data/apertif/qa/OSA_reports"
# get a list of existing osa reports
osa_report_list = glob.glob(os.path.join(osa_report_path, "*.json"))
# get a list of taskids for which OSA reports exists
osa_report_taskid = np.array([os.path.basename(report).split("_")[
0] for report in osa_report_list])
# number of osa reports
n_reports = len(osa_report_list)
print("Found {} OSA reports".format(n_reports))
# get a list of taskids on happili 1
# avoid getting files before July 10.
# Not sure if this is the best way to do it
# use pattern matching
# already take into account next year
obs_list_1 = glob.glob(os.path.join(
data_dir, "1907[1-3][0-9][0-9][0-9][0-9]"))
obs_list_2 = glob.glob(os.path.join(
data_dir, "190[8-9][0-3][0-9][0-9][0-9][0-9]"))
obs_list_3 = glob.glob(os.path.join(
data_dir, "191[0-2][0-3][0-9][0-9][0-9][0-9]"))
obs_list_4 = glob.glob(os.path.join(
data_dir, "20[0-1][0-9][0-3][0-9][0-9][0-9][0-9]"))
obs_list = np.array([])
if len(obs_list_1) != 0:
obs_list = np.append(obs_list, obs_list_1)
if len(obs_list_2) != 0:
obs_list = np.append(obs_list, obs_list_2)
if len(obs_list_3) != 0:
obs_list = np.append(obs_list, obs_list_3)
if len(obs_list_4) != 0:
obs_list = np.append(obs_list, obs_list_4)
# obs_list = np.append(np.array(obs_list_1), [np.array(obs_list_2),
# np.array(obs_list_3), np.array(obs_list_4)])
# number of total taskids on disk
n_obs = len(obs_list)
print("Found {} taskids on disk".format(n_obs))
# get the taskid
taskid_list = np.array([os.path.basename(taskid)
for taskid in obs_list])
# get the data taskids that are not in the list of report taskids
# this should give the taskids for which there are no reports
data_taskid_without_report_list = np.setdiff1d(
taskid_list, osa_report_taskid)
# number of taskids without reports
n_taskids_without_report = len(data_taskid_without_report_list)
# try to get the OSA from the master list
osa_master_file = "/home/moss/autocal/csv/ImagingSurveyOSAs-MasterList.csv"
if os.path.exists(osa_master_file):
print("Found OSA master file. Getting OSA responsible for a taskid")
osa_data = Table.read(osa_master_file, format="ascii.csv")
# convert the start and end dates to the taskid format
start_date_list = np.array([int("".join(startdate.split("/")[::-1]))
for startdate in osa_data['startdate']])
end_date_list = np.array([int("".join(enddate.split("/")[::-1]))
for enddate in osa_data['enddate']])
# now get the osa for a given date
osa_match_taskid_list = []
for taskid in data_taskid_without_report_list:
date_index_list = np.where(
start_date_list <= int("20"+taskid[:6]))[0]
if len(date_index_list) == 0:
osa_match_taskid_list.append("N/A")
else:
date_index = date_index_list[-1]
osa_match_taskid_list.append(osa_data['osa'][date_index])
else:
print("Did not find OSA master file. Cannot match taskid to OSA")
osa_data = None
osa_match_taskid_list = None
# print out the taskids:
print("Found {} taskids without an OSA report. These are:".format(
n_taskids_without_report))
for k in range(n_taskids_without_report):
if osa_match_taskid_list is not None:
print("\t {0} (OSA: {1})".format(
data_taskid_without_report_list[k], osa_match_taskid_list[k]))
else:
print("\t {0}".format(data_taskid_without_report_list[k]))
# save to file
if output_file != '':
if osa_match_taskid_list is not None:
output_table = Table(
[data_taskid_without_report_list, osa_match_taskid_list], names=("taskid", "osa"))
else:
output_table = Table(
[data_taskid_without_report_list], names=("taskid"))
output_table.write(output_file, format="ascii.csv", overwrite=True)
if __name__ == "__main__":
print("Checking for which taskids OSA reports exists.")
print("Warning: This script does not check for incomplete reports")
parser = argparse.ArgumentParser(
description='Create overview for QA')
# only optional argument is the output file
parser.add_argument("-o", "--output_file", type=str, default='',
help='Specify to write the output to a csv file')
args = parser.parse_args()
osa_report_check(output_file=args.output_file)
print("Checking for which taskids OSA reports exists ... Done, but")
print("Warning: This script does not check for incomplete reports")
| 5,671 | 35.358974 | 98 | py |
dataqa | dataqa-master/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/run_scal_plots.py | """
Script to automatically run selfcal plots
Requires a scan number
Optionally takes a directory for writing plots
"""
import os
from selfcal import selfcal_plots as scplots
import argparse
from timeit import default_timer as timer
from scandata import get_default_imagepath
from selfcal.selfcal_maps import get_selfcal_maps
import time
from apercal.libs import lib
import logging
start = timer()
parser = argparse.ArgumentParser(description='Generate selfcal QA plots')
# 1st argument: File name
parser.add_argument("scan", help='Scan of target field')
parser.add_argument("target", help='Target name')
parser.add_argument('-p', '--path', default=None,
help='Destination for images')
parser.add_argument('-b', '--basedir', default=None,
help='Data directory')
parser.add_argument('-M', '--maps', default=True,
action='store_false', help='Do not generate selfcal maps')
parser.add_argument('-P', '--phase', default=True,
action='store_false', help='Do not generate phase plots')
parser.add_argument('-A', '--amplitude', default=True,
action='store_false', help='Do not generate amplitude plots')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal.')
args = parser.parse_args()
# If no path is given change to default QA path
if args.path is None:
output_path = get_default_imagepath(args.scan, basedir=args.basedir)
# check that selfcal qa directory exists
output_path = os.path.join(output_path, "selfcal/")
if not os.path.exists(output_path):
os.mkdir(output_path)
else:
output_path = args.path
# Create log file
lib.setup_logger(
'info', logfile='{0:s}run_scal_plots.log'.format(output_path))
logger = logging.getLogger(__name__)
# Get selfcal maps
if args.maps:
try:
logger.info("#### Creating selfcal maps ...")
start_time_maps = time.time()
get_selfcal_maps(args.scan, output_path,
trigger_mode=args.trigger_mode)
logger.info("#### Creating selfcal maps. Done ({0:.0f}s)".format(
time.time()-start_time_maps))
except Exception as e:
logger.error(e)
logger.error("#### Creating selfcal maps failed")
else:
logger.info("#### Not generating selfcal maps")
# Get phase plots
if args.phase:
try:
logger.info("#### Creating phase plots")
start_time_plots = time.time()
PH = scplots.PHSols(args.scan, args.target,
trigger_mode=args.trigger_mode, basedir=args.basedir)
PH.get_data()
PH.plot_phase(imagepath=output_path)
logger.info('#### Done with phase plots ({0:.0f}s)'.format(
time.time()-start_time_plots))
except Exception as e:
logger.error(e)
logger.error("Creating phase plots failed.")
else:
logger.info("#### Not generating phase plots")
# Get amplitude plots
if args.amplitude:
try:
logger.info("#### Creating amplitude plots")
start_time_plots = time.time()
AMP = scplots.AMPSols(args.scan, args.target,
trigger_mode=args.trigger_mode, basedir=args.basedir)
AMP.get_data()
AMP.plot_amp(imagepath=output_path)
logger.info('#### Done with amplitude plots ({0:.0f}s)'.format(
time.time()-start_time_plots))
except Exception as e:
logger.error(e)
logger.error("Creating amplitude plots failed.")
else:
logger.info("#### Not generating amplitude plots")
end = timer()
print('Elapsed time to generate self-calibration data QA inspection plots and images is {} minutes'.format((end - start)/60.))
| 3,890 | 33.433628 | 124 | py |
dataqa | dataqa-master/run_preflag_qa.py | """
Script to automatically run preflag qa
Combines the preflag plots
Requires a scan number
Optionally takes a directory for writing plots
"""
import os
from selfcal import selfcal_plots as scplots
import argparse
from timeit import default_timer as timer
from scandata import get_default_imagepath
from preflag import preflag_plots
import time
from apercal.libs import lib
import logging
start = timer()
parser = argparse.ArgumentParser(description='Combine preflag QA plots')
# 1st argument: File name
parser.add_argument("scan", help='Scan of target field')
# path options
parser.add_argument('-p', '--path', default=None,
help='Destination for images')
parser.add_argument('-b', '--basedir', default=None,
help='Directory where scan is located')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal.')
args = parser.parse_args()
# If no path is given change to default QA path
if args.path is None:
if args.basedir is not None:
qa_dir = get_default_imagepath(args.scan, basedir=args.basedir)
else:
qa_dir = get_default_imagepath(args.scan)
# check that selfcal qa directory exists
qa_preflag_dir = os.path.join(qa_dir, "preflag")
if not os.path.exists(qa_preflag_dir):
os.mkdir(qa_preflag_dir)
else:
qa_preflag_dir = args.path
# Create log file
lib.setup_logger(
'info', logfile=os.path.join(qa_preflag_dir, 'run_preflag_qa.log'))
logger = logging.getLogger(__name__)
logger.info("Running preflag QA")
# now combine the plots
try:
start_time = time.time()
preflag_plots.combine_preflag_plots(
qa_preflag_dir, trigger_mode=args.trigger_mode)
except Exception as e:
logger.warning("Running preflag QA failed")
logger.exception(e)
else:
logger.warning("Running preflag QA ... Done ({0:.0f}s)".format(
time.time()-start_time))
| 2,078 | 27.875 | 94 | py |
dataqa | dataqa-master/run_rfinder.py | #!/usr/bin/env python
"""
Script to automatically run RFInder on flux calibrator
Requires a taskID and name of flux calibrator
"""
import os
import argparse
from timeit import default_timer as timer
import logging
from apercal.libs import lib
from dataqa.scandata import get_default_imagepath
import socket
import glob
start = timer()
parser = argparse.ArgumentParser(description='Run RFInder')
# 1st argument: taskID
parser.add_argument("taskID", help='Task ID of target field')
# 2nd argument: fluxcal name
parser.add_argument("fluxcal", help='Fluxcal name')
# 3rd argument: destination for images, optional
parser.add_argument('-p', '--path', default=None,
help='Destination for images')
parser.add_argument('-b', '--basedir', default=None,
help='Data directory')
# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument("--trigger_mode", action="store_true", default=False,
help='Set it to run Autocal triggering mode automatically after Apercal')
args = parser.parse_args()
# If no path is given change to default QA path
if args.path is None:
output_path = get_default_imagepath(args.taskID, basedir=args.basedir)
# check that preflag qa directory exists
output_path = "{0:s}preflag/".format(output_path)
if not os.path.exists(output_path):
os.mkdir(output_path)
else:
output_path = args.path
# Create logging file
lib.setup_logger(
'debug', logfile='{0:s}run_rfinder.log'.format(output_path))
logger = logging.getLogger(__name__)
# get data directories depending on the host name
host_name = socket.gethostname()
if args.trigger_mode:
logger.info(
"--> Running line QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
data_beam_dir_list = glob.glob(
"/data/apertif/{}/[0-3][0-9]".format(args.taskID))
elif host_name != "happili-01" and not args.trigger_mode:
logger.warning("You are not working on happili-01.")
logger.warning("The script will not process all beams")
logger.warning("Please switch to happili-01")
data_beam_dir_list = glob.glob(
"/data/apertif/{}/[0-3][0-9]".format(args.taskID))
else:
logger.info("Running on happili-01. Using data from all nodes.")
data_beam_dir_list = glob.glob(
"/data*/apertif/{}/[0-3][0-9]".format(args.taskID))
# Run RFInder
# iterate over beams
for beam_dir in data_beam_dir_list:
# get beam
b = beam_dir.split("/")[-1]
# get qa path and name MS file
qapath = "{0:s}{1:s}/".format(output_path, b)
msfile = '{0:s}.MS'.format(args.fluxcal)
# create beam directory for QA if necessary
if not os.path.exists(qapath):
os.mkdir(qapath)
datapath = '{0:s}/raw/'.format(beam_dir)
# if b < 10:
# datapath='/data/apertif/{0:s}/{1:02d}/raw/'.format(args.taskID, b)
# if 10 <= b < 20:
# datapath='/data2/apertif/{0:s}/{1:02d}/raw/'.format(args.taskID, b)
# if 20 <= b < 30:
# datapath='/data3/apertif/{0:s}/{1:02d}/raw/'.format(args.taskID, b)
# if 30 <= b < 40:
# datapath='/data4/apertif/{0:s}/{1:02d}/raw/'.format(args.taskID, b)
rfinder_command = ("python /home/apercal/pipeline/bin/rfinder -idir {0:s} -i {1:s} -tel apertif "
"-mode use_flags -odir {2:s} -fl 0 -tStep 5 -yesClp").format(datapath, msfile, qapath)
# print rfinder_command
try:
logging.info("Running RFinder for beam {0:s}".format(b))
os.system(rfinder_command)
logging.info("Running RFinder for beam {0:s} ... Done".format(b))
except Exception as e:
logger.error(e)
logging.error("Running RFinder for beam {0:s} failed".format(b))
# move 2D plot to where report can find it (quick & dirty hack)
old2d = "{0:s}/rfi_q/plots/movies/Time_2Dplot_movie.gif".format(qapath)
new2d = "{0:s}/Time_2D_{1:s}.png".format(qapath, args.fluxcal)
oldaa = "{0:s}/rfi_q/plots/movies/AltAz_movie.gif".format(qapath)
newaa = "{0:s}/test_AltAz_{1:s}.png".format(qapath, args.fluxcal)
try:
os.rename(old2d, new2d)
except:
pass
try:
os.rename(oldaa, newaa)
except:
pass
end = timer()
logger.info('Elapsed time to run RFinder is {} minutes'.format(
(end-start)/60.))
| 4,366 | 33.385827 | 121 | py |
dataqa | dataqa-master/cb_plots.py | # Compound beam overview plots of QA
from __future__ import print_function
__author__ = "E.A.K. Adams"
"""
Functions for producing compound beam plots
showing an overview of data QA
Contributions from R. Schulz
"""
from astropy.io import ascii
from astropy.table import Table
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
import numpy as np
import os
import logging
import pkg_resources
logger = logging.getLogger(__name__)
"""
Globally define beam plotting parameters
So they are updated in one place
"""
radius = 0.2 # radius of beam to plot, in degrees
plotrange = [1.75, -1.75, -1.75, 1.4] # range to plot, RA runs backwards
offset_beam0_x = 1.5
offset_beam0_y = -1.5
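# Minimal usage sketch (hypothetical obs_id and QA directory, not taken from the pipeline itself):
#   make_cb_plots_for_report("190711001", "/data/apertif/190711001/qa/")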
def make_cb_plots_for_report(obs_id, qa_dir, plot_dir=None):
"""
Function to create the different cb plots for the web report.
The function currently creates the following plots
- phase selfcal done
- amplitude selfcal done
- continuum rms (range: 10-50 muJy/beam)
- continuum minor axis (range: 10-15 arcsec)
Args:
obs_id (int): ID of observation
qa_dir (str): Path to QA directory of the observation
plot_dir (str): Optional directory for the plots
"""
logger.info("Creating summary cb plots")
# Create a directory to put all the plots in
if plot_dir is None:
cb_plot_dir = os.path.join(qa_dir, "cb_plots")
else:
cb_plot_dir = plot_dir
if not os.path.exists(cb_plot_dir):
os.mkdir(cb_plot_dir)
logger.info("Creating directory {}".format(cb_plot_dir))
else:
logger.info("Directory {} already exists".format(cb_plot_dir))
# get path for cboffset file
package_name = __name__
file_name = 'cb_offsets.txt'
cboffsets_file = pkg_resources.resource_filename(
package_name, file_name)
logger.info(
"Using file {0} for compound beam offsets".format(cboffsets_file))
# first create the compound beam plot
logger.info("Plotting compound beams")
output_dir = cb_plot_dir
plot_name = "{}_cb_overview".format(obs_id)
make_cb_beam_plot(outputdir=output_dir, outname=plot_name,
cboffsets=cboffsets_file)
logger.info("Plotting compound beams ... Done")
# Create selfcal plots
logger.info("Creating cb plot for selfcal")
selfcal_summary_file = os.path.join(
qa_dir, "selfcal/{}_selfcal_summary.csv".format(obs_id))
if os.path.exists(selfcal_summary_file):
# first plot phase selfcal
try:
plot_name = "{}_selfcal_phase".format(obs_id)
make_cb_plot_value(selfcal_summary_file, "targetbeams_phase_status",
boolean=True, outputdir=output_dir, outname=plot_name, cboffsets=cboffsets_file)
except Exception as e:
logger.warning("Creating cb plot for selfcal phase ... Failed")
logger.exception(e)
else:
logger.info("Creating cb plot for selfcal phase ... Done")
# now plot amplitude selfcal
try:
plot_name = "{}_selfcal_amp".format(obs_id)
make_cb_plot_value(selfcal_summary_file, "targetbeams_amp_status",
boolean=True, outputdir=output_dir, outname=plot_name, cboffsets=cboffsets_file)
except Exception as e:
logger.warning("Creating cb plot for selfcal amplitude ... Failed")
logger.exception(e)
else:
logger.info("Creating cb plot for selfcal amplitude ... Done")
else:
logger.warning("Could not find {}".format(selfcal_summary_file))
logger.info("Creating cb plot for selfcal ... Failed")
# Create continuum plots
logger.info("Creating cb plot for continuum")
continuum_summary_file = os.path.join(
qa_dir, "continuum/{}_combined_continuum_image_properties.csv".format(obs_id))
if os.path.exists(continuum_summary_file):
# plot rms
try:
plot_name = "{}_continuum_rms".format(obs_id)
make_cb_plot_value(continuum_summary_file, "RMS",
goodrange=[10, 50], outputdir=output_dir, outname=plot_name, cboffsets=cboffsets_file)
except Exception as e:
logger.warning("Creating cb plot for continuum rms ... Failed")
logger.exception(e)
else:
logger.info("Creating cb plot for continuum rms ... Done")
# plot minor beam axis
try:
plot_name = "{}_continuum_beam_min".format(obs_id)
make_cb_plot_value(continuum_summary_file, "BMIN",
goodrange=[10, 15], outputdir=output_dir, outname=plot_name, cboffsets=cboffsets_file)
except Exception as e:
logger.warning(
"Creating cb plot for continuum beam min ... Failed")
logger.exception(e)
else:
logger.info("Creating cb plot for continuum beam min ... Done")
else:
logger.warning("Could not find {}".format(continuum_summary_file))
logger.info("Creating cb plot for continuum ... Failed")
# Create line plots
logger.info("Creating cb plots for line")
line_summary_file = os.path.join(
qa_dir, "line/{}_HI_cube_noise_statistics.ecsv".format(obs_id))
if os.path.exists(line_summary_file):
# read the file
line_summary_data = Table.read(line_summary_file, format="ascii.ecsv")
# number of cubes
n_cubes = np.size(np.unique(line_summary_data['cube']))
# go through the cubes and create plots for each one
for cube_counter in range(n_cubes):
logger.info("Plotting cube {}".format(cube_counter))
cube_data = line_summary_data[np.where(
line_summary_data['cube'] == cube_counter)]
# remove non-existing beams
cube_data['median_rms'][np.where(
cube_data['median_rms'] == -1)] = np.nan
# convert median rms into mJy
cube_data['median_rms'] *= 1.e3
# plot name
plot_name = "{0}_HI_median_rms_cube{1}".format(
obs_id, cube_counter)
            # use a different range of good values for the higher-numbered cubes
if n_cubes > 4:
if cube_counter < 7:
goodrange = [0, 2]
else:
goodrange = [0, 3]
else:
if cube_counter < 3:
goodrange = [0, 2]
else:
goodrange = [0, 3]
try:
make_cb_plot_value(cube_data, "median_rms",
goodrange=goodrange, outputdir=output_dir, outname=plot_name, cboffsets=cboffsets_file)
            except Exception as e:
                logger.warning("Creating cb plot for line ... Failed")
                logger.exception(e)
logger.info("Creating cb plot for line ... Done")
else:
logger.warning("Could not find {}".format(line_summary_file))
logger.info("Creating cb plot for line ... Failed")
logger.info("Creating summary cb plots ... Done")
def make_cb_beam_plot(cboffsets='cb_offsets.txt',
outputdir=None, outname=None):
"""
This function specifically makes a plot of only
the compound beams, labelling and numbering them
"""
# get paths and names set
if outname is None:
outname = 'CB_overview'
if outputdir is not None:
outpath = os.path.join(outputdir, outname)
else:
outpath = outname # write to current directory
# get the beams
cbpos = ascii.read(cboffsets)
# open figure
fig, ax = plt.subplots(figsize=(8, 8))
ax.axis(plotrange) # RA axis runs backwards
# set up beams
beams = np.arange(len(cbpos))
r = radius # beam size
for i, (x1, y1) in enumerate(zip(cbpos['ra'], cbpos['dec'])):
if i == 0:
# offset beam 0
x1 = offset_beam0_x
y1 = offset_beam0_y
# set circle
circle = Circle((x1, y1), r, color='blue', alpha=0.4)
fig.gca().add_artist(circle)
# beams.append(circle)
# write text with value
ax.text(x1, y1, ('CB{0:02}').format(beams[i]),
horizontalalignment='center',
verticalalignment='center', size=18,
fontweight='medium')
# p=PatchCollection(beams, alpha=0.4)
# ax.add_collection(p)
ax.set_xlabel('RA offset, deg', size=15)
ax.set_ylabel('Dec offset, deg', size=15)
ax.set_title('{}'.format(outname), fontweight='medium', size=24)
plt.savefig('{}.png'.format(outpath), overwrite=True,
bbox_inches='tight', dpi=200)
def make_cb_plot_value(filename, column, goodrange=None,
boolean=False, cboffsets='cb_offsets.txt',
outputdir=None, outname=None):
"""
Take a csv file or a table object and produce the plots
Provide the column name to plot
Optionally provide a range of good values that
will have beams plotted in green
Or set boolean=True to plot colors based on a boolean value
Lack of data in plotted in grey
Default is to plot as red
Color scheme should be updated to
take into account colorblindness
"""
# check if file name is a string
if type(filename) == str:
# read the csv file
table = ascii.read(filename, format='csv')
# otherwise assume a table object as been given
else:
table = filename
# print(table.colnames)
# check that column name exists:
if column in table.colnames:
pass
else:
# select a column, assume first column is beams
print(('Warning! Column name {0} not found.'
' Using third column of csv, {1} as default.'.format(column,
table.colnames[2])))
column = table.colnames[2]
# gets paths and names set
if outname is None:
outname = column
if outputdir is not None:
outpath = "{0}/{1}".format(outputdir, outname)
else:
outpath = outname # write to current directory
# check automatically if a column is boolean
if (table[column][0]) == 'False' or (table[column][0] == 'True'):
boolean = True
# make an array to hold colors:
colors = np.full(40, 'r')
# colors = np.array(['r' for k in range(40)])
# find empty beams
# not all tables have this so do as try/except
try:
# 'exist' column is read as strings
nanind = np.where(table['exist'] == 'False')[0]
colors[nanind] = 'k'
except:
# assume that the columns have NaNs
if table[column].dtype == np.float64:
nanind = np.where(np.isnan(table[column]) == True)[0]
colors[nanind] = 'k'
# find "good" values
if goodrange != None:
if len(goodrange) == 2:
# first turn good data into floats:
goodind = np.where(np.logical_and(table[column] >= goodrange[0],
table[column] <= goodrange[1]))[0]
# goodind = np.where(table[column]>=goodrange[0])[0]
# print(goodind)
colors[goodind] = 'green'
if boolean == True:
# assume column array is true/false
# find trues and make green
goodind = np.where(table[column] == 'True')[0]
colors[goodind] = 'green'
# in this case, also make sure to overwrite anything done by exist column
badind = np.where(table[column] == 'False')[0]
colors[badind] = 'red'
# get the beams
cbpos = ascii.read(cboffsets)
# open figure
fig, ax = plt.subplots(figsize=(8, 8))
ax.axis(plotrange) # RA axis runs backwards
# set up beams
beams = []
r = radius # beam size
for i, (x1, y1) in enumerate(zip(cbpos['ra'], cbpos['dec'])):
if i == 0:
# offset beam 0
x1 = offset_beam0_x
y1 = offset_beam0_y
# set circle
circle = Circle((x1, y1), r, color=colors[i], alpha=0.4)
fig.gca().add_artist(circle)
# beams.append(circle)
# only add text if the color is not grey:
if colors[i] != 'k':
# write text with value
value = table[column][i]
if type(value) == float or type(value) == np.float64:
value_label = "{0:.1f}".format(value)
else:
value_label = str(value)
ax.text(x1, y1, ('{0}').format(value_label),
horizontalalignment='center',
verticalalignment='center', size=18,
fontweight='medium')
# p=PatchCollection(beams, alpha=0.4)
# ax.add_collection(p)
ax.set_xlabel('RA offset, deg', size=15)
ax.set_ylabel('Dec offset, deg', size=15)
ax.set_title('{}'.format(outname), size=24, fontweight='medium')
plt.savefig('{}.png'.format(outpath), overwrite=True,
bbox_inches='tight', dpi=200)
| 13,115 | 36.261364 | 122 | py |
dataqa | dataqa-master/plot_schedule.py | #!/usr/bin/env python
"""
This script visualises the observing schedule by producing elevation and HA plots as a function of time.
Written in python3 and using pandas, astropy and matplotlib.
Parameter
sched_file : str
csv file with the schedule.
Example:
python plot_schedule.py schedule.csv
"""
import argparse
import numpy as np
from astropy.coordinates import EarthLocation, AltAz, ICRS, SkyCoord, FK5, Galactic
import astropy.units as u
from astropy.time import Time
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['xtick.direction']='in'
mpl.rcParams['ytick.direction']='in'
#--------------------------------------------------
# Create and parse argument list
#--------------------------------------------------
parser = argparse.ArgumentParser(
    description='Plot schedule')
# main argument: schedule file
parser.add_argument("sched_file", type=str,
help='schedule file (.csv)')
args = parser.parse_args()
schedule_file = args.sched_file
#schedule_file = 'imaging_sched_longcal_ppmod30_04-08_21h_EAKA.csv'
#--------------------------------------------------
# Basic parameters
#--------------------------------------------------
#WSRT = EarthLocation.of_address("Westerbork radio telescope")
WSRT = EarthLocation.from_geocentric(3828653.54341874, 442651.40305733, 5064900.15903374, unit='m')
data = pd.read_csv(schedule_file)
#--------------------------------------------------
def add_12(vals):
    """Wrap hour-angle values (in hours) into the range [-12, 12], in place, and return them."""
    for i in range(len(vals)):
        if vals[i] > 12:
            vals[i] = vals[i] - 24
        if vals[i] < -12:
            vals[i] = vals[i] + 24
    return vals
#--------------------------------------------------
# calculate relavant values for the plot
#--------------------------------------------------
data['time_1'] = data.apply (lambda row: Time(str(row['date1'])+'T'+str(row['time1'])), axis=1)
data['time_2'] = data.apply (lambda row: Time(str(row['date2'])+'T'+str(row['time2'])), axis=1)
data['obs_length'] = data.apply (lambda row: (row['time_2'] - row['time_1']).to(u.h).value, axis=1)
data['timespan'] = data.apply (lambda row: row['time_1'] + np.linspace(0, row['obs_length'], 100)*u.hour, axis=1)
data['altazframe'] = data.apply (lambda row: AltAz(location=WSRT, obstime=row['timespan']), axis=1)
data['my_obj'] = data.apply (lambda row: SkyCoord(ra=row['ra'], dec=row['dec'], unit=(u.hourangle, u.deg)), axis=1)
data['my_obj_altaz'] = data.apply (lambda row: row['my_obj'].transform_to(row['altazframe']), axis=1)
data['my_obj_fk5'] = data.apply (lambda row: row['my_obj'].transform_to(FK5(equinox=row['timespan'])), axis=1)
data['my_obj_HA'] = data.apply (lambda row: row['my_obj_fk5'].ra - row['timespan'].sidereal_time('apparent', longitude =WSRT.lon), axis=1)
data['LST'] = data.apply (lambda row: row['timespan'].sidereal_time('apparent', longitude =WSRT.lon), axis=1)
data['my_obj_LHA'] = data.apply (lambda row: row['timespan'].sidereal_time('apparent', longitude =WSRT.lon) - row['my_obj_fk5'].ra, axis=1)
data['my_obj_LHA_2'] = data.apply (lambda row: add_12(row['my_obj_LHA'].value), axis=1)
#--------------------------------------------------
# make elevation plot
#--------------------------------------------------
fig, ax = plt.subplots(figsize=[8,5])
ax.set_title(schedule_file)
for i in (range(len(data['date1']))):
ax.plot(data['timespan'][i].datetime, data['my_obj_altaz'][i].alt, label=data['source'][i])
ax.grid(True, alpha=0.3)
ax.set_ylim(0,90)
ax.set_ylabel('Elevation')
ax.set_xlabel('Date and time')
ax.axhline(y=20, c='r', linestyle='--')
ax.legend(bbox_to_anchor=(1.03, 1.05))
plt.savefig(schedule_file[:-4]+'_elevation.png', bbox_inches='tight', dpi=200)
#--------------------------------------------------
# make LHA plot
#--------------------------------------------------
fig, ax = plt.subplots(figsize=[8,5])
ax.set_title(schedule_file)
for i in (range(len(data['date1']))):
ax.plot(data['timespan'][i].datetime, data['my_obj_LHA'][i], label=data['source'][i])
ax.grid(True, alpha=0.3)
#ax.set_ylim(0,90)
ax.set_ylabel('HA')
ax.set_xlabel('Date and time')
ax.axhline(y=6, c='r', linestyle='--')
ax.axhline(y=-6, c='r', linestyle='--')
ax.axhline(y=3.3, c='grey', linestyle='--')
ax.axhline(y=-3.3, c='grey', linestyle='--')
ax.legend(bbox_to_anchor=(1.03, 1.05))
plt.savefig(schedule_file[:-4]+'_LHA.png', bbox_inches='tight', dpi=200)
print('DONE') | 4,446 | 37.336207 | 139 | py |
dataqa | dataqa-master/line/cube_stats.py | """
This file contains functionality to analyze the quality of the data cube
generated by the pipeline for each beam.
"""
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table
import numpy as np
import os
import glob
import socket
import time
import logging
import glob
from dataqa.scandata import get_default_imagepath
import matplotlib.pyplot as plt
# from scipy.optimize import curve_fit
logger = logging.getLogger(__name__)
# def gauss(x, *p):
# """Function to calculate a Gaussian value
# """
# A, mu, sigma = p
# return A*np.exp(-(x-mu)**2/(2.*sigma**2))
def combine_cube_stats(obs_id, qa_dir, single_node=False):
"""
Function to combine the statistic information from all cubes
"""
logger.info("Collecting cube statistics")
host_name = socket.gethostname()
qa_line_dir = os.path.join(qa_dir, "line")
# output file name
table_file_name = os.path.join(
qa_line_dir, "{}_HI_cube_noise_statistics.ecsv".format(obs_id))
# list of data directories
if "/data" in qa_line_dir:
qa_line_dir_list = [qa_line_dir, qa_line_dir.replace(
"/data/", "/data2/"), qa_line_dir.replace("/data/", "/data3/"), qa_line_dir.replace("/data/", "/data4/")]
else:
qa_line_dir_list = [qa_line_dir, qa_line_dir.replace(
"/tank/", "/tank2/"), qa_line_dir.replace("/tank/", "/tank3/"), qa_line_dir.replace("/tank/", "/tank4/")]
# total number of expected beams
# catching the host should make it work on other happilis, too
if host_name == "happili-01" or single_node:
n_beams = 40
else:
happili_number = int(host_name.split("-")[-1])
n_beams = 10 * happili_number
# total number of cubes
n_cubes = 8
# define table columns
# this column will hold the beam number
beam = np.full(n_cubes * n_beams, -1)
# this one will specify the cube
cube = np.full(n_cubes * n_beams, -1)
# this one will store the median rms
median_rms = np.full(n_cubes * n_beams, -1.)
# this one will store the mean rms
mean_rms = np.full(n_cubes * n_beams, -1.)
# this one will store the min rms
min_rms = np.full(n_cubes * n_beams, -1.)
# this one will store the max rms
max_rms = np.full(n_cubes * n_beams, -1.)
# percentile below a limit of 2mJy/beam and 3mJy/beam
percentile_rms_below_2mJy = np.full(n_cubes * n_beams, -1.)
percentile_rms_below_3mJy = np.full(n_cubes * n_beams, -1.)
percentile_rms_below_4mJy = np.full(n_cubes * n_beams, -1.)
# now go through the different cubes
for cube_counter in range(n_cubes):
logger.info(
"Processing noise information for cube {}".format(cube_counter))
# now go through the data directories and get the cube files
for dir_counter in range(len(qa_line_dir_list)):
line_dir = qa_line_dir_list[dir_counter]
cube_list = glob.glob(os.path.join(
line_dir, "[0-3][0-9]/*cube{0:d}_info.csv".format(cube_counter)))
# first fill beam and cube column
# only 10 beams, because there are 4 happili directories
# here beam_counter is only the last digit in the beam number
for beam_counter in range(10):
table_index = n_beams * cube_counter + 10 * dir_counter + beam_counter
beam[table_index] = 10 * dir_counter + beam_counter
cube[table_index] = cube_counter
# in case there are no such cubes, fill only beam and cube column
if len(cube_list) == 0:
logger.warning("No cube files found in {}".format(line_dir))
# if there are cubes, go through them
else:
cube_list.sort()
for file_counter in range(len(cube_list)):
cube_file = cube_list[file_counter]
# read in the file
cube_data = Table.read(cube_file, format="ascii.csv")
# get only the non-nan values
noise = cube_data['noise'][np.isnan(
cube_data['noise']) == False]
# the beam number is part of the file name
beam_nr = int(os.path.basename(cube_file).split("_")[1])
# get the table index based on the beam numbder
table_index = n_beams * cube_counter + beam_nr
beam[table_index] = beam_nr
cube[table_index] = cube_counter
median_rms[table_index] = np.nanmedian(noise)
mean_rms[table_index] = np.nanmean(noise)
min_rms[table_index] = np.nanmin(noise)
max_rms[table_index] = np.nanmax(noise)
percentile_rms_below_2mJy[table_index] = np.size(
np.where(noise < 0.002)[0]) / float(np.size(noise))
percentile_rms_below_3mJy[table_index] = np.size(
np.where(noise < 0.003)[0]) / float(np.size(noise))
percentile_rms_below_4mJy[table_index] = np.size(
np.where(noise < 0.004)[0]) / float(np.size(noise))
logger.info(
"Processing noise information for cube {} ... Done".format(cube_counter))
# create the table
cube_summary = Table([beam, cube, median_rms, mean_rms, min_rms, max_rms, percentile_rms_below_2mJy, percentile_rms_below_3mJy, percentile_rms_below_4mJy], names=(
'beam', 'cube', 'median_rms', 'mean_rms', 'min_rms', 'max_rms', 'precentile_rms_below_2mJy', 'precentile_rms_below_3mJy', 'precentile_rms_below_4mJy'))
cube_summary.write(table_file_name, format="ascii.ecsv", overwrite=True)
logger.info(
"Collecting cube statistics ... Done. Saving to {}".format(table_file_name))
def get_cube_stats(qa_line_dir, data_base_dir_list):
"""Function to get a simple rms per channel
Parameter:
qa_line_dir : str
Directory where the line QA output is stored
data_base_dir_list : list
List of data directories on happili 1 to 4
"""
# go through all four data directories
# ++++++++++++++++++++++++++++++++++++
for data_dir in data_base_dir_list:
start_time_data = time.time()
logger.info("## Going through the beams in {0:s}".format(data_dir))
# getting a list of beams
data_dir_beam_list = glob.glob("{0:s}/[0-3][0-9]".format(data_dir))
# checking whether no beam was found
if len(data_dir_beam_list) != 0:
# sort beam list
data_dir_beam_list.sort()
start_time_beam = time.time()
# going through all the beams that were found
# +++++++++++++++++++++++++++++++++++++++++++
for data_dir_beam in data_dir_beam_list:
# getting the beam
beam = os.path.basename(data_dir_beam)
logger.info("Analyzing beam {0:s}".format(beam))
# setting the output directory for the beam
qa_line_beam_dir = "{0:s}/{1:s}".format(qa_line_dir, beam)
# this directory does not exist create it
if not os.path.exists(qa_line_beam_dir):
logger.info(
"Creating directory {0:s}".format(qa_line_beam_dir))
os.mkdir(qa_line_beam_dir)
# set the name of the of the cube file to analyze
cube_file = "{0:s}/line/cubes/HI_image_cube.fits".format(
data_dir_beam)
# there are several cubes
cube_file_list = glob.glob(cube_file.replace("cube", "cube*"))
# continue only if glob has found cubes
if len(cube_file_list) != 0:
cube_file_list.sort()
for cube_file in cube_file_list:
# open fits file or at least try
try:
fits_hdulist = fits.open(cube_file)
except Exception as e:
logger.exception(e)
continue
logger.info(
"Getting statistics for cube {}".format(cube_file))
# get wcs object
wcs = WCS(fits_hdulist[0].header)
# getting rid of stokes axis and check that it is a cube
if wcs.naxis == 4:
wcs = wcs.dropaxis(3)
cube = fits_hdulist[0].data[0]
elif wcs.naxis == 3:
cube = fits_hdulist[0].data
else:
logger.warning(
"Fits file {0:s} is not a cube".format(cube_file))
continue
# get the shape of the cube
cube_shape = np.shape(cube)
# get the number of channels
n_channels = cube_shape[0]
# creating an astropy table to store information about the cube
cube_info = Table([np.arange(n_channels), np.zeros(
n_channels)], names=('channel', 'noise'))
# define range for histogram
# histrange = np.arange(-0.5, 0.5, 0.0001)
# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
# p0 = [1., 0., 0.001]
# This determines the noise in each channel, using standard rms and also by
# fitting the width of a histogram of image values.
# These give same results for now, but gauss fit could be improved by
# e.g. fitting only to part of histogram
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for ch in range(n_channels):
# get rms
cube_info['noise'][ch] = np.std(cube[ch])
# determine histogram and fit gaussian
# values = cube[ch]
# histch, binedges = np.histogram(
# values, bins=500, range=(-0.2, 0.2))
# bin_centres = binedges[:-1] + np.diff(binedges) / 2
# coeff, var_matrix = curve_fit(
# gauss, bin_centres, histch, p0=p0)
# Get the fitted curve
# hist_fit = gauss(bin_centres, *coeff)
# print 'Fitted mean = ', coeff[1]
# print 'Fitted standard deviation = ', coeff[2]
# cube_info['gauss'][ch] = coeff[2]
# write noise data
cube_info.write(
"{0:s}/beam_{1:s}_{2:s}_info.csv".format(qa_line_beam_dir, beam, os.path.basename(cube_file).rstrip(".fits").split("_")[-1]), format="csv", overwrite=True)
themedian = np.nanmedian(cube_info['noise'])
# print("Median is " + str(round(themedian*1000, 2)) + " mJy")
# Create plot
# +++++++++++
ax = plt.subplot(111)
# plot data and fit
ax.plot(
cube_info['channel'], cube_info['noise'] * 1.e3, color='blue', linestyle='-')
# ax.plot(cube_info['channel'], cube_info['gauss'] *
# 1.e3, color='orange', linestyle='--')
# add axes labels
ax.set_xlabel('Channel number')
ax.set_ylabel('Noise (mJy/beam)')
ax.set_xlim([0, n_channels-1])
# add second axes with frequency
ax_x2 = ax.twiny()
# get frequency for first and last channel
# freq_ticks = np.array(
# [wcs.wcs_pix2world([[0, 0, xtick]], 1)[0, 2]] for xtick in ax.get_xticks())
freq_first_ch = wcs.wcs_pix2world([[0, 0, 0]], 1)[0, 2]
freq_last_ch = wcs.wcs_pix2world(
[[0, 0, n_channels-1]], 1)[0, 2]
ax_x2.set_xlim([freq_first_ch/1.e6, freq_last_ch/1.e6])
ax_x2.set_xlabel("Frequency [MHz]")
# add legend
ax.plot([0.73, 0.78], [0.95, 0.95], transform=ax.transAxes,
color='blue', linestyle='-')
ax.annotate('Data', xy=(0.8, 0.95), xycoords='axes fraction',
va='center', ha='left', color='blue')
# ax.plot([0.73, 0.78], [0.9, 0.9], transform=ax.transAxes,
# color='orange', linestyle='--')
ax.annotate('Median = %s mJy' % (str(round(themedian*1000, 2))), xy=(0.05, 0.9), xycoords='axes fraction',
va='center', ha='left', color='orange')
ax_x2.tick_params(axis='both', bottom='off', top='on',
left='on', right='on', which='major', direction='in')
ax.tick_params(axis='both', bottom='on', top='off', left='on', right='on',
which='major', direction='in')
plt.savefig(
'{0:s}/beam_{1:s}_{2:s}_noise.png'.format(qa_line_beam_dir, beam, os.path.basename(cube_file).rstrip(".fits").split("_")[-1]), dpi=300)
plt.close('all')
# close fits file
                        fits_hdulist.close()
logger.info("Finished analyzing cube {0:s}".format(
cube_file))
else:
logger.warning(
"No HI cube found for beam {0:s}".format(beam))
logger.info("Finished analyzing beam {0:s} ({1:.1f}s)".format(
beam, time.time()-start_time_beam))
logger.info("Finished going through data directory {0:s} ({1:.1f}s)".format(
data_dir, time.time()-start_time_data))
else:
logger.warning("No beams found in {0:s}".format(data_dir))
| 14,836 | 42.25656 | 183 | py |
dataqa | dataqa-master/line/cube_stats_cont.py | """
This file contains functionality to analyze the quality of the data cube
generated by the pipeline for each beam.
"""
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table
import numpy as np
import os
import glob
import socket
import time
import logging
from dataqa.scandata import get_default_imagepath
import matplotlib.pyplot as plt
#from scipy.optimize import curve_fit
logger = logging.getLogger(__name__)
#def gauss(x, *p):
# """Function to calculate a Gaussian value
# """
# A, mu, sigma = p
# return A*np.exp(-(x-mu)**2/(2.*sigma**2))
def get_cube_stats_cont(qa_line_dir, data_base_dir_list):
"""Function to get a simple rms per channel
    Parameters:
qa_line_dir : str
Directory where the line QA output is stored
data_base_dir_list : list
List of data directories on happili 1 to 4
"""
# go through all four data directories
# ++++++++++++++++++++++++++++++++++++
for data_dir in data_base_dir_list:
start_time_data = time.time()
logging.info("## Going through the beams in {0:s}".format(data_dir))
# getting a list of beams
data_dir_beam_list = glob.glob("{0:s}/[0-3][0-9]".format(data_dir))
# checking whether no beam was found
if len(data_dir_beam_list) != 0:
# sort beam list
data_dir_beam_list.sort()
# going through all the beams that were found
# +++++++++++++++++++++++++++++++++++++++++++
for data_dir_beam in data_dir_beam_list:
start_time_beam = time.time()
# getting the beam
beam = os.path.basename(data_dir_beam)
logging.info("Analyzing beam {0:s}".format(beam))
# setting the output directory for the beam
qa_line_beam_dir = "{0:s}/{1:s}".format(qa_line_dir, beam)
# this directory does not exist create it
if not os.path.exists(qa_line_beam_dir):
logging.info(
"Creating directory {0:s}".format(qa_line_beam_dir))
os.mkdir(qa_line_beam_dir)
                # set the name of the cube file to analyze
cube_file = "{0:s}/line/cubes/HI_image_cube_contsub.fits".format(
data_dir_beam)
# make sure the cube file exists:
if os.path.exists(cube_file):
# open fits file or at least try
try:
fits_hdulist = fits.open(cube_file)
except Exception as e:
logging.error(e)
continue
# get wcs object
wcs = WCS(fits_hdulist[0].header)
# getting rid of stokes axis and check that it is a cube
if wcs.naxis == 4:
wcs = wcs.dropaxis(3)
cube = fits_hdulist[0].data[0]
elif wcs.naxis == 3:
cube = fits_hdulist[0].data
else:
logging.warning(
"Fits file {0:s} is not a cube".format(cube_file))
continue
# get the shape of the cube
cube_shape = np.shape(cube)
# get the number of channels
n_channels = cube_shape[0]
# creating an astropy table to store information about the cube
cube_info = Table([np.arange(n_channels), np.zeros(n_channels)], names=('channel', 'noise'))
# define range for histogram
#histrange = np.arange(-0.5, 0.5, 0.0001)
# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
#p0 = [1., 0., 0.001]
# This determines the noise in each channel, using standard rms and also by
# fitting the width of a histogram of image values.
# These give same results for now, but gauss fit could be improved by
# e.g. fitting only to part of histogram
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for ch in range(n_channels):
# get rms
cube_info['noise'][ch] = np.std(cube[ch])
# determine histogram and fit gaussian
#values = cube[ch]
#histch, binedges = np.histogram(
# values, bins=500, range=(-0.2, 0.2))
#bin_centres = binedges[:-1] + np.diff(binedges) / 2
#coeff, var_matrix = curve_fit(
# gauss, bin_centres, histch, p0=p0)
# Get the fitted curve
#hist_fit = gauss(bin_centres, *coeff)
# print 'Fitted mean = ', coeff[1]
# print 'Fitted standard deviation = ', coeff[2]
#cube_info['gauss'][ch] = coeff[2]
# write noise data
cube_info.write(
"{0:s}/beam_{1:s}_cube_noise_info_contsub.csv".format(qa_line_beam_dir, beam), format="csv", overwrite=True)
themedian = np.nanmedian(cube_info['noise'])
print("Median is " + str(round(themedian*1000,2)) + " mJy")
# Create plot
# +++++++++++
ax = plt.subplot(111)
# plot data and fit
ax.plot(
cube_info['channel'], cube_info['noise'] * 1.e3, color='blue', linestyle='-')
#ax.plot(cube_info['channel'], cube_info['gauss'] *
# 1.e3, color='orange', linestyle='--')
# add axes labels
ax.set_xlabel('Channel number')
ax.set_ylabel('Noise (mJy/beam)')
ax.set_xlim([0, n_channels-1])
# add second axes with frequency
ax_x2 = ax.twiny()
# get frequency for first and last channel
# freq_ticks = np.array(
# [wcs.wcs_pix2world([[0, 0, xtick]], 1)[0, 2]] for xtick in ax.get_xticks())
freq_first_ch = wcs.wcs_pix2world([[0, 0, 0]], 1)[0, 2]
freq_last_ch = wcs.wcs_pix2world(
[[0, 0, n_channels-1]], 1)[0, 2]
ax_x2.set_xlim([freq_first_ch/1.e6, freq_last_ch/1.e6])
ax_x2.set_xlabel("Frequency [MHz]")
# add legend
ax.plot([0.73, 0.78], [0.95, 0.95], transform=ax.transAxes,
color='blue', linestyle='-')
ax.annotate('Data', xy=(0.8, 0.95), xycoords='axes fraction',
va='center', ha='left', color='blue')
#ax.plot([0.73, 0.78], [0.9, 0.9], transform=ax.transAxes,
# color='orange', linestyle='--')
ax.annotate('Median = %s mJy'%(str(round(themedian*1000,2))), xy=(0.05, 0.9), xycoords='axes fraction',
va='center', ha='left', color='orange')
ax_x2.tick_params(axis='both', bottom='off', top='on',
left='on', right='on', which='major', direction='in')
ax.tick_params(axis='both', bottom='on', top='off', left='on', right='on',
which='major', direction='in')
plt.savefig(
'{0:s}/beam_{1:s}_cube_noise_contsub.png'.format(qa_line_beam_dir, beam), dpi=300)
plt.close('all')
# close fits file
                    fits_hdulist.close()
logging.info("Finished analyzing beam {0:s} ({1:.1f}s)".format(
beam, time.time()-start_time_beam))
else:
logging.warning(
"No HI cube found for beam {0:s}".format(beam))
logging.info("Finished going through data directory {0:s} ({1:.1f}s)".format(
data_dir, time.time()-start_time_data))
else:
logging.warning("No beams found in {0:s}".format(data_dir))
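# A minimal usage sketch for the function above (not executed anywhere in this
# module); the observation ID and directory layout are illustrative assumptions
# only.
#
# if __name__ == "__main__":
#     get_cube_stats_cont("/data/apertif/190101001/qa/line/",
#                         ["/data/apertif/190101001/"])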
| 8,602 | 39.966667 | 132 | py |
dataqa | dataqa-master/line/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/report/html_report_content_inspection_plots.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_inspection_plots(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for inspection plots
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>Here you can go through the inspection plots.</p>
</div>\n
"""
qa_report_obs_path_page = os.path.join(qa_report_obs_path, page_type)
# Create html code for inspection plots
# =====================================
    # assume that they are in the main directory if no obs info is given
if obs_info is None:
# get images
image_list = glob.glob(os.path.join(qa_report_obs_path_page, "*.png"))
if len(image_list) != 0:
# sort the list
image_list.sort()
html_code += """
<div class="w3-container w3-margin-top">\n"""
img_counter = 0
for image in image_list:
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image" style="width:100%">
</a>
<!--<div class="w3-container w3-center">
<h5>{1:s}</h5>
                    </div>-->
</div>\n""".format(page_type, os.path.basename(image))
if img_counter % 4 == 3 or img_counter == len(image_list)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
logger.warning("No inspection plots found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
                No inspection plots were found
</p>
</div>\n"""
# otherwise use the information
else:
if obs_info['Pol_Calibrator'][0] == '':
src_name = [obs_info['Target'][0], obs_info['Flux_Calibrator'][0]]
else:
src_name = [obs_info['Target'][0], obs_info['Flux_Calibrator']
[0], obs_info['Pol_Calibrator'][0]]
# now go through the list of sources
for src in src_name:
qa_report_obs_path_page_src = os.path.join(
qa_report_obs_path_page, src)
button_src_name = "plot_{0:s}".format(src)
# for the target get the images that are in the directory
if src == obs_info['Target'][0]:
image_list = glob.glob(os.path.join(
qa_report_obs_path_page_src, "*.png"))
# check that there are actually images
if len(image_list) != 0:
# sort the list
image_list.sort()
# create container for gallery
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(button_src_name, src)
# to count the plots for the gallery
img_counter = 0
# go through the images
for image in image_list:
# use 4 plots per row
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
# no caption need for the image
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}/{2:s}">
<img src="{0:s}/{1:s}/{2:s}" alt="No image" style="width:100%">
</a>
</div>\n""".format(page_type, src, os.path.basename(image))
# to close a row after four images or when there are no more images
if img_counter % 4 == 3 or img_counter == len(image_list)-1:
html_code += """</div>\n"""
img_counter += 1
# close the gallery div
html_code += """</div>\n"""
# otherwise create a disabled button
else:
logger.warning("No inspection plots found for target {0}".format(
src))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format(button_src_name, src)
# for the calibrator, the plots in beam directories
else:
# get a list of beams instead of getting all plots
# because they will be separated in polarization
beam_list = np.array(glob.glob(os.path.join(
qa_report_obs_path_page_src, "[0-3][0-9]")))
# check that there are images
if len(beam_list) != 0:
# an array of just the beams
beam_nr_list = np.array(
[os.path.basename(beam) for beam in beam_list])
# a reference list of beams
beam_nr_list_ref = np.array(
['{0:02d}'.format(beam) for beam in range(40)])
# the list of polarisation of the plots
pol_list = ['xx', 'yy']
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(button_src_name, src)
# now go through each of the polarisations
for pol in pol_list:
div_name_pol = "insplot_gallery_{0:s}_{1:s}".format(
src, pol)
# get a list of images
image_list = np.array(glob.glob(os.path.join(
qa_report_obs_path_page_src, "[0-3][0-9]/*{}.png".format(pol))))
# check that there are images
if len(image_list) != 0:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(div_name_pol, pol)
img_counter = 0
# go through all reference beams
for beam_nr in beam_nr_list_ref:
# to properly make the gallery with 5 images in a row
if img_counter % 5 == 0:
html_code += """<div class="w3-row">\n"""
# get the image from the list if one exists for this beam
if beam_nr in beam_nr_list:
image = image_list[np.where(
beam_nr_list == beam_nr)[0]][0]
image_exists = True
else:
image_exists = False
# if there is an image plot it
if image_exists:
html_code += """
<div class="w3-col w3-border" style="width:20%">
<a href="{0:s}/{1:s}/{2:s}/{3:s}">
<img src="{0:s}/{1:s}/{2:s}/{3:s}" alt="No image", width="100%">
</a>
<div class="w3-container w3-center">
<h5>Beam {2:s}</h5>
</div>
</div>\n""".format(page_type, src, beam_nr, os.path.basename(image))
# if not put an empty one there
else:
html_code += """
<div class="w3-col" style="width:20%">
<img src="" alt="No image for beam {0:s}", width="100%">
</div>\n""".format(beam_nr)
# closing the row
if img_counter % 5 == 4 or img_counter == len(beam_nr_list_ref):
html_code += """</div>\n"""
img_counter += 1
# close div for this plot
html_code += """</div>\n"""
# add a disabled button
else:
                            logger.warning(
"No plots found for polarisation {}".format(pol))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format(div_name_pol, pol)
html_code += """
</div>\n"""
# otherwise create a disabled button
else:
logger.warning("No inspection plots found for calibrator {0}".format(
src))
html_code += """
<div class = "w3-container" >
<button class = "w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick = "show_hide_plots('{0:s}')" >
{1:s}
</button>
</div>\n""".format(button_src_name, src)
return html_code
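# A minimal sketch of a call to the function above; the header string, report
# path and observation ID are illustrative assumptions, while the obs_info
# columns mirror the fields accessed in the code.
#
# from astropy.table import Table
# obs_info = Table({'Target': ['example_field'],
#                   'Flux_Calibrator': ['3C147'],
#                   'Pol_Calibrator': ['3C286']})
# html_code = write_obs_content_inspection_plots(
#     "<html>\n<body>\n", "/data/apertif/190101001/qa/report",
#     "inspection_plots", obs_info=obs_info)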
| 11,971 | 42.376812 | 186 | py |
dataqa | dataqa-master/report/pipeline_run_time.py | """
This module contains functionality to read the timing
measurement of the pipeline for the report
"""
import logging
import os
from astropy.table import Table, hstack, vstack
from apercal import parselog
from scandata import get_default_imagepath
import socket
import numpy as np
import glob
from datetime import timedelta
logger = logging.getLogger(__name__)
def get_pipeline_run_time(obs_id, trigger_mode=False):
"""Function to get the run time of apercal parts
Since parselog is broken and the apercal logfiles have changed
due to the parallelisation, this script just reads out the information
from the main logfile
"""
logger.info("## Reading apercal timing measurements")
# get the QA path
qa_dir = get_default_imagepath(obs_id)
host_name = socket.gethostname()
if trigger_mode:
data_dir_list = [qa_dir.replace("qa/", "")]
host_name_list = [host_name]
elif host_name == "happili-01" and not trigger_mode:
data_dir_list = [qa_dir.replace(
"qa/", ""), qa_dir.replace("qa/", "").replace("/data", "/data2"), qa_dir.replace("qa/", "").replace("/data", "/data3"), qa_dir.replace("qa/", "").replace("/data", "/data4")]
host_name_list = ["happili-01",
"happili-02", "happili-03", "happili-04"]
else:
data_dir_list = [qa_dir.replace("qa/", "")]
host_name_list = [host_name]
# Create an apercal QA directory
qa_apercal_dir = "{0:s}apercal_performance/".format(qa_dir)
if not os.path.exists(qa_apercal_dir):
logger.info("Creating directory {0:s}".format(qa_apercal_dir))
try:
os.mkdir(qa_apercal_dir)
except Exception as e:
logger.error(e)
original_useful_lines = ["Running prepare ... Done",
"Running split ... Done",
"Running preflag ... Done",
"Running crosscal ... Done",
"Running convert ... Done",
"Running selfcal and/or continuum and/or polarisation ... Done",
"Running line ... Done",
"Running transfer ... Done",
"Apercal finished after"]
# go through the list of data directories
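    # The matching below assumes main-log lines that contain one of the phrases
    # above and, judging from the string handling further down, end in the
    # elapsed time in parentheses, e.g. "Running preflag ... Done (1234s)";
    # the last phrase ("Apercal finished after") is instead followed directly
    # by the total run time.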
for k in range(len(data_dir_list)):
# get the log files
apercal_log_list = glob.glob(
"{0:s}apercal.log".format(data_dir_list[k]))
if len(apercal_log_list) != 0:
# sort log list
apercal_log_list.sort()
# go through the log files
for log_counter in range(len(apercal_log_list)):
logger.info(
"Reading out timing measurement for {0:s}".format(apercal_log_list[log_counter]))
# to store the results from reading the information
results = []
lines_found = []
# make a copy of useful_lines to use for next log file
useful_lines = list(original_useful_lines)
# read logfile
with open(apercal_log_list[log_counter], "r") as logfile:
# go through the lines
for logline in logfile:
# abort when we are out of useful lines
if len(useful_lines) == 0:
break
# for each line check that a useful line is in there
for pos, line in enumerate(useful_lines):
# if useful line is found, get value and remove it from list
if line in logline:
# get the measured time
if line == original_useful_lines[-1]:
results.append(logline.split(line)[1])
else:
time_in_s = int(logline.rstrip().lstrip().split(line)[
1].split("(")[1].split(")")[0].split("s")[0])
time_str = str(
timedelta(seconds=time_in_s))
results.append(time_str)
# the line that was found
if line == original_useful_lines[5]:
lines_found.append(
line.replace(" and/or ", "+"))
else:
lines_found.append(line)
# remove the useful line that was found
useful_lines.remove(line)
# move to next logline
break
# take the useful lines found and get only the module
step_info = np.array([step.split(" ")[1]
for step in lines_found])
# number of entries in results list
n_entries = len(results)
# create a column with file name
file_name_col = np.array(
[os.path.basename(apercal_log_list[log_counter]) for m in range(n_entries)])
# create table with the above columns
timing_table = Table([file_name_col, step_info, results], names=(
'file_name', 'step', 'time'))
if log_counter == 0:
complete_table = timing_table.copy()
else:
complete_table = vstack([complete_table, timing_table])
table_output_name = os.path.join(
qa_apercal_dir, "apercal_log_timeinfo_{0:s}.csv".format(host_name_list[k]))
try:
complete_table.write(
table_output_name, format="csv", overwrite=True)
except Exception as e:
logger.error(e)
else:
logger.warning(
"Could not find any apercal log file in {0:s}".format(data_dir_list[k]))
# the following is old code for using parselog
# go through the list of data directories
# for k in range(len(data_dir_list)):
# # get the log files
# apercal_log_list = glob.glob(
# "{0:s}apercal*.log".format(data_dir_list[k]))
# if len(apercal_log_list) != 0:
# # sort log list
# apercal_log_list.sort()
# # go through the log files
# for log_counter in range(len(apercal_log_list)):
# logger.info(
# "Reading out timing measurement for {0:s}".format(apercal_log_list[log_counter]))
# # read timing information
# timinginfo = parselog(apercal_log_list[log_counter])
# # number of entries
# n_entries_in_timinginfo = len(timinginfo)
# # create a column with file name
# file_name_col = np.array(
# [os.path.basename(apercal_log_list[log_counter]) for m in range(n_entries_in_timinginfo)])
# # create a column with beam name
# logfile_name = os.path.basename(
# apercal_log_list[log_counter]).split(".log")[0]
# if logfile_name == "apercal":
# beam_name_col = np.array([
# "--" for m in range(n_entries_in_timinginfo)])
# else:
# beam_name_col = np.array([
# logfile_name.split("apercal")[-1] for m in range(n_entries_in_timinginfo)])
# # create table with the above columns
# beam_file_table = Table([beam_name_col, file_name_col], names=(
# 'beam', 'file_name'))
# # make it an astropy Table
# timinginfo_table = Table(
# rows=timinginfo, names=('pipeline_step', 'run_time'))
# if log_counter == 0:
# complete_table = hstack(
# [beam_file_table, timinginfo_table])
# else:
# tmp_table = hstack([beam_file_table, timinginfo_table])
# complete_table = vstack([complete_table, tmp_table])
# table_output_name = "{0:s}apercal_log_timeinfo_{1:s}.csv".format(
# qa_apercal_dir, host_name_list[k])
# try:
# complete_table.write(
# table_output_name, format="csv", overwrite=True)
# except Exception as e:
# logger.error(e)
# else:
# logger.warning(
# "Could not find any apercal log file in {0:s}".format(data_dir_list[k]))
logger.info("## Reading apercal timing measurements. Done")
| 9,018 | 39.084444 | 185 | py |
dataqa | dataqa-master/report/osa_functions.py | # test script for jupyter notebook OSA report
import numpy as np
import os
import shutil
from astropy.table import Table, join
from ipywidgets import widgets, Layout
from IPython.display import display
import glob
import json
from collections import OrderedDict
def run(obs_id=None, single_node=False):
layout_select = Layout(width='30%')
layout_area = Layout(width='60%', height='100px')
style = {'description_width': 'initial'}
# get the table file (there should only be one)
# get the current director
cwd = os.getcwd()
# get the obs id
if obs_id is None:
obs_id = cwd.split("/")[-3]
# hopefully this file is available
#obs_file = "/data/apertif/{0}/qa/{0}_obs.ecsv".format(obs_id)
obs_file = os.path.join(os.path.dirname(
cwd), "{0}_obs.ecsv".format(obs_id))
    # if the report has already been created, then it should also be there
osa_report_file = "{}_OSA_report.json".format(obs_id)
prepare_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> General Information </h2>"
)
display(prepare_label)
obs_text = widgets.Text(value=obs_id,
placeholder='',
description='Obs ID:',
disabled=False,
layout=layout_select)
display(obs_text)
# in case the OSA report already exists, get the values for all fields
if os.path.exists(osa_report_file):
with open(osa_report_file, "r") as f:
report_data = f.read()
report_json = json.loads(report_data)
osa_text_value = report_json['OSA']
target_text_value = report_json['Observation']['Target']
flux_cal_text_value = report_json['Observation']['Flux_Calibrator']
flux_cal_obs_id_list = report_json['Observation']['Flux_Calibrator_Obs_IDs']
pol_cal_text_value = report_json['Observation']['Pol_Calibrator']
pol_cal_obs_id_list = report_json['Observation']['Pol_Calibrator_Obs_IDs']
prepare_status_value = report_json['Apercal']['Prepare']['Status']
# account for change in status values
if prepare_status_value == "Critical":
prepare_status_value = "Failed"
prepare_notes_value = report_json['Apercal']['Prepare']['Notes']
preflag_status_value = report_json['Apercal']['Preflag']['Status']
preflag_notes_value = report_json['Apercal']['Preflag']['Notes']
if preflag_status_value == "Critical":
preflag_status_value = "Failed"
crosscal_status_value = report_json['Apercal']['Crosscal']['Status']
crosscal_notes_value = report_json['Apercal']['Crosscal']['Notes']
if crosscal_status_value == "Critical":
crosscal_status_value = "Failed"
selfcal_status_value = report_json['Apercal']['Selfcal']['Status']
selfcal_notes_value = report_json['Apercal']['Selfcal']['Notes']
if selfcal_status_value == "Critical":
selfcal_status_value = "Failed"
continuum_status_value = report_json['Apercal']['Continuum']['Status']
continuum_notes_value = report_json['Apercal']['Continuum']['Notes']
if continuum_status_value == "Critical":
continuum_status_value = "Failed"
polarisation_status_value = report_json['Apercal']['Polarisation']['Status']
polarisation_notes_value = report_json['Apercal']['Polarisation']['Notes']
if polarisation_status_value == "Critical":
polarisation_status_value = "Failed"
line_status_value = report_json['Apercal']['Line']['Status']
line_notes_value = report_json['Apercal']['Line']['Notes']
if line_status_value == "Critical":
line_status_value = "Failed"
summary_status_value = report_json['Summary']['Status']
summary_notes_value = report_json['Summary']['Notes']
if summary_status_value == "Critical":
summary_status_value = "Failed"
        if "Pipeline_Status" in report_json['Summary']:
summary_status_value_pipeline = report_json['Summary']['Pipeline_Status']
summary_notes_value_pipeline = report_json['Summary']['Pipeline_Notes']
else:
summary_status_value_pipeline = 'Unchecked'
summary_notes_value_pipeline = '-'
# if the report does not yet exists try the observation table
elif os.path.exists(obs_file):
obs_table = Table.read(obs_file, format="ascii.ecsv")
# get the obs IDs from the other obs
# unless they should be on a single node
if not single_node:
if "/data" in obs_file:
obs_table_2 = Table.read(obs_file.replace(
"/data/", "/data2/"), format="ascii.ecsv")
obs_table_3 = Table.read(obs_file.replace(
"/data/", "/data3/"), format="ascii.ecsv")
obs_table_4 = Table.read(obs_file.replace(
"/data/", "/data4/"), format="ascii.ecsv")
else:
obs_table_2 = Table.read(obs_file.replace(
"/tank/", "/tank2/"), format="ascii.ecsv")
obs_table_3 = Table.read(obs_file.replace(
"/tank/", "/tank3/"), format="ascii.ecsv")
obs_table_4 = Table.read(obs_file.replace(
"/tank/", "/tank4/"), format="ascii.ecsv")
osa_text_value = obs_table['OSA'][0]
target_text_value = obs_table['Target'][0]
flux_cal_text_value = obs_table['Flux_Calibrator'][0]
# in case information is on a single node
if single_node:
flux_cal_obs_id_list = obs_table['Flux_Calibrator_Obs_IDs'][0]
else:
flux_cal_obs_id_list = obs_table['Flux_Calibrator_Obs_IDs'][0] + \
", " + obs_table_2['Flux_Calibrator_Obs_IDs'][0] + ", " + \
obs_table_3['Flux_Calibrator_Obs_IDs'][0] + ", " + \
obs_table_4['Flux_Calibrator_Obs_IDs'][0]
        # in case the polarisation calibrator does not exist, the entries will be masked
if np.ma.is_masked(obs_table['Pol_Calibrator'][0]):
pol_name = "Not available"
pol_name_ids = "Not available"
else:
pol_name = obs_table['Pol_Calibrator'][0]
# in case information is on a single node
if single_node:
pol_name_ids = obs_table['Pol_Calibrator_Obs_IDs'][0]
else:
pol_name_ids = obs_table['Pol_Calibrator_Obs_IDs'][0] + \
", " + obs_table_2['Pol_Calibrator_Obs_IDs'][0] + ", " + \
obs_table_3['Pol_Calibrator_Obs_IDs'][0] + ", " + \
obs_table_4['Pol_Calibrator_Obs_IDs'][0]
pol_cal_text_value = pol_name
pol_cal_obs_id_list = pol_name_ids
prepare_status_value = 'Unchecked'
prepare_notes_value = '-'
preflag_status_value = 'Unchecked'
preflag_notes_value = '-'
crosscal_status_value = 'Unchecked'
crosscal_notes_value = '-'
selfcal_status_value = 'Unchecked'
selfcal_notes_value = '-'
continuum_status_value = 'Unchecked'
continuum_notes_value = '-'
polarisation_status_value = 'Unchecked'
polarisation_notes_value = '-'
line_status_value = 'Unchecked'
line_notes_value = '-'
summary_status_value = 'Unchecked'
summary_notes_value = '-'
summary_status_value_pipeline = 'Unchecked'
summary_notes_value_pipeline = '-'
    # otherwise leave most things empty
else:
print("Warning: Could not find observation information. Please provide your name")
osa_text_value = ''
target_text_value = ''
flux_cal_text_value = ''
flux_cal_obs_id_list = ''
pol_cal_text_value = ''
pol_cal_obs_id_list = ''
prepare_status_value = 'Unchecked'
prepare_notes_value = '-'
preflag_status_value = 'Unchecked'
preflag_notes_value = '-'
crosscal_status_value = 'Unchecked'
crosscal_notes_value = '-'
selfcal_status_value = 'Unchecked'
selfcal_notes_value = '-'
continuum_status_value = 'Unchecked'
continuum_notes_value = '-'
polarisation_status_value = 'Unchecked'
polarisation_notes_value = '-'
line_status_value = 'Unchecked'
line_notes_value = '-'
summary_status_value = 'Unchecked'
summary_notes_value = '-'
summary_status_value_pipeline = 'Unchecked'
summary_notes_value_pipeline = '-'
# dropdown_options = ['unchecked', 'unknown',
# 'failed', 'bad', 'acceptable', 'good']
dropdown_options = ['Unchecked', 'Unknown',
'Failed', 'Bad', 'Acceptable', 'Good', 'Excellent']
    status_legend_prepare = "<p style='font-weight:bold'>Status legend:</p> Excellent: 100% (0 beams missing); Good: 95% (1-2 beams missing); Acceptable: 85% (3-6 beams missing); Bad: 75% (7-10 beams missing); Failed: <75% (More than 10 beams missing); Unknown (No information available)</p>"
    status_legend_preflag = "<p style='font-weight:bold'>Status legend:</p> Excellent: 100% (0 beams failed); Good: 95% (1-2 beams failed); Acceptable: 85% (3-6 beams failed); Bad: 75% (7-10 beams failed); Failed: <75% (More than 10 beams failed); Unknown (No information available)</p>"
    status_legend_crosscal = status_legend_preflag
    status_legend_selfcal = "<p style='font-weight:bold'>Status legend:</p> Excellent: 100% (No issues or failed beams); Good: 95% (1-2 beams show issues or failed); Acceptable: 85% (3-6 beams show issues or failed); Bad: 75% (7-10 beams show issues); Failed: <75% (More than 10 beams show issues); Unknown (No information available)</p>"
status_legend_continuum = status_legend_selfcal
status_legend_polarisation = status_legend_selfcal
status_legend_line = status_legend_selfcal
status_legend_summary = status_legend_selfcal
# General information
# ===================
osa_text = widgets.Text(value=osa_text_value,
placeholder='',
description='OSA:',
disabled=False,
layout=layout_select)
display(osa_text)
target_text = widgets.Text(value=target_text_value,
placeholder='',
description='Target:',
disabled=False,
layout=layout_select)
display(target_text)
flux_cal_text = widgets.Text(value=flux_cal_text_value,
placeholder='',
description='Flux Cal:',
disabled=False,
layout=layout_select)
display(flux_cal_text)
flux_cal_id_label = widgets.HTML(
value="Flux calibrator Obs IDs: {}".format(flux_cal_obs_id_list)
)
display(flux_cal_id_label)
pol_cal_text = widgets.Text(value=pol_cal_text_value,
placeholder='',
description='Pol Cal:',
disabled=False,
layout=layout_select)
display(pol_cal_text)
pol_cal_id_label = widgets.HTML(
value="Pol calibrator Obs IDs: {}".format(pol_cal_obs_id_list)
)
display(pol_cal_id_label)
# Prepare
# =======
prepare_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Prepare (Inspection Plots) </h2>"
)
display(prepare_label)
prepare_label_info = widgets.HTML(
value=status_legend_prepare)
# value="Status legend: Excellent: 100% (0 beams missing)")
display(prepare_label_info)
prepare_menu = widgets.Dropdown(options=dropdown_options,
value=prepare_status_value,
description='Status:',
disabled=False,
layout=layout_select)
display(prepare_menu)
prepare_notes = widgets.Textarea(value=prepare_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(prepare_notes)
# Preflag
# =======
preflag_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Preflag </h2>")
display(preflag_label)
preflag_label_info = widgets.HTML(
value=status_legend_preflag)
# value="Status legend: Excellent: 100% (0 beams missing)")
display(preflag_label_info)
preflag_menu = widgets.Dropdown(options=dropdown_options,
value=preflag_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(preflag_menu)
preflag_notes = widgets.Textarea(value=preflag_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(preflag_notes)
# Crosscal
# ========
crosscal_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Crosscal </h2>")
display(crosscal_label)
crosscal_label_info = widgets.HTML(
value=status_legend_crosscal)
display(crosscal_label_info)
crosscal_menu = widgets.Dropdown(options=dropdown_options,
value=crosscal_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(crosscal_menu)
crosscal_notes = widgets.Textarea(value=crosscal_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(crosscal_notes)
# Selfcal
# ========
selfcal_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Selfcal </h2>")
display(selfcal_label)
selfcal_label_info = widgets.HTML(
value=status_legend_selfcal)
display(selfcal_label_info)
selfcal_menu = widgets.Dropdown(options=dropdown_options,
value=selfcal_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(selfcal_menu)
selfcal_notes = widgets.Textarea(value=selfcal_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(selfcal_notes)
# Continuum
# =========
continuum_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Continuum </h2>")
display(continuum_label)
continuum_label_info = widgets.HTML(
value=status_legend_continuum)
display(continuum_label_info)
continuum_menu = widgets.Dropdown(options=dropdown_options,
value=continuum_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(continuum_menu)
continuum_notes = widgets.Textarea(value=continuum_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(continuum_notes)
# Polarisation
# ============
polarisation_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Polarisation </h2>")
display(polarisation_label)
polarisation_label_info = widgets.HTML(
value=status_legend_polarisation)
display(polarisation_label_info)
polarisation_menu = widgets.Dropdown(options=dropdown_options,
value=polarisation_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(polarisation_menu)
polarisation_notes = widgets.Textarea(value=polarisation_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(polarisation_notes)
# Line
# ====
line_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Line </h2>")
display(line_label)
line_label_info = widgets.HTML(
value=status_legend_line)
display(line_label_info)
line_menu = widgets.Dropdown(options=dropdown_options,
value=line_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(line_menu)
line_notes = widgets.Textarea(value=line_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(line_notes)
# Summary
# =======
summary_label = widgets.HTML(
value="<h2 style='text-decoration: underline'> Summary </h2>")
display(summary_label)
summary_label_info = widgets.HTML(
value=status_legend_summary)
display(summary_label_info)
summary_label_obs = widgets.HTML(
value="<h4> Observation </h4>")
display(summary_label_obs)
summary_menu = widgets.Dropdown(options=dropdown_options,
value=summary_status_value,
description='Select:',
disabled=False,
layout=layout_select)
display(summary_menu)
# notes_label = widgets.Label("#### Notes")
# display(notes_label)
summary_notes = widgets.Textarea(value=summary_notes_value,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(summary_notes)
summary_label_pipeline = widgets.HTML(
value="<h4> Pipeline </h4>")
display(summary_label_pipeline)
summary_menu_pipeline = widgets.Dropdown(options=dropdown_options,
value=summary_status_value_pipeline,
description='Select:',
disabled=False,
layout=layout_select)
display(summary_menu_pipeline)
# notes_label = widgets.Label("#### Notes")
# display(notes_label)
summary_notes_pipeline = widgets.Textarea(value=summary_notes_value_pipeline,
placeholder='Nothing to add',
description='Notes:',
disabled=False,
layout=layout_area)
display(summary_notes_pipeline)
btn = widgets.Button(description='Save', button_style='primary')
display(btn)
def show_warning_label(module, request_info=False):
if request_info:
warning_label = widgets.HTML(
value="<p style='font-size:large; color:red'> Warning: You have selected a status other then <i> Excellent </i> for <i>{0}</i>. Please provide the beam numbers and a short statement </p>".format(module))
display(warning_label)
else:
warning_label = widgets.HTML(
value="<p style='font-size:large; color:red'> Warning: <i>{0}</i> still unchecked. Please choose status for <i>{0}</i> </p>".format(module))
display(warning_label)
def save_info(b):
save_try_label = widgets.HTML(
value="<p style='font-size:large; color:black'> Trying to to save the report</p>")
display(save_try_label)
report_complete = True
        # Check that a name has been entered for the OSA (deprecated)
        # and that each of the pipeline steps has been checked
if obs_text.value == '':
warning_label = widgets.HTML(
value="<p style='font-size:large; color:red'> Warning: Obs ID is missing. Please enter your Obs ID </p>")
display(warning_label)
report_complete = False
# return -1
if osa_text.value == '':
warning_label = widgets.HTML(
value="<p style='font-size:large; color:red'> Warning: OSA name is missing. Please enter your name </p>")
display(warning_label)
report_complete = False
# return -1
if prepare_menu.value == 'Unchecked':
show_warning_label("Prepare")
report_complete = False
# return -1
elif prepare_menu.value != 'Excellent' and prepare_notes.value == "-":
show_warning_label("Prepare", request_info=True)
report_complete = False
# return -1
if preflag_menu.value == 'Unchecked':
show_warning_label("Preflag")
report_complete = False
# return -1
elif preflag_menu.value != 'Excellent' and preflag_notes.value == "-":
show_warning_label("Preflag", request_info=True)
report_complete = False
# return -1
if crosscal_menu.value == 'Unchecked':
show_warning_label("Crosscal")
report_complete = False
# return -1
elif crosscal_menu.value != 'Excellent' and crosscal_notes.value == "-":
show_warning_label("Crosscal", request_info=True)
report_complete = False
# return -1
if selfcal_menu.value == 'Unchecked':
show_warning_label("Selfcal")
report_complete = False
# return -1
elif selfcal_menu.value != 'Excellent' and selfcal_notes.value == "-":
show_warning_label("Selfcal", request_info=True)
report_complete = False
# return -1
if continuum_menu.value == 'Unchecked':
show_warning_label("Continuum")
report_complete = False
# return -1
elif continuum_menu.value != 'Excellent' and continuum_notes.value == "-":
show_warning_label("Continuum", request_info=True)
report_complete = False
# return -1
if polarisation_menu.value == 'Unchecked':
show_warning_label("Polarisation")
report_complete = False
# return -1
elif polarisation_menu.value != 'Excellent' and polarisation_notes.value == "-":
show_warning_label("Polarisation", request_info=True)
report_complete = False
# return -1
if line_menu.value == 'Unchecked':
show_warning_label("Line")
report_complete = False
# return -1
elif line_menu.value != 'Excellent' and line_notes.value == "-":
show_warning_label("Line", request_info=True)
report_complete = False
# return -1
if summary_menu.value == 'Unchecked':
show_warning_label("Summary")
report_complete = False
# return -1
elif summary_menu.value != 'Excellent' and summary_notes.value == "-":
show_warning_label("Summary", request_info=True)
report_complete = False
# return -1
        if summary_menu_pipeline.value == 'Unchecked':
            show_warning_label("Summary (Pipeline)")
            report_complete = False
            # return -1
        elif summary_menu_pipeline.value != 'Excellent' and summary_notes_pipeline.value == "-":
            show_warning_label("Summary (Pipeline)", request_info=True)
            report_complete = False
            # return -1
# save as json
# OrderedDict to preserve order when dumping to json
json_dict = OrderedDict()
json_dict['Observation'] = OrderedDict()
json_dict['Observation']['Obs_ID'] = obs_text.value
json_dict['Observation']['Target'] = target_text.value
json_dict['Observation']['Flux_Calibrator'] = flux_cal_text.value
json_dict['Observation']['Flux_Calibrator_Obs_IDs'] = flux_cal_obs_id_list
json_dict['Observation']['Pol_Calibrator'] = pol_cal_text.value
json_dict['Observation']['Pol_Calibrator_Obs_IDs'] = pol_cal_obs_id_list
json_dict['OSA'] = osa_text.value
json_dict['Apercal'] = OrderedDict()
json_dict['Apercal']['Prepare'] = OrderedDict()
json_dict['Apercal']['Prepare']['Status'] = prepare_menu.value
json_dict['Apercal']['Prepare']['Notes'] = prepare_notes.value
json_dict['Apercal']['Preflag'] = OrderedDict()
json_dict['Apercal']['Preflag']['Status'] = preflag_menu.value
json_dict['Apercal']['Preflag']['Notes'] = preflag_notes.value
json_dict['Apercal']['Crosscal'] = OrderedDict()
json_dict['Apercal']['Crosscal']['Status'] = crosscal_menu.value
json_dict['Apercal']['Crosscal']['Notes'] = crosscal_notes.value
json_dict['Apercal']['Selfcal'] = OrderedDict()
json_dict['Apercal']['Selfcal']['Status'] = selfcal_menu.value
json_dict['Apercal']['Selfcal']['Notes'] = selfcal_notes.value
json_dict['Apercal']['Continuum'] = OrderedDict()
json_dict['Apercal']['Continuum']['Status'] = continuum_menu.value
json_dict['Apercal']['Continuum']['Notes'] = continuum_notes.value
json_dict['Apercal']['Polarisation'] = OrderedDict()
json_dict['Apercal']['Polarisation']['Status'] = polarisation_menu.value
json_dict['Apercal']['Polarisation']['Notes'] = polarisation_notes.value
json_dict['Apercal']['Line'] = OrderedDict()
json_dict['Apercal']['Line']['Status'] = line_menu.value
json_dict['Apercal']['Line']['Notes'] = line_notes.value
json_dict['Summary'] = OrderedDict()
json_dict['Summary']['Status'] = summary_menu.value
json_dict['Summary']['Notes'] = summary_notes.value
json_dict['Summary']['Pipeline_Status'] = summary_menu_pipeline.value
json_dict['Summary']['Pipeline_Notes'] = summary_notes_pipeline.value
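        # the resulting report file therefore has the structure (values omitted):
        # {"Observation": {"Obs_ID": ..., "Target": ..., ...},
        #  "OSA": ...,
        #  "Apercal": {"Prepare": {"Status": ..., "Notes": ...}, ..., "Line": {...}},
        #  "Summary": {"Status": ..., "Notes": ..., "Pipeline_Status": ..., "Pipeline_Notes": ...}}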
json_file_name = "{0}_OSA_report.json".format(obs_id)
try:
with open(json_file_name, "w") as f:
json.dump(json_dict, f)
        except Exception:
warning_save_json_label = widgets.HTML(
value="<p style='font-size:large; color:red'> ERROR: Could not save report. Please ask for help.</p>")
display(warning_save_json_label)
else:
if report_complete:
save_json_label = widgets.HTML(
value="<p style='font-size:large; color:green'> Saved OSA report {0:s}. Thank You.</p>".format(json_file_name))
display(save_json_label)
else:
save_json_label = widgets.HTML(
value="<p style='font-size:large; color:orange'> Saved incomplete OSA report {0:s}. Please remember to finish the report. Thank you.</p>".format(json_file_name))
display(save_json_label)
# copy the file to the collection directory
json_copy = "/data/apertif/qa/OSA_reports/{0}".format(json_file_name)
try:
shutil.copy(json_file_name, json_copy)
        except Exception:
warning_copy_json_label = widgets.HTML(
value="<p style='font-size:large; color:red'> ERROR: Could not create back up of report. Please ask for help</p>")
display(warning_copy_json_label)
else:
if report_complete:
copy_json_label = widgets.HTML(
value="<p style='font-size:large; color:green'> Created backup of OSA report.</p>")
display(copy_json_label)
else:
copy_json_label = widgets.HTML(
value="<p style='font-size:large; color:orange'> Created backup of incomplete OSA report.</p>")
display(copy_json_label)
btn.on_click(save_info)
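# In a notebook cell the form above might be brought up as sketched below; the
# observation ID is illustrative, and when it is omitted the function derives it
# from the current working directory as implemented above.
#
# import osa_functions
# osa_functions.run(obs_id="190101001", single_node=False)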
| 29,468 | 43.181409 | 340 | py |
dataqa | dataqa-master/report/html_report_content_continuum.py | import os
import sys
from astropy.table import Table, join
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_continuum(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for continuum
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
obs_info (list(str)): Basic information of observation
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
Here you can inspect for each beam the continuum image, PyBDSF diagnostic plots and the validation tool. The PyBDSF catalog is not accessible from this page, but can be found in the QA directory as a csv table.
Of course, all of this only exists for beams with a continuum image created by the pipeline.<br>
This page will only have content after the continuum QA step has been performed.
</p>
</div>\n
"""
qa_report_obs_page_path = os.path.join(qa_report_obs_path, page_type)
# Create html code for summary table
# ==================================
if obs_info is not None:
obs_id = obs_info['Obs_ID'][0]
source_list = np.array(
[obs_info['Target'][0], obs_info['Flux_Calibrator'][0], obs_info['Pol_Calibrator'][0]])
else:
obs_id = os.path.basename(qa_report_obs_path)
source_list = None
# set the file name where information from the param*npy is stored
continuum_summary_file = os.path.join(
qa_report_obs_page_path, "{0}_{1}_summary.csv".format(obs_id, page_type))
# set the file name where information of the image properties is stored
continuum_image_properties_file = os.path.join(
qa_report_obs_page_path, "{0}_combined_continuum_image_properties.csv".format(obs_id))
if os.path.exists(continuum_summary_file):
summary_table = Table.read(continuum_summary_file, format="ascii.csv")
else:
summary_table = None
if os.path.exists(continuum_image_properties_file):
image_properties_table = Table.read(
continuum_image_properties_file, format="ascii.csv")
if summary_table is None:
summary_table = image_properties_table
else:
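            # astropy's join defaults to an inner join, so only beams present in
            # both tables survive the merge below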
summary_table = join(
summary_table, image_properties_table, keys='beam')
# if there is a summary table
# create tables for each source
if summary_table is not None:
# get the keys for the table
table_keys = summary_table.keys()
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('gallery-1')">
Continuum summary table
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="gallery-1">\n"""
beam_list = summary_table['beam']
# beginning of table
html_code += """
<div class="w3-container w3-center">
<div class="w3-responsive">
<table class="w3-table-all">\n"""
# write the header
html_code += """
<tr class="w3-amber">\n"""
# fill header keys
for key in table_keys:
# make sure that the beam is always there
html_code += """<th>{}</th>\n""".format(
key.replace("targetbeams_", ""))
# close table header
html_code += """</tr>\n"""
for k in range(len(beam_list)):
# open row
html_code += """<tr>\n"""
# now go through keys and fill table
for key in table_keys:
# get the element from table
element = summary_table[key][k]
# check whether it is masked
if np.ma.is_masked(element):
html_code += """<td>-</td>\n"""
else:
html_code += """<td>{0}</td>\n""".format(element)
# close row
html_code += """</tr>\n"""
# end of table
html_code += """
</table>
</div>
</div>\n"""
html_code += """</div>\n"""
else:
logger.warning("No continuum table found")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('gallery-1')">
Continuum summary table
</button>
</div>\n"""
# get beams
beam_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
n_beams = len(beam_list)
if n_beams != 0:
beam_list.sort()
beam_list = np.array(beam_list)
# get a list of beam numbers
beam_nr_list = np.array([os.path.basename(beam) for beam in beam_list])
# get a list of reference beams
beam_nr_list_ref = np.array(
["{0:02d}".format(beam) for beam in range(40)])
# Create html code for continuum images gallery
# =============================================
# get a list of all images to make sure that at least one exists
image_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]/image_mf_[0-9][0-9].png".format(qa_report_obs_path, page_type))
if len(image_list) != 0:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format("gallery_cont", "Continuum images")
img_counter = 0
for beam_nr in beam_nr_list_ref:
# to properly make the gallery
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
if beam_nr in beam_nr_list:
beam = beam_list[np.where(beam_nr_list == beam_nr)[0]][0]
# get the phase plots
image_list = glob.glob(os.path.join(
beam, "image_mf_[0-9][0-9].png"))
if len(image_list) != 0:
image = image_list[0]
image_exists = True
else:
image_exists = False
else:
image_exists = False
# if no image exists leave it empty
if image_exists:
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}/{2:s}">
<img src="{0:s}/{1:s}/{2:s}" alt="No image" style="width:100%">
</a>
<div class="w3-container w3-center">
<h5>Beam {1:s}</h5>
</div>
</div>\n""".format(page_type, os.path.basename(beam), os.path.basename(image))
else:
html_code += """
<div class="w3-quarter">
<img src="" alt="No image for beam {0:s}", width="100%">
</div>\n""".format(beam_nr)
if img_counter % 4 == 3 or img_counter == len(beam_nr_list_ref):
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_cont", "Continuum images")
# Create html code for beam plots
# ===============================
for beam_nr in beam_nr_list_ref:
button_html_name = "beam{0:s}".format(beam_nr)
div_name = "continuum_gallery{0:s}".format(beam_nr)
# get the diagnostic plots
if beam_nr in beam_nr_list:
image_list = glob.glob(
"{0:s}/*png".format(beam_list[np.where(beam_nr_list == beam_nr)[0]][0]))
else:
image_list = []
n_images = len(image_list)
if n_images != 0:
image_list.sort()
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" class="button_continuum" onclick="show_hide_plots('{2:s}')">
PyBDSF Diagnostic plots
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{2:s}">\n""".format(button_html_name, beam_nr, div_name)
# go throught the different types of plots
# they require a different layout because the plot sizes vary
for m in range(n_images):
if m % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}/{2:s}">
<img src="{0:s}/{1:s}/{2:s}" alt="No image", width="100%">
</a>
</div>\n""".format(page_type, beam_nr, os.path.basename(image_list[m]))
if m % 3 == 2 or m == n_images-1:
html_code += """</div>\n"""
html_code += """</div>\n"""
# add the validation tool
frame_name = glob.glob(
"{0:s}/*continuum_validation_pybdsf_snr5.0_int".format(beam_list[np.where(beam_nr_list == beam_nr)[0]][0]))
if len(frame_name) != 0 and len(frame_name) == 1:
frame_name = frame_name[0]
if os.path.isdir(frame_name):
button_name = "validation_tool{0:s}".format(beam_nr)
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" class="button_continuum" onclick="show_hide_plots('{0:s}')">
Validation Tool
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(button_name)
html_code += """
<div class="w3-container w3-large">
<a href="{0:s}/{1:s}/{2:s}/index.html">Click here to open the validation tool</a> if it is not shown below
</div>
<div class="w3-container">
<iframe class="w3-container" style="width:100%; height:1200px" src="{0:s}/{1:s}/{2:s}/index.html"></iframe>
</div>
</div>\n""".format(page_type, beam_nr, os.path.basename(frame_name))
else:
logger.warning("No continuum validation tool found for beam {0:s}".format(
beam_nr))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" class="button_continuum" onclick="show_hide_plots('{0:s}')">
Validation Tool
</button>
</div>\n""".format(button_html_name)
else:
logger.warning("No continuum validation tool found for beam {0:s}".format(
beam_nr))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" class="button_continuum" onclick="show_hide_plots('{0:s}')">
Validation Tool
</button>
</div>\n""".format(button_html_name)
else:
logger.warning("No continuum plots and validation found for beam {0:s}".format(
beam_nr))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" class="disabled" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>\n""".format(button_html_name, beam_nr)
html_code += """</div>\n"""
# html_code += """
# <div class="gallery" name="{0:s}">
# <p class="warning">
# No plots and validation tool were found for {1:s}
# </p>
# </div>\n""".format(button_html_name, page_type)
else:
logger.warning("No beams for continuum QA found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No beams were found for continuum QA.
</p>
        </div>\n"""
return html_code
| 14,933 | 40.140496 | 226 | py |
dataqa | dataqa-master/report/html_report_content_mosaic.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_mosaic(html_code, qa_report_obs_path, page_type):
"""Function to create the html page for mosaic
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
Here you can inspect the continuum image, PyBDSF diagnostic plots and the validation tool for the mosaic of all available beam images.
The PyBDSF catalog is not accessible from this page, but can be found in the QA directory as a csv table.<br>
This page will only have content after the mosaic was created and the mosaic QA step has been performed.
</p>
</div>\n
"""
# get the diagnostic plots
image_list = glob.glob(
"{0:s}/{1:s}/*png".format(qa_report_obs_path, page_type))
n_images = len(image_list)
if n_images != 0:
div_name = "gallery"
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
PyBDSF Diagnostic plots
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="gallery">\n""".format(div_name)
# go throught the different types of plots
# they require a different layout because the plot sizes vary
for k in range(n_images):
if k % 2 == 0:
html_code += """
<div class="w3-row">\n"""
html_code += """
<div class="w3-half">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image", width="100%">
</a>
</div>\n""".format(page_type, os.path.basename(image_list[k]))
if k % 2 == 1 or k == n_images-1:
html_code += """
</div>\n"""
html_code += """
</div>\n"""
# add the validation tool
frame_name = glob.glob(
"{0:s}/mosaic/*continuum_validation_pybdsf_snr5.0_int".format(qa_report_obs_path))
if len(frame_name) != 0 and len(frame_name) == 1:
frame_name = frame_name[0]
if os.path.isdir(frame_name):
button_name = "validation_tool"
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Validation Tool
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(button_name)
html_code += """
<div class="w3-container w3-large">
<a href="{0:s}/{1:s}/index.html">Click here to open the validation tool</a> if it is not shown below
</div>
<div class="w3-container">
<iframe class="w3-container" style="width:100%; height:1200px" src="{0:s}/{1:s}/index.html"></iframe>
</div>
</div>\n""".format(page_type, os.path.basename(frame_name))
else:
                logger.warning("No validation tool found for mosaic QA")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No validation tool found for mosaic QA.
</p>
</div>\n"""
else:
logger.warning("No mosaic plots found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No validation tool found for mosaic QA.
</p>
</div>\n"""
else:
logger.warning("No mosaic plots found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
                No plots or validation tool were found for mosaic QA.
</p>
</div>\n"""
return html_code
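# A minimal usage sketch (hypothetical path; not part of the pipeline itself): the function only
# appends to an HTML string, so it can be exercised on its own once the report directory of an
# observation exists, e.g.
#
#   html_code = write_obs_content_mosaic(
#       "", "/data/apertif/190428055/qa/report/190428055", "mosaic")
#
# It expects the PyBDSF plots in <qa_report_obs_path>/mosaic/*.png and the validation tool in a
# <qa_report_obs_path>/mosaic/*continuum_validation_pybdsf_snr5.0_int directory.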
| 4,479 | 35.721311 | 170 | py |
dataqa | dataqa-master/report/html_report_content_polarisation.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_polarisation(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for polarisation
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
obs_info (list(str)): Basic information of observation
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
            This page will provide information on the performance of the polarisation module. The content of the polarisation QA has not been defined yet.<br>
</p>
</div>\n
"""
return html_code
| 1,019 | 26.567568 | 161 | py |
dataqa | dataqa-master/report/html_report_content_apercal_logs.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_apercal_log(html_code, qa_report_obs_path, page_type):
"""Function to create the html page for apercal_log
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
Here you can go through the four log files created by apercal.
            Please note that there is an issue with reading the timing information, which is why the
            times for prepare and polarisation are incorrect.
Click on one of the buttons and then on the link to open the log file.
You can use the search function of your browser to search the log files.
</p>
</div>\n
"""
node_list = np.array(
["happili-01", "happili-02", "happili-03", "happili-04"])
for node in node_list:
        # get the log files linked in the apercal_log report directory:
log_file_list = glob.glob(
"{0:s}/{1:s}/apercal*_log_{2:s}.txt".format(qa_report_obs_path, page_type, node))
        # get the timing information csv file in the apercal_log report directory
        # (the name is deliberately wrong to avoid running this part of the code)
csv_file = "{0:s}/{1:s}/apercal_log_timeinfo_{2:s}_wrong_name.csv".format(
qa_report_obs_path, page_type, node)
button_name = "Apercal performance on {0:s}".format(node)
# number of logfiles
n_log_files = len(log_file_list)
if n_log_files != 0 or os.path.exists(csv_file):
# create button
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name = "{0:s}" >\n""".format(node, button_name)
# create a table with the time info
# +++++++++++++++++++++++++++++++++
if os.path.exists(csv_file):
logger.info(
"Creating table with apercal timing information from {0:s}".format(node))
frame_name = "apercal_gallery_table_{0:s}".format(node)
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Apercal timing information
</button>
</div>
<div class="w3-container w3-margin-top w3-margin-bottom w3-hide" name = "{0:s}" >
""".format(frame_name)
# read in data
timinginfo_table = Table.read(csv_file, format="csv")
pipeline_step_list = ["start_pipeline", "prepare",
"preflag", "ccal", "convert", "scal", "continuum", "line", "transfer"]
# # get the minimum and maximum values for each step
# pipeline_step_max_list = np.array([str for pipeline_step in pipeline_step_list])
# pipeline_step_min_list = np.array([str for pipeline_step in pipeline_step_list])
# for k in range(len(pipeline_step_list)):
# pipeline_step_max_list[k] = np.max()
# create the table
# start with the header
html_code += """
<div class="w3-responsive">
<table class="w3-table-all">
<tr class="w3-amber">
<th>beam</th>"""
for pipeline_step in pipeline_step_list:
html_code += """
<th>{0:s}</th>
""".format(pipeline_step)
html_code += """</tr>
"""
# get a list of beams in the file
timinginfo_beam_list = np.unique(timinginfo_table['beam'])
# go through elements in list and fill the table
for timinginfo_beam in timinginfo_beam_list:
html_code += """<tr>
<td>{0:s}</td>
""".format(timinginfo_beam)
# get part of the table for the given beam
timinginfo_table_select = timinginfo_table[np.where(
timinginfo_table['beam'] == timinginfo_beam)]
# go through the pipeline steps
# table_pipeline_steps = timinginfo_table_select['pipeline_steps']
for pipeline_step in pipeline_step_list:
# get the index of the pipeline step in the table
table_pipeline_step_index = np.where(
timinginfo_table_select['pipeline_step'] == pipeline_step)[0]
# not all pipeline steps are in all log files
if len(table_pipeline_step_index) != 0:
html_code += """
<td>{0:s}</td>
""".format(timinginfo_table_select['run_time'][table_pipeline_step_index[0]])
else:
html_code += """
<td>N/A</td>
"""
html_code += """</tr>
"""
html_code += """</table>
</div>
</div>\n"""
else:
                logger.warning(
                    "Could not find timing information file {0:s}".format(csv_file))
# create buttons and iframes for apercal log files
# ++++++++++++++++++++++++++++++++++++++++++++++++
if n_log_files != 0:
            # sort the log files
log_file_list.sort()
            # separate the log files of the line module from the others
log_file_list_no_line = []
log_file_list_line = []
for log_file in log_file_list:
if "line" in os.path.basename(log_file):
log_file_list_line.append(log_file)
else:
log_file_list_no_line.append(log_file)
# go through the list of log files without line
for log_counter in range(len(log_file_list_no_line)):
# create frame name
frame_name = "gallery_apercal_{0:s}_{1:d}".format(
node.split("-")[-1], log_counter)
# check beam
beam = os.path.basename(log_file_list_no_line[log_counter]).split(
"_")[0].split("apercal")[-1]
if beam == "":
log_button_name = "Apercal log"
else:
log_button_name = "Apercal log for beam {0:s}".format(
beam)
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name = "{0:s}" >
""".format(frame_name, log_button_name)
html_code += """
<div class="w3-container w3-large">
<a href="{0:s}/{1:s}">Click here to open the log file</a> if it is not shown below
</div>
<div class="w3-container">
<iframe class="w3-container" style="width:100%; height:1200px" src="{0:s}/{1:s}"></iframe>
</div>
</div>\n""".format(page_type, os.path.basename(log_file_list_no_line[log_counter]))
            # go through the list of log files for the line module
for log_counter in range(len(log_file_list_line)):
# create frame name
frame_name = "gallery_apercal_{0:s}_{1:d}_line".format(
node.split("-")[-1], log_counter)
# check beam
beam = os.path.basename(log_file_list_line[log_counter]).split(
"_")[0].split("apercal")[-1]
if beam == "":
log_button_name = "Apercal log for line"
else:
log_button_name = "Apercal log for line for beam {0:s}".format(
beam)
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name = "{0:s}" >
""".format(frame_name, log_button_name)
html_code += """
<div class="w3-container w3-large">
<a href="{0:s}/{1:s}">Click here to open the log file</a> if it is not shown below
</div>
<div class="w3-container">
<iframe class="w3-container" style="width:100%; height:1200px" src="{0:s}/{1:s}"></iframe>
</div>
</div>\n""".format(page_type, os.path.basename(log_file_list_line[log_counter]))
# # go through the list of log files
# for log_counter in range(n_log_files):
# # get only the the log
# log_file_list.sort()
# frame_name = "gallery_apercal_{0:s}_{1:d}".format(
# node.split("-")[-1], log_counter)
# beam = os.path.basename(log_file_list[log_counter]).split(
# "_")[0].split("apercal")[-1]
# if beam == "":
# if "line" in os.path.basename(log_file_list[log_counter]):
# log_button_name = "Apercal log for line"
# else:
# log_button_name = "Apercal log"
# else:
# if "line" in os.path.basename(log_file_list[log_counter]):
# log_button_name = "Apercal log for line for beam {0:s}".format(
# beam)
# else:
# log_button_name = "Apercal log for beam {0:s}".format(
# beam)
# html_code += """
# <div class="w3-container">
# <button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
# {1:s}
# </button>
# </div>
# <div class="w3-container w3-margin-top w3-hide" name = "{0:s}" >
# """.format(frame_name, log_button_name)
# html_code += """
# <div class="w3-container w3-large">
# <a href="{0:s}/{1:s}">Click here to open the log file</a> if it is not shown below
# </div>
# <div class="w3-container">
# <iframe class="w3-container" style="width:100%; height:1200px" src="{0:s}/{1:s}"></iframe>
# </div>
# </div>\n""".format(page_type, os.path.basename(log_file_list[log_counter]))
# # create iframe supbage
# html_code_iframe_page = """<!DOCTYPE HTML>
# <html lang="en">
# <head>
# <meta http-equiv="content-type" content="text/html; charset=utf-8" />
# <meta name="description" content="" />
# <meta name="keywords" content="" />
# </head>
# <body>
# <a href="{0:s}" target="_self">Click here to open log file</a>
# </body>
# </html>\n""".format(os.path.basename(log_file_list[log_counter]))
# iframe_page_name = "{0:s}/{1:s}/{2:s}".format(qa_report_obs_path, page_type, os.path.basename(
# log_file_list[log_counter]).replace(".txt", ".html"))
# try:
# logger.info(
# "Writing apercal log iframe page {0:s}".format(iframe_page_name))
# html_file = open(iframe_page_name, 'w')
# html_file.write(html_code_iframe_page)
# html_file.close()
# except Exception as e:
# logger.error(e)
# logger.error("writing iframe page content failed")
else:
                logger.warning(
                    "No log files found for {0:s}".format(node))
html_code += """</div>\n"""
else:
            logger.warning(
                "No timing information or log files found for {0:s}".format(node))
# create button
html_code += """
<div class="w3-container">
                <button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format(node, button_name)
# get the csv files
# n_log_files = len(log_file_list)
# if n_log_files != 0:
# log_file_list.sort()
# # go through the log files and create iframes
# for k in range(n_log_files):
# # create content for iframe
# frame_name = "logfile{0:d}".format(k)
# log_file_happili = os.path.basename(
# log_file_list[k]).split("_")[-1].replace("txt", "")
# button_name = "Apercal logfile for {0:s}".format(
# log_file_happili)
# html_code += """<button onclick="show_hide_plots('{0:s}')">
# {1:s}
# </button>\n""".format(frame_name, button_name)
# html_code += """<p>
# <iframe id="log" name="{0:s}" src="{1:s}/{2:s}"></iframe>
# </p>\n""".format(frame_name, page_type, os.path.basename(log_file_list[k]).replace(".txt", ".html"))
# # create iframe supbage
# html_code_iframe_page = """<!DOCTYPE HTML>
# <html lang="en">
# <head>
# <meta http-equiv="content-type" content="text/html; charset=utf-8" />
# <meta name="description" content="" />
# <meta name="keywords" content="" />
# </head>
# <body>
# <a href="{0:s}" target="_self">Click here to open log file</a>
# </body>
# </html>\n""".format(os.path.basename(log_file_list[k]))
# iframe_page_name = "{0:s}/{1:s}/{2:s}".format(qa_report_obs_path, page_type, os.path.basename(
# log_file_list[k]).replace(".txt", ".html"))
# try:
# logger.info(
# "Writing apercal log iframe page {0:s}".format(iframe_page_name))
# html_file = open(iframe_page_name, 'w')
# html_file.write(html_code_iframe_page)
# html_file.close()
# except Exception as e:
# logger.error(e)
# logger.error("writing iframe page content failed")
# else:
# logger.warning("No apercal log files found in {0:s}/{1:s}/".format(
# qa_report_obs_path, page_type))
return html_code
| 17,268 | 43.279487 | 192 | py |
dataqa | dataqa-master/report/html_report_content_line.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_line(html_code, qa_report_obs_path, page_type):
"""Function to create the html page for line
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
This page provides information on the performance of the line module. You can find the following information here:
</p>
<div class="w3-container w3-large">
1. A summary table (not yet available)<br>
            2. For each cube, the spectra of the channel rms per beam. This allows you to look for differences between beams<br>
            3. For each beam, the spectra of the channel rms per cube. <br>
</div>
</div>\n
"""
# Create html code for summary table
# ==================================
table_found = False
if table_found:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('gallery-1')">
Line summary table
</button>
</div>
<div class="w3-container w3-margin-top w3-show" name="gallery-1">\n"""
html_code += """
<p> No table here yet.
</p>\n"""
html_code += """</div>\n"""
else:
logger.warning("No line table found")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('gallery-1')">
Line summary table
</button>
</div>\n"""
# Create html code for cube gallery
# =================================
# get beams
beam_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
# total number of expected beams
n_beams = 40
# total number of expected cubes per beam
n_cubes = 8
    if len(beam_list) != 0:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format("gallery_cubes", "Cubes")
# go through the list of cubes
for cube_counter in range(n_cubes):
# get a list of cubes
cube_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]/*cube{2:d}*.png".format(qa_report_obs_path, page_type, cube_counter))
div_name = "gallery_cube_{0}".format(cube_counter)
# if there plots for this cube, create the gallery
if len(cube_list) != 0:
cube_list.sort()
cube_list = np.array(cube_list)
# create button for source
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Cube {1:d}
</button>
</div>
<div class="w3-container w3-margin-bottom w3-hide" name="{0}">\n""".format(div_name, cube_counter)
# get a list of beams with cubes
beam_list = np.array(
[int(os.path.dirname(cube).split("/")[-1]) for cube in cube_list])
for beam_nr in range(n_beams):
image = cube_list[np.where(beam_list == beam_nr)]
if beam_nr % 5 == 0:
html_code += """<div class="w3-row">\n"""
# if there is a cube for this beam add it to gallery
if len(image) != 0:
html_code += """
<div class="w3-col w3-border" style="width:20%">
<a href="{0:s}/{1:02d}/{2:s}">
<img src="{0:s}/{1:02d}/{2:s}" alt="No image for beam {1:02d}", width="100%">
</a>
<div class="w3-container"><h5>Beam {1:02d}</h5></div>
</div>\n""".format(page_type, beam_nr, os.path.basename(image[0]))
# otherwise keep it empty
else:
html_code += """
<div class="w3-col" style="width:20%">
<a href="#">
<img src="#" alt="No image for beam {0:02d}", width="100%">
</a>
</div>\n""".format(beam_nr)
if beam_nr % 5 == 4 or beam_nr == n_beams-1:
html_code += """</div>\n"""
html_code += """</div>\n"""
else:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
Cube {1:d}
</button>
</div>\n""".format(div_name, cube_counter)
html_code += """</div>\n"""
else:
logger.warning("No beams found for line cube gallery")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_cubes", "Cubes")
# Create html code for image gallery
# ==================================
# get beams
beam_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
n_beams = len(beam_list)
if n_beams != 0:
beam_list.sort()
for k in range(n_beams):
# get the images
images_in_beam = glob.glob("{0:s}/*png".format(beam_list[k]))
div_name = "gallery{0:d}".format(k)
if len(images_in_beam) != 0:
images_in_beam.sort()
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(div_name, os.path.basename(beam_list[k]))
img_counter = 0
for image in images_in_beam:
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}/{2:s}">
<img src="{0:s}/{1:s}/{2:s}" alt="No cube available for beam {1:s}", width="100%">
</a>
<div class="w3-container w3-center"><h5>{2:s}</h5></div>
</div>\n""".format(page_type, os.path.basename(beam_list[k]), os.path.basename(image))
if img_counter % 4 == 3 or img_counter == len(images_in_beam) - 1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
logger.warning("No plot for cube in beam {0:s} found".format(
beam_list[k]))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>\n""".format(div_name, os.path.basename(beam_list[k]))
# html_code += """
# <div class="gallery" name="{0:s}">
# <p class="warning">
# No plots were found for {1:s}
# </p>
# </div>\n""".format(div_name, page_type)
else:
logger.warning("No beams found for cube found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No plots were found for cube
</p>
</div>\n"""
# html_code += """
# <p class="info">
# The overview does not cover line QA yet
# </p>\n0
# """
return html_code
| 9,744 | 37.824701 | 183 | py |
dataqa | dataqa-master/report/html_report_content.py | #!/usr/bin/python2.7
"""
This file contains functionality to create the content for
each subpage of the report
"""
import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
from html_report_content_observing_log import write_obs_content_observing_log
from html_report_content_summary import write_obs_content_summary
from html_report_content_beamweights import write_obs_content_beamweights
from html_report_content_inspection_plots import write_obs_content_inspection_plots
from html_report_content_preflag import write_obs_content_preflag
from html_report_content_crosscal import write_obs_content_crosscal
from html_report_content_selfcal import write_obs_content_selfcal
from html_report_content_continuum import write_obs_content_continuum
from html_report_content_polarisation import write_obs_content_polarisation
from html_report_content_line import write_obs_content_line
from html_report_content_mosaic import write_obs_content_mosaic
from html_report_content_apercal_logs import write_obs_content_apercal_log
logger = logging.getLogger(__name__)
def write_obs_content(page_name, qa_report_path, page_type='', obs_id='', obs_info=None, osa_report=''):
"""
Function to write Observation content
"""
# empty string of html code to start with
html_code = """"""
# html_code = """<p>NOTE: When clicking on the buttons for the first time, please click twice (small bug)</p>"""
qa_report_obs_path = "{0:s}/{1:s}".format(qa_report_path, obs_id)
# create html content for subpage observing_log
# +++++++++++++++++++++++++++++++++++++++++++++
if page_type == 'observing_log':
try:
html_code = write_obs_content_observing_log(
html_code, qa_report_obs_path, page_type)
except Exception as e:
logger.warning("Creating content for observing log failed.")
logger.exception(e)
# create html content for subpage summary
# +++++++++++++++++++++++++++++++++++++++
if page_type == 'summary':
try:
html_code = write_obs_content_summary(
html_code, qa_report_obs_path, page_type, obs_info=obs_info, osa_report=osa_report)
except Exception as e:
logger.warning("Creating content for summary failed.")
logger.exception(e)
    # create html content for subpage beamweights
# ++++++++++++++++++++++++++++++++++++++++++++++++
if page_type == 'beamweights':
try:
html_code = write_obs_content_beamweights(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for beamweights failed.")
logger.exception(e)
# create html content for subpage inspection plots
# ++++++++++++++++++++++++++++++++++++++++++++++++
if page_type == 'inspection_plots':
try:
html_code = write_obs_content_inspection_plots(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for inspection plots failed.")
logger.exception(e)
# create html content for subpage preflag
# +++++++++++++++++++++++++++++++++++++++
if page_type == 'preflag':
try:
html_code = write_obs_content_preflag(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for preflag failed.")
logger.exception(e)
# create html content for subpage crosscal
# ++++++++++++++++++++++++++++++++++++++++
elif page_type == 'crosscal':
try:
html_code = write_obs_content_crosscal(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for crosscal failed.")
logger.exception(e)
# create html content for subpage selfcal
# ++++++++++++++++++++++++++++++++++++
elif page_type == 'selfcal':
try:
html_code = write_obs_content_selfcal(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for selfcal failed.")
logger.exception(e)
# create html content for subpage continuum
# +++++++++++++++++++++++++++++++++++++++++
elif page_type == 'continuum':
try:
html_code = write_obs_content_continuum(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for continuum failed.")
logger.exception(e)
# create html content for subpage polarisation
# +++++++++++++++++++++++++++++++++++++++++
elif page_type == 'polarisation':
try:
html_code = write_obs_content_polarisation(
html_code, qa_report_obs_path, page_type, obs_info=obs_info)
except Exception as e:
logger.warning("Creating content for polarisation failed.")
logger.exception(e)
# create html content for subpage line
# ++++++++++++++++++++++++++++++++++++
elif page_type == 'line':
try:
html_code = write_obs_content_line(
html_code, qa_report_obs_path, page_type)
except Exception as e:
logger.warning("Creating content for line failed.")
logger.exception(e)
# create html content for subpage mosaic
# ++++++++++++++++++++++++++++++++++++++
elif page_type == 'mosaic':
try:
html_code = write_obs_content_mosaic(
html_code, qa_report_obs_path, page_type)
except Exception as e:
logger.warning("Creating content for mosaic failed.")
logger.exception(e)
    # create html content for subpage apercal log
    # as the logs are plain text files, this is a bit more
    # complicated and requires creating a dummy
    # html file. Otherwise, opening them can automatically
    # trigger a download dialog
# +++++++++++++++++++++++++++++++++++++++
elif page_type == "apercal_log":
try:
html_code = write_obs_content_apercal_log(
html_code, qa_report_obs_path, page_type)
except Exception as e:
logger.warning("Creating content for apercal log failed.")
logger.exception(e)
try:
html_file = open(page_name, 'a')
html_file.write(html_code)
html_file.close()
except Exception as e:
logger.exception(e)
logger.error("Writing obs content failed")
return -1
return 1
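# A minimal, self-contained sketch of how this dispatcher could be driven (the values below are
# hypothetical; the real call site is the report-generation script, which is not part of this file):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    example_obs_id = "190428055"  # hypothetical task id
    example_report_path = "/data/apertif/" + example_obs_id + "/qa/report"  # hypothetical path
    for example_page in ['summary', 'preflag', 'crosscal', 'selfcal',
                         'continuum', 'line', 'mosaic', 'apercal_log']:
        # each call appends the body of one page to a (here throw-away) html file
        write_obs_content("/tmp/{0}_{1}.html".format(example_obs_id, example_page),
                          example_report_path,
                          page_type=example_page,
                          obs_id=example_obs_id)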
| 6,830 | 34.952632 | 116 | py |
dataqa | dataqa-master/report/html_report_content_selfcal.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_selfcal(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for selfcal
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
obs_info (list(str)): Basic information from observation
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
This page provides information on the performance of the selfcal module. You can find the following information here:
</p>
<div class="w3-container w3-large">
            1. Table of the selfcal parameters from the pipeline. For example, you can see for which beams amplitude calibration was turned on.<br>
            2. Plots of the self-calibration gain factors for amplitude and phase. These are the most important plots to check. <br>
            3. Selfcal images from the first and last cycle of each beam. If amplitude self-calibration is available, it is chosen as the last cycle.<br>
            4. Selfcal residuals from the first and last cycle of each beam. If amplitude self-calibration is available, it is chosen as the last cycle.<br>
            5. For each beam, plots of all selfcal images and residuals from phase and, if available, amplitude self-calibration.
</div>
</div>\n
"""
qa_report_obs_page_path = os.path.join(qa_report_obs_path, page_type)
# Create html code for summary table
# ==================================
if obs_info is not None:
obs_id = obs_info['Obs_ID'][0]
source_list = np.array(
[obs_info['Target'][0], obs_info['Flux_Calibrator'][0], obs_info['Pol_Calibrator'][0]])
else:
obs_id = os.path.basename(qa_report_obs_path)
source_list = None
# set the file name
crosscal_summary_file = os.path.join(
qa_report_obs_page_path, "{0}_{1}_summary.csv".format(obs_id, page_type))
if os.path.exists(crosscal_summary_file):
summary_table = Table.read(crosscal_summary_file, format="ascii.csv")
else:
summary_table = None
# if there is a summary table
# create tables for each source
if summary_table is not None:
# get the keys for the table
table_keys = summary_table.keys()
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('gallery-1')">
Selfcal summary table
</button>
</div>
<div class="w3-container w3-margin-top w3-margin-bottom w3-hide" name="gallery-1">\n"""
beam_list = summary_table['beam']
# beginning of table
html_code += """
<div class="w3-container w3-center">
<div class="w3-responsive">
<table class="w3-table-all">\n"""
# write the header
html_code += """
<tr class="w3-amber">\n"""
# fill header keys
for key in table_keys:
# make sure that the beam is always there
html_code += """<th>{}</th>\n""".format(
key.replace("targetbeams_", ""))
# close table header
html_code += """</tr>\n"""
for k in range(len(beam_list)):
# open row
html_code += """<tr>\n"""
# now go through keys and fill table
for key in table_keys:
# get the element from table
element = summary_table[key][k]
# check whether it is masked
if np.ma.is_masked(element):
html_code += """<td>-</td>\n"""
else:
html_code += """<td>{0}</td>\n""".format(element)
# close row
html_code += """</tr>\n"""
# end of table
html_code += """
</table>
</div>
</div>\n"""
html_code += """</div>\n"""
else:
logger.warning("No selfcal table found")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('gallery-1')">
Selfcal summary table
</button>
</div>\n"""
# the plots for the selfcal gains
# ===============================
# get the phase plots
phase_list = glob.glob(
"{0:s}/{1:s}/SCAL_phase*png".format(qa_report_obs_path, page_type))
if len(phase_list) != 0:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format("gallery_phase", "Gain factors Phase")
img_counter = 0
for image in phase_list:
if img_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image", width="100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
if img_counter % 3 == 2 or img_counter == len(phase_list)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_phase", "Gain factors Phase")
# get the amplitude plots
amp_list = glob.glob(
"{0:s}/{1:s}/SCAL_amp*png".format(qa_report_obs_path, page_type))
if len(amp_list) != 0:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format("gallery_amp", "Gain factors Amplitude")
img_counter = 0
for image in amp_list:
if img_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image", width="100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
            if img_counter % 3 == 2 or img_counter == len(amp_list)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_amp", "Gain factors Amplitude")
# Gallery of selfcal images
# =========================
# get beams
beam_dir_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
n_beams = 40
image_list = ["" for k in range(2 * n_beams)]
if len(beam_dir_list) != 0:
beam_dir_list.sort()
# go through the beams to get the image directory
for k in range(len(beam_dir_list)):
beam_dir = beam_dir_list[k]
# get the beam which serves as the index for the image list
beam = int(os.path.basename(beam_dir))
# get amplitude selfcal images
image_list_amp = glob.glob(
"{0:s}/amplitude*image.png".format(beam_dir))
image_list_amp.sort()
# get phase selfcal images
image_list_phase = glob.glob(
"{0:s}/phase*image.png".format(beam_dir))
image_list_phase.sort()
if len(image_list_phase) == 0:
image_list_phase = glob.glob(
"{0:s}/*image.png".format(beam_dir))
image_list_phase.sort()
# if there are no phase selfcal images, then there are no amplitude selfcal images
if len(image_list_phase) != 0:
                # the first image is always from phase selfcal
image_first = image_list_phase[0]
# final selfcal image can be from amplitude selfcal
if len(image_list_amp) != 0:
image_last = image_list_amp[-1]
else:
image_last = image_list_phase[-1]
# the first image has an even index
image_list[2 * beam] = image_first
                # the last image has an odd index
image_list[2 * beam + 1] = image_last
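                # To illustrate the resulting layout (beams without images keep empty strings):
                #   image_list = [beam00_first, beam00_last, beam01_first, beam01_last, ...]
                # i.e. beam b occupies indices 2*b and 2*b+1.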
# check that the list of images is not empty
if len(np.unique(image_list)) != 1:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format("gallery_images", "Selfcal images")
img_counter = 0
beam_counter = 0
for k in range(len(image_list)):
# count one beam every second image
if k != 0 and k % 2 == 0:
beam_counter += 1
image = image_list[k]
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:02d}/{2:s}">
<img src="{0:s}/{1:02d}/{2:s}" alt="No image for beam {1:02d}", width="100%">
</a>
<div class="w3-container"><h5>Beam {1:02d}</h5></div>
</div>\n""".format(page_type, beam_counter, os.path.basename(image))
if img_counter % 4 == 3 or img_counter == len(image_list)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
logger.warning("No images found for selfcal image gallery")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_images", "Selfcal images")
else:
logger.warning("No beams found for selfcal image gallery")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_images", "Selfcal images")
# Gallery of selfcal resdiuals
# ============================
# get beams
beam_dir_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
n_beams = 40
image_list = ["" for k in range(2 * n_beams)]
if len(beam_dir_list) != 0:
beam_dir_list.sort()
# go through the beams to get the image directory
for k in range(len(beam_dir_list)):
beam_dir = beam_dir_list[k]
# get the beam which serves as the index for the image list
beam = int(os.path.basename(beam_dir))
# get amplitude selfcal residual
image_list_amp = glob.glob(
"{0:s}/amplitude*residual.png".format(beam_dir))
image_list_amp.sort()
# get phase selfcal residual
image_list_phase = glob.glob(
"{0:s}/phase*residual.png".format(beam_dir))
image_list_phase.sort()
if len(image_list_phase) == 0:
image_list_phase = glob.glob(
"{0:s}/*residual.png".format(beam_dir))
image_list_phase.sort()
            # if there are no phase selfcal residuals, then there are no amplitude selfcal residuals
if len(image_list_phase) != 0:
                # the first image is always from phase selfcal
image_first = image_list_phase[0]
# final selfcal image can be from amplitude selfcal
if len(image_list_amp) != 0:
image_last = image_list_amp[-1]
else:
image_last = image_list_phase[-1]
# the first image has an even index
image_list[2 * beam] = image_first
                # the last image has an odd index
image_list[2 * beam + 1] = image_last
# check that the list of images is not empty
if len(np.unique(image_list)) != 1:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format("gallery_residuals", "Selfcal residuals")
img_counter = 0
beam_counter = 0
for k in range(len(image_list)):
# count one beam every second image
if k != 0 and k % 2 == 0:
beam_counter += 1
image = image_list[k]
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:02d}/{2:s}">
<img src="{0:s}/{1:02d}/{2:s}" alt="No image for beam {1:02d}", width="100%">
</a>
<div class="w3-container"><h5>Beam {1:02d}</h5></div>
</div>\n""".format(page_type, beam_counter, os.path.basename(image))
if img_counter % 4 == 3 or img_counter == len(image_list)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
logger.warning("No images found for selfcal residual gallery")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_residuals", "Selfcal residuals")
else:
logger.warning("No beams found for selfcal residual gallery")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format("gallery_residuals", "Selfcal residual")
# Selfcal iteration maps sorted by beam
# =====================================
# get beams
beam_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
n_beams = len(beam_list)
if n_beams != 0:
beam_list.sort()
for k in range(n_beams):
button_html_name = "beam{0:d}".format(k)
div_name = "gallery{0:d}".format(k)
# get the diagnostic plots
image_list_phase = glob.glob(
"{0:s}/phase*png".format(beam_list[k]))
image_list_amp = glob.glob(
"{0:s}/amplitude*png".format(beam_list[k]))
if len(image_list_phase) == 0:
image_list_phase = glob.glob(
"{0:s}/*image.png".format(beam_dir))
image_list_phase.sort()
n_images = len(image_list_phase) + len(image_list_amp)
if n_images != 0:
image_list_phase.sort()
image_list_amp.sort()
image_list = np.append(image_list_phase, image_list_amp)
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(div_name, os.path.basename(beam_list[k]))
img_counter = 0
for image in image_list:
selfcal_type = os.path.basename(image).split("_")[0]
major_cycle = os.path.basename(image).split("_")[1]
minor_cycle = os.path.basename(image).split("_")[2]
image_type = os.path.basename(image).split(".")[
0].split("_")[-1]
if image_type == "image":
caption = "{0:s} image: major {1:s}, minor {2:s}".format(
selfcal_type, major_cycle, minor_cycle)
elif image_type == "residual":
caption = "{0:s} residual: major {1:s}, minor {2:s}".format(
selfcal_type, major_cycle, minor_cycle)
else:
caption = ""
if img_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}/{2:s}">
<img src="{0:s}/{1:s}/{2:s}" alt="No image", width="100%">
</a>
<div class="w3-container"><h5>{3:s}</h5></div>
</div>\n""".format(page_type, os.path.basename(beam_list[k]), os.path.basename(image), caption)
if img_counter % 4 == 3 or img_counter == n_images-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
# # go throught the different types of plots
# # they require a different layout because the plot sizes vary
# html_code += """<div class="gallery_column" name="{0:s}">\n""".format(
# div_name)
# for m in range(n_images):
# if m % 4 == 0:
# html_code += """<div class="gallery_row">"""
# html_code += """<div class="mosaic_img">
# <a href="{0:s}/{1:s}/{2:s}">
# <img src="{0:s}/{1:s}/{2:s}" alt="No image", width="100%">
# </a>
# </div>\n""".format(page_type, os.path.basename(beam_list[k]), os.path.basename(image_list[m]))
# if m % 2 != 0 or m == n_images-1:
# html_code += """</div>\n"""
else:
logger.warning("No selfcal maps found in {0:s}".format(
os.path.basename(beam_list[k])))
html_code += """
<div class="w3-container">
                    <button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>\n""".format(button_html_name, os.path.basename(beam_list[k]))
else:
logger.warning("No beams for selfcal QA found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No beams were found for selfcal QA.
</p>
</div>\n"""
# html_code += """
# <p class="info">
# The overview does not cover selfcal QA yet
# </p>\n
# """
return html_code
| 21,868 | 37.232517 | 192 | py |
dataqa | dataqa-master/report/html_report_content_crosscal.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_crosscal(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for crosscal
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>
This page provides information on the performance of the crosscal module. You can find the following information here:
</p>
<div class="w3-container w3-large">
1. Table of the crosscal quality parameters from the pipeline separated for target, flux calibrator and polarisation calibrator.<br>
2. Various plots related to the cross calibration<br>
</div>
</div>\n
"""
qa_report_obs_page_path = os.path.join(qa_report_obs_path, page_type)
# Create html code for summary table
# ==================================
if obs_info is not None:
obs_id = obs_info['Obs_ID'][0]
source_list = np.array(
[obs_info['Target'][0], obs_info['Flux_Calibrator'][0], obs_info['Pol_Calibrator'][0]])
else:
obs_id = os.path.basename(qa_report_obs_path)
source_list = None
# set the file name
crosscal_summary_file = os.path.join(
qa_report_obs_page_path, "{0}_{1}_summary.csv".format(obs_id, page_type))
if os.path.exists(crosscal_summary_file):
summary_table = Table.read(crosscal_summary_file, format="ascii.csv")
else:
summary_table = None
# if there is a summary table
# create tables for each source
if summary_table is not None:
# get the keys for the table
table_keys = summary_table.keys()
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('gallery-1')">
Crosscal summary table
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="gallery-1">\n"""
beam_list = summary_table['beam']
# go through the list of sources
for pos, source in enumerate(source_list):
if source != '':
# if it is the first source, get the target information
if pos == 0:
keyword = "target"
# if is the second, get the flux calibrator
elif pos == 1:
keyword = "fluxcal"
# otherwise it is the pol calibrator
else:
keyword = "polcal"
div_name = "gallery_crosscal_{0}".format(source)
# create button for source
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-bottom w3-hide" name="{0}">\n""".format(div_name, source)
# beginning of table
html_code += """
<div class="w3-container w3-center">
<div class="w3-responsive">
<table class="w3-table-all">\n"""
# write the header
html_code += """
<tr class="w3-amber">\n"""
# fill header keys
for key in table_keys:
# make sure that the beam is always there
if "beam" == key:
html_code += """<th>{}</th>\n""".format(
key)
elif keyword in key:
html_code += """<th>{}</th>\n""".format(
key.replace("ccal_", ""))
# close table header
html_code += """</tr>\n"""
# go through the list for each beam
for k in range(len(beam_list)):
# open row
html_code += """<tr>\n"""
# now go through keys and fill table
for key in table_keys:
if "beam" == key:
element = summary_table[key][k]
elif keyword in key:
element = summary_table[key][k]
# if it is neither the beam nor the key, continue
else:
continue
# check whether it is masked
if np.ma.is_masked(element):
html_code += """<td>-</td>\n"""
else:
html_code += """<td>{0}</td>\n""".format(element)
# close row
html_code += """</tr>\n"""
# end of table
html_code += """
</table>
</div>
</div>\n"""
# closing the source button div
html_code += """</div>\n"""
else:
if pos == 2:
logger.warning(
"Could not find polarised calibrator")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format(div_name, "Pol Calibrator")
# closing the table button div
html_code += """</div>\n"""
else:
logger.warning("No summary table available")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('gallery-1')">
Crosscal summary table
</button>
</div>\n"""
# Create html code for plots
# ==========================
# the different plots
categories = ["Autocorrelation_Antenna", "Autocorrelation_Beam", "BP_amp", "BP_phase", "Gain_amp", "Gain_phase", "K_", "Df_amp", "Df_phase", "Kcross", "Xf_amp",
"Xf_phase", "Raw_amp", "Raw_phase", "Model_amp", "Model_phase", "Corrected_amp", "Corrected_phase"]
categories_titles = ["Autocorrelation per Antenna", "Autocorrelation per Beam", "Bandpass Amplitude", "Bandpass Phase", "Gain factors Amplitude", "Gain factors Phase", "Global Delay", "Leakage Amplitude", "Leakage Phase", "Cross Hand Delay",
"Polarization Angle Amplitude", "Polarization Angle Phase", "Raw visibility Amplitude", "Raw Visibility Phase", "Model Amplitude", "Model Phase", "Corrected Amplitude", "Corrected Phase"]
n_cats = len(categories)
# get the images
image_list = glob.glob(
"{0:s}/{1:s}/*png".format(qa_report_obs_path, page_type))
if len(image_list) != 0:
        # go through the different types of plots
for k in range(n_cats):
# get list of plots for this category
cat_plots = [pl for pl in image_list if categories[k] in pl]
cat_plots.sort()
div_name = "gallery{0:d}".format(k)
if len(cat_plots) != 0:
# html_code += """<div class="plots">
# <button onclick="show_hide_plots()">
# <h3>{0:s}</h3>
# </button>\n""".format(categories_titles[k])
# for image in cat_plots:
# html_code += """<div class="gallery" id="gallery">
# <a href="{0:s}">
# <img src="{0:s}" alt="No image", width="100%">
# </a>
# </div>\n""".format(image)
# html_code += """</div>\n"""
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(div_name, categories_titles[k])
img_counter = 0
for image in cat_plots:
if categories[k] == "Autocorrelation_Beam":
if img_counter % 5 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-col w3-border" style="width:20%">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image", width="100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
if img_counter % 5 == 4 or img_counter == len(cat_plots)-1:
html_code += """</div>\n"""
else:
if img_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image", width="100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
if img_counter % 3 == 2 or img_counter == len(cat_plots)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
div_name = "gallery{0:d}".format(k)
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format(div_name, categories_titles[k])
# html_code += """
# <div class="gallery" name="{0:s}">
# <p class="warning">
# No plots were found for {1:s}
# </p>
# </div>\n""".format(div_name, categories_titles[k])
else:
logger.warning("No crosscal plots found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No plots were found for crosscal
</p>
</div>\n"""
return html_code
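# For reference (an inference, not guaranteed by this file alone): the summary csv read above,
# <qa_report_obs_path>/crosscal/<obs_id>_crosscal_summary.csv, is presumably the per-beam table
# produced by report/make_nptabel_summary.make_nptabel_csv; the per-source blocks simply filter
# its columns by the "target", "fluxcal" and "polcal" substrings in the column names.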
| 11,836 | 39.537671 | 245 | py |
dataqa | dataqa-master/report/html_report_content_observing_log.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_observing_log(html_code, qa_report_obs_path, page_type):
"""Function to create the html page for the observing log
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
        <p>Here you will find information from the observing log once it becomes available.</p>
</div>\n
"""
# # Create html code for inspection plots
# # =====================================
# # get images
# image_list = glob.glob(
# "{0:s}/{1:s}/*.png".format(qa_report_obs_path, page_type))
# if len(image_list) != 0:
# html_code += """
# <div class="w3-container w3-margin-top w3-show">\n"""
# for image in image_list:
# html_code += """
# <div class="w3-half">
# <a href="{0:s}/{1:s}">
# <img src="{0:s}/{1:s}" alt="No image" style="width:100%">
# </a>
# <div class="w3-container w3-center">
# <h5>Summary plot</h5>
# </div>
# </div>\n""".format(page_type, os.path.basename(image))
# html_code += """</div>\n"""
# else:
# logger.warning("No summary plot found found")
# html_code += """
# <div class="w3-container w3-large w3-text-red">
# <p>
# No plots were found for summary
# </p>
# </div>\n"""
return html_code
| 1,974 | 29.859375 | 107 | py |
dataqa | dataqa-master/report/make_nptabel_summary.py | #!/usr/bin/env python
import glob
import os
import numpy as np
import logging
import csv
import socket
# ----------------------------------------------
# read data from np file
logger = logging.getLogger(__name__)
def find_sources(obs_id, data_dir):
"""
Identify preflag sources e.g. target name and calibrators
"""
sources = []
logs = glob.glob(data_dir+'/'+str(obs_id)+'/param_01_preflag_*.npy')
for i in range(len(logs)):
sources.append(logs[i][41:-4])
return sources
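# A small illustration of the hard-coded slice above (hypothetical path): index 41 covers the
# fixed prefix '/data/apertif/' plus a 9-digit task id plus '/param_01_preflag_', so
#   '/data/apertif/190428055/param_01_preflag_3C196.npy'[41:-4]  ->  '3C196'
# For data in a different location, os.path.basename(log)[len('param_01_preflag_'):-len('.npy')]
# would be a more robust alternative.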
def simplify_data(d, beamnum):
"""
select relevant beam value from a list in a dictionary
"""
dict = {}
for k in d.keys():
if np.iterable(d[k]):
if len(d[k]) == 40:
dict.update({k: d[k][beamnum]})
if len(d[k]) == 12:
chunks = ''
for i in range(12):
if d[k][i] == False:
chunks = chunks+'F,'
else:
chunks = chunks+'T,'
dict.update({k: chunks})
else:
dict.update({k: d[k]})
return dict
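# A small illustration of simplify_data (hypothetical keys and values): 40-element per-beam
# entries are reduced to the requested beam, 12-element chunk flags are flattened into a
# 'T,'/'F,' string, and non-iterable scalars pass through unchanged. Note that plain strings are
# iterable but have neither length 40 nor 12, so they are silently dropped.
#
#   simplify_data({'preflag_status_per_beam': [True] * 40,
#                  'preflag_chunk_flags': [True] * 11 + [False],
#                  'n_chunks': 12}, beamnum=5)
#   # -> {'preflag_status_per_beam': True,
#   #     'preflag_chunk_flags': 'T,T,T,T,T,T,T,T,T,T,T,F,',
#   #     'n_chunks': 12}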
def extract_beam(path, beamnum, module, source):
"""
Function to return numpy files contents as a dictionary filtered for certain keys
Args:
path (str): Directory of the data
beamnum (int): Beam number
        module (str): name of the apercal module e.g. 'preflag', 'convert', 'crosscal'
source (str): name of the source or calibrators for preflag, for other modules it should be an empty string ('')
Returns:
a dictionary with information extracted from a numpy log file
"""
#logger.info('Checking NPY files for beam {}'.format(beamnum))
continuum_filters = ['targetbeams_mf_status', 'targetbeams_chunk_status']
selfcal_filters = ['targetbeams_average', 'targetbeams_flagline',
'targetbeams_parametric', 'targetbeams_phase_status', 'targetbeams_amp_status']
crosscal_filters = ['calibration_calibrator_finished', 'calibration_restart', 'calibration_try_counter', 'fluxcal_apgains', 'fluxcal_bandpass', 'fluxcal_calibration_restart', 'fluxcal_calibration_try_counter',
'fluxcal_globaldelay', 'fluxcal_initialphase', 'fluxcal_leakage', 'fluxcal_model', 'fluxcal_transfer', 'polcal_crosshanddelay', 'polcal_model', 'polcal_polarisationangle', 'polcal_transfer', 'targetbeams_transfer']
if module == 'selfcal' or module == 'continuum' or module == 'transfer':
f = glob.glob(os.path.join(path, 'param_{:02d}.npy'.format(beamnum)))
elif module == "crosscal":
f = glob.glob(os.path.join(
path, 'param_{:02d}_crosscal.npy'.format(beamnum)))
if len(f) == 0:
f = glob.glob(os.path.join(
path, 'param_{:02d}.npy'.format(beamnum)))
else:
f = glob.glob(os.path.join(
path, 'param_{:02d}*{}*{}.npy'.format(beamnum, module, source)))
res = {}
dict_cut = {}
# print(f)
if len(f) != 0:
d = np.load(f[0]).item()
for k in d.keys():
if module == 'preflag' and "targetbeams" in k:
res.update({k: d[k]})
if module == 'crosscal':
for j in range(len(crosscal_filters)):
if crosscal_filters[j] in k:
res.update({crosscal_filters[j]: d[k]})
#res.update({k: d[k]})
if module == 'convert' and "UVFITS2MIRIAD" in k:
res.update({k: d[k]})
if module == 'convert' and "MS2UVFITS" in k:
res.update({k: d[k]})
if module == 'continuum':
for j in range(len(continuum_filters)):
if continuum_filters[j] in k:
res.update({continuum_filters[j]: d[k]})
if module == 'selfcal':
for j in range(len(selfcal_filters)):
if selfcal_filters[j] in k:
res.update({selfcal_filters[j]: d[k]})
if module == 'transfer' and "transfer" in k:
res.update({'transfer': d[k]})
dict_cut = simplify_data(res, beamnum)
else:
#print('No file for beam: ', i)
logger.info("No file for beam: {0} in {1}".format(beamnum, path))
logger.info("Extracting data for beam {} ... Done".format(beamnum))
dict_cut.update({'beam': beamnum})
return dict_cut
def extract_all_beams(obs_id, module, qa_dir):
"""
    Combine the data from all beams into a list of dictionaries.
    Args:
        obs_id (str): ID of the observation
        module (str): name of the apercal module e.g. 'preflag', 'convert', 'crosscal'
        qa_dir (str): Path to the QA directory of the observation
    Returns:
        a list of dictionaries (one per beam) with the information extracted from the numpy log files
"""
if "data" in qa_dir:
# beams_1 = '/data/apertif/'+str(obs_id)+'/'
# beams_2 = '/data2/apertif/'+str(obs_id)+'/'
# beams_3 = '/data3/apertif/'+str(obs_id)+'/'
# beams_4 = '/data4/apertif/'+str(obs_id)+'/'
        # if not on happili, assume all beams
# are on the same node. Not the best solution
# for this, but requires the least amount of
# changes to the logic below
obs_dir = os.path.dirname(qa_dir.rstrip("/"))
if socket.gethostname() == "happili-01":
# this gives /data/apertif/<taskid>
beams_1 = obs_dir + "/"
beams_2 = obs_dir.replace("/data", "/data2") + "/"
beams_3 = obs_dir.replace("/data", "/data3") + "/"
beams_4 = obs_dir.replace("/data", "/data4") + "/"
else:
beams_1 = obs_dir + "/"
beams_2 = obs_dir + "/"
beams_3 = obs_dir + "/"
beams_4 = obs_dir + "/"
    else:
        obs_dir = os.path.dirname(qa_dir.rstrip("/"))
        if socket.gethostname() == "happili-01":
beams_1 = obs_dir + "/"
beams_2 = obs_dir.replace("/tank", "/tank2") + "/"
beams_3 = obs_dir.replace("/tank", "/tank3") + "/"
beams_4 = obs_dir.replace("/tank", "/tank4") + "/"
else:
beams_1 = obs_dir + "/"
beams_2 = obs_dir + "/"
beams_3 = obs_dir + "/"
beams_4 = obs_dir + "/"
beamnum = np.arange(40)
dict_beams = []
if module == 'preflag':
source_list = find_sources(obs_id, os.path.dirname(qa_dir))
for j in range(len(source_list)):
for i in beamnum:
if i < 10:
dict_beams_v1 = (extract_beam(
beams_1, i, module, source_list[j]))
dict_beams_v1.update({'source': source_list[j]})
dict_beams.append(dict_beams_v1)
if i >= 10 and i < 20:
dict_beams_v1 = (extract_beam(
beams_2, i, module, source_list[j]))
dict_beams_v1.update({'source': source_list[j]})
dict_beams.append(dict_beams_v1)
if i >= 20 and i < 30:
dict_beams_v1 = (extract_beam(
beams_3, i, module, source_list[j]))
dict_beams_v1.update({'source': source_list[j]})
dict_beams.append(dict_beams_v1)
if i >= 30 and i < 40:
dict_beams_v1 = (extract_beam(
beams_4, i, module, source_list[j]))
dict_beams_v1.update({'source': source_list[j]})
dict_beams.append(dict_beams_v1)
else:
source = ''
for i in beamnum:
if i < 10:
dict_beams.append(extract_beam(beams_1, i, module, source))
if i >= 10 and i < 20:
dict_beams.append(extract_beam(beams_2, i, module, source))
if i >= 20 and i < 30:
dict_beams.append(extract_beam(beams_3, i, module, source))
if i >= 30 and i < 40:
dict_beams.append(extract_beam(beams_4, i, module, source))
return dict_beams
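# Illustrative usage sketch, not part of the original module: extract_all_beams()
# returns a list with one dictionary per beam. The observation ID and QA
# directory below are assumptions made only for this example.
def _example_extract_all_beams():
    beam_info = extract_all_beams('190602049', 'preflag', '/data/apertif/190602049/qa/')
    # every entry carries at least the beam number; beams without a param file
    # only contain the 'beam' key
    for beam_dict in beam_info:
        print("beam {0}: {1} entries".format(beam_dict['beam'], len(beam_dict)))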
def make_nptabel_csv(obs_id, module, qa_dir, output_path=''):
"""
Creates a dictionary with the summary into
from the numpy files and saves it as a csv file.
Args:
obs_id (str): ID of observation
module (str): Apercal module for which information are extracted
output_path (str): Optional path to where the information is save (default current directory)
"""
logger.info(
"Reading param information for {0} of {1}".format(module, obs_id))
summary_data = extract_all_beams(obs_id, module, qa_dir)
logger.info(
"Reading param information for {0} of {1}... Done".format(module, obs_id))
i = 0
if module == 'transfer':
while len(summary_data[i]) <= 1:
i += 1
if len(summary_data[i]) > 1:
break
else:
while len(summary_data[i]) <= 2:
i += 1
if len(summary_data[i]) > 2:
break
csv_columns = summary_data[i].keys()
csv_columns.sort()
dict_data = summary_data
# save the file
if output_path == '':
csv_file = str(obs_id)+"_"+str(module)+"_summary.csv"
else:
csv_file = os.path.join(output_path, str(
obs_id)+"_"+str(module)+"_summary.csv")
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except Exception as e:
logger.warning("Creating file {} failed".format(csv_file))
logger.exception(e)
# print("Created file: "+str(obs_id)+"_"+str(module)+"_summary.csv")
logger.info("Creating file: {} ... Done".format(csv_file))
| 9,789 | 32.993056 | 238 | py |
dataqa | dataqa-master/report/merge_ccal_scal_plots.py | from dataqa.scandata import get_default_imagepath
import argparse
import time
import logging
import os
import glob
import socket
import numpy as np
from PIL import Image
from apercal.libs import lib
from apercal.subs.managefiles import director
from time import time
import pymp
logger = logging.getLogger(__name__)
def merge_plots(image_list, new_image_name=None):
"""This function does the actual merging
Args:
image_list (list(str)): List of images to merge with full path
new_image_name (str): Optional name of new image
"""
# name of the new image
# =====================
if new_image_name is None:
# will most likely overwrite the existing image
new_image_name = image_list[0]
# check that the new image will be saved in /data/
if new_image_name.split("/")[1] != "data":
new_image_name = new_image_name.replace(
new_image_name.split("/")[1], "data")
# now go through the images and overlay them
# ==========================================
for k in range(len(image_list)):
# the first image will be the background
if k == 0:
background = Image.open(image_list[k])
background = background.convert("RGBA")
else:
overlay = Image.open(image_list[k])
overlay = overlay.convert("RGBA")
# get the image data of the overlay
overlay_data = overlay.load()
# get size of image
img_width, img_height = overlay.size
# go through the pixels and make the white ones transparent
for x_pix in range(img_width):
for y_pix in range(img_height):
if overlay_data[x_pix, y_pix] == (255, 255, 255, 255):
overlay_data[x_pix, y_pix] = (255, 255, 255, 0)
# create a new image and merge it with background
if k == 0:
new_image = Image.new("RGBA", background.size)
new_image = Image.alpha_composite(new_image, background)
else:
new_image = Image.alpha_composite(new_image, overlay)
# save the merged image
new_image.save(new_image_name, "PNG")
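# Illustrative usage sketch, not part of the original module: merge_plots() overlays
# copies of the same plot from the four data disks into a single image. The
# observation path and plot name below are assumptions made only for this example.
def _example_merge_plots():
    plot_name = "Gain_amp_all.png"
    image_list = glob.glob(
        "/data*/apertif/190602049/qa/crosscal/{0:s}".format(plot_name))
    # without new_image_name the merged image replaces the copy on /data/
    if len(image_list) != 0:
        merge_plots(image_list)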
def run_merge_plots(qa_dir, do_ccal=True, do_scal=True, do_backup=True, run_parallel=False, n_cores=5):
""" This function merges the crosscal and/or selfcal plots
that are split by beam from the different data directories.
    Args:
        qa_dir (str): Directory of QA
        do_ccal (bool): Set to merge crosscal plots (default: True)
        do_scal (bool): Set to merge selfcal plots (default: True)
        do_backup (bool): Create a backup of the original plots before merging (default: True)
        run_parallel (bool): Merge the plots in parallel using pymp (default: False)
        n_cores (int): Number of cores used for parallel merging (default: 5)
    """
# Basic settings
# ==============
# get the host name
host_name = socket.gethostname()
# start time
start_time = time()
# it does not make sense to run this script from another happili node
if host_name != "happili-01":
logger.error(
"You are not on happili-01. This script will not work here. Abort")
return -1
# set whether both crosscal and selfcal plot shave to be merged or only one
if not do_ccal and not do_scal:
do_ccal = True
do_scal = True
logger.info("Merging crosscal and selfcal plots")
elif do_ccal and not do_scal:
logger.info("Merging only crosscal plots")
elif not do_ccal and do_scal:
logger.info("Merging only selfcal plots")
# Merge the crosscal plots
# ========================
if do_ccal:
qa_dir_crosscal = os.path.join(qa_dir, "crosscal")
logger.info("## Merging crosscal plots in {}".format(qa_dir_crosscal))
# create a backup of the original files
if do_backup:
# final path of backup
qa_dir_crosscal_backup = os.path.join(
qa_dir_crosscal, "crosscal_backup")
# temporary path of backup for copying
qa_dir_crosscal_backup_tmp = "{}_backup".format(qa_dir_crosscal)
if os.path.exists(qa_dir_crosscal_backup):
logger.info("Backup of crosscal plots already exists")
else:
# copy the original directory
lib.basher("cp -r " + qa_dir_crosscal +
" " + qa_dir_crosscal_backup_tmp)
# move the directory
lib.basher("mv " + qa_dir_crosscal_backup_tmp +
" " + qa_dir_crosscal + "/")
logger.info("Backup of crosscal plots created in {}".format(
qa_dir_crosscal_backup))
# get a list all crosscal plots
ccal_plot_list = glob.glob(
"{0:s}/*.png".format(qa_dir_crosscal.replace("/data", "/data*")))
if len(ccal_plot_list) == 0:
logger.warning("No crosscal plots were found.")
else:
# get a unique list of plot names
ccal_png_name_list = np.array(
[os.path.basename(plot) for plot in ccal_plot_list])
ccal_png_name_list = np.unique(ccal_png_name_list)
if run_parallel:
with pymp.Parallel(n_cores) as p:
# go through all the images and merge them
for png_index in p.range(len(ccal_png_name_list)):
png_name = ccal_png_name_list[png_index]
# time for merging a single plot
start_time_plot = time()
logger.info("Merging {0:s}".format(png_name))
# get a list of plots with this name
ccal_plot_list = glob.glob(
"{0:s}/{1:s}".format(qa_dir_crosscal.replace("/data", "/data*"), png_name))
# now merge the images
try:
merge_plots(ccal_plot_list)
except Exception as e:
logger.warning(
"Merging plots for {0} failed".format(png_name))
logger.exception(e)
else:
logger.info(
"Merged plots for {0} successfully ({1:.0f}s)".format(png_name, time() - start_time_plot))
else:
# go through all the images and merge them
for png_name in ccal_png_name_list:
# time for merging a single plot
start_time_plot = time()
logger.info("Merging {0:s}".format(png_name))
# get a list of plots with this name
ccal_plot_list = glob.glob(
"{0:s}/{1:s}".format(qa_dir_crosscal.replace("/data", "/data*"), png_name))
# now merge the images
try:
merge_plots(ccal_plot_list)
except Exception as e:
logger.warning(
"Merging plots for {0} failed".format(png_name))
logger.exception(e)
else:
logger.info(
"Merged plots for {0} successfully ({1:.0f}s)".format(png_name, time() - start_time_plot))
# Merge the selfcal plots
# ========================
if do_scal:
qa_dir_selfcal = os.path.join(qa_dir, "selfcal")
logger.info("## Merging selfcal plots in {}".format(qa_dir_selfcal))
# create a backup of the original files
if do_backup:
# final path of backup
qa_dir_selfcal_backup = os.path.join(
qa_dir_selfcal, "selfcal_gain_plots_backup")
if os.path.exists(qa_dir_selfcal_backup):
logger.info("Backup of selfcal gain plots already exists")
else:
os.mkdir(qa_dir_selfcal_backup)
# copy the original directory
lib.basher("cp " + os.path.join(qa_dir_selfcal, "*.png") +
" " + qa_dir_selfcal_backup + "/")
logger.info("Backup of selfcal plots created in {}".format(
qa_dir_selfcal_backup))
# get a list all selfcal plots
scal_plot_list = glob.glob(
"{0:s}/*.png".format(qa_dir_selfcal.replace("/data", "/data*")))
if len(scal_plot_list) == 0:
logger.warning("No selfcal plots were found.")
else:
# get a unique list of plot names
scal_png_name_list = np.array(
[os.path.basename(plot) for plot in scal_plot_list])
scal_png_name_list = np.unique(scal_png_name_list)
if run_parallel:
with pymp.Parallel(n_cores) as p:
# go through all the images and merge them
for png_index in p.range(len(scal_png_name_list)):
# time for merging a single plot
start_time_plot = time()
png_name = scal_png_name_list[png_index]
logger.info("Merging {0:s}".format(png_name))
# get a list of plots with this name
scal_plot_list = glob.glob(
"{0:s}/{1:s}".format(qa_dir_selfcal.replace("/data", "/data*"), png_name))
# now merge the images
try:
merge_plots(scal_plot_list)
except Exception as e:
logger.warning(
"Merging plots for {0} failed".format(png_name))
logger.exception(e)
else:
logger.info(
"Merged plots for {0} successfully ({1:.0f}s)".format(png_name, time() - start_time_plot))
else:
# go through all the images and merge them
for png_name in scal_png_name_list:
# time for merging a single plot
start_time_plot = time()
logger.info("Merging {0:s}".format(png_name))
# get a list of plots with this name
scal_plot_list = glob.glob(
"{0:s}/{1:s}".format(qa_dir_selfcal.replace("/data", "/data*"), png_name))
# now merge the images
try:
merge_plots(scal_plot_list)
except Exception as e:
logger.warning(
"Merging plots for {0} failed".format(png_name))
logger.exception(e)
else:
logger.info(
"Merged plots for {0} successfully ({1:.0f}s)".format(png_name, time() - start_time_plot))
logger.info("## Merging ... Done ({0:.0f}s)".format(time() - start_time))
| 11,002 | 36.810997 | 122 | py |
dataqa | dataqa-master/report/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/report/html_report_content_summary.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_summary(html_code, qa_report_obs_path, page_type, obs_info=None, osa_report=''):
"""Function to create the html page for summary
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
obs_info (dict): Information about the observation
        osa_report (str): Path to the OSA report; empty string if no report is added
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large">
<p>Here you will find a summary of the observation.</p>
</div>\n
"""
# Create html code for the summary table
# ======================================
if obs_info is None:
obs_id = ""
target = ""
fluxcal = ""
polcal = ""
osa = ""
else:
obs_id = obs_info['Obs_ID'][0]
target = obs_info['Target'][0]
fluxcal = obs_info['Flux_Calibrator'][0]
polcal = obs_info['Pol_Calibrator'][0]
osa = obs_info['OSA'][0]
html_code += """
<div class="w3-container w3-center">
<div class="w3-responsive">
<table class="w3-table-all">
<tr class="w3-amber">
<th>Obs ID</th>
<th>Target</th>
<th>Flux calibrator</th>
<th>Pol calibrator</th>
<th>OSA</th>
</tr>
<tr>
<td>{0}</td>
<td>{1}</td>
<td>{2}</td>
<td>{3}</td>
<td>{4}</td>
</tr>
</table>
</div>
</div>\n\n""".format(obs_id, target, fluxcal, polcal, osa)
# Create html code for the osa report table
# =========================================
if osa_report != '':
pass
# check that osa report really exists:
# if os.path.exist:
# obs_id = ""
# target = ""
# fluxcal = ""
# polcal = ""
# osa = ""
# else:
# obs_id = obs_info['Obs_ID'][0]
# target = obs_info['Target'][0]
# fluxcal = obs_info['Flux_Calibrator'][0]
# polcal = obs_info['Pol_Calibrator'][0]
# osa = obs_info['OSA'][0]
# html_code += """
# <div class="w3-container w3-center">
# <div class="w3-responsive">
# <table class="w3-table-all">
# <tr class="w3-amber">
# <th>Obs ID</th>
# <th>Target</th>
# <th>Flux calibrator</th>
# <th>Pol calibrator</th>
# <th>OSA</th>
# </tr>
# <td>{0:s}</td>
# <td>{1:s}</td>
# <td>{2:s}</td>
# <td>{3:s}</td>
# <td>{4:s}</td>
# </table>
# </div>
# </div>\n\n""".format(obs_id, target, fluxcal, polcal, osa)
# Create html code for summary plot
# =================================
# get images
image_list = glob.glob(
"{0:s}/{1:s}/*.png".format(qa_report_obs_path, page_type))
if len(image_list) != 0:
image_list.sort()
# Make gallery for selfcal
html_code += """
<div class="w3-container w3-margin-top w3-show">
<h3> Selfcal CB plots </h3>
            <p> These plots summarise the selfcal step of the pipeline for each of the compound beams. The left plot shows the beam numbers for reference. The middle and right plots show whether amplitude and phase selfcalibration were performed. A missing beam would be gray. Amplitude selfcalibration is only turned on if the SNR is high enough. Phase selfcalibration is always done, which is why only this plot shows if a beam failed on selfcal. Have a look at the selfcal page for further information on a given beam.</p>
<div class="w3-container w3-large">
\n"""
image_counter = 0
for image in image_list:
if "cb_overview" in image or "selfcal" in image:
if image_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image" style="width:100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
if image_counter % 3 == 2 or image_counter == len(image_list)-1:
html_code += """</div>\n"""
image_counter += 1
html_code += """
</div>
</div>\n"""
# Make gallery for continuum
html_code += """
<div class="w3-container w3-margin-top w3-show">
<h3> Continuum CB plots </h3>
            <p> These plots summarise the continuum step of the pipeline for each of the compound beams. The left plot shows the beam numbers for reference. The middle and right plots show the minor beam axis and continuum rms, respectively. A missing beam would be gray. Red indicates the beam has failed if the rms is above 50mJy/beam or the minor axis above 15arcsec. Have a look at the continuum page for further information on a given beam and the image gallery from all beams.</p>
<div class="w3-container w3-large">
\n"""
image_counter = 0
for image in image_list:
if "cb_overview" in image or "continuum" in image:
if image_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image" style="width:100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
if image_counter % 3 == 2 or image_counter == len(image_list)-1:
html_code += """</div>\n"""
image_counter += 1
html_code += """
</div>
</div>\n"""
# Make gallery for line
html_code += """
<div class="w3-container w3-margin-top w3-show">
<h3> Line CB plots </h3>
            <p> These plots summarise the line step of the pipeline for each of the compound beams. The first one shows the beam numbers for reference. The other 8 plots show the median rms for each cube. A missing/failed cube for a beam would be gray. Red indicates a failed cube if the median rms is above 2mJy/beam for the cubes 0-6 (which are 3-channel averaged) and above 3mJy/beam for cube 7 (which has the full spectral resolution). It is important to check even good cubes in a beam for systematic effects (subband edges, slopes, etc.). Have a look at the line page to view the noise spectra for all cubes from all beams.</p>
<div class="w3-container w3-large">
\n"""
for image in image_list:
# make the reference plots in an extra line
if "cb_overview" in image:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image" style="width:100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
html_code += """</div>\n"""
image_counter = 0
for image in image_list:
if "cube" in image:
if image_counter % 4 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-quarter w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image" style="width:100%">
</a>
</div>\n""".format(page_type, os.path.basename(image))
if image_counter % 4 == 3 or image_counter == len(image_list)-1:
html_code += """</div>\n"""
image_counter += 1
html_code += """
</div>
</div>\n"""
else:
logger.warning("No summary plots found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No plots were found for summary
</p>
</div>\n"""
return html_code
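# Illustrative usage sketch, not part of the original module: the function only
# returns the body of the page; header, navigation bar and footer are added by
# html_report.py. The paths and the observation table below are assumptions made
# only for this example.
def _example_write_obs_content_summary():
    obs_info = Table({'Obs_ID': ['190602049'], 'Target': ['LH_GRG'],
                      'Flux_Calibrator': ['3C196'], 'Pol_Calibrator': ['3C286'],
                      'OSA': ['unknown']})
    html_code = write_obs_content_summary(
        "", "/data/apertif/190602049/qa/report/190602049", "summary",
        obs_info=obs_info)
    return html_code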
| 9,470 | 37.189516 | 641 | py |
dataqa | dataqa-master/report/html_report.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import argparse
import socket
import report.html_report_content as hrc
# from __future__ import with_statement
logger = logging.getLogger(__name__)
def write_html_header(html_file_name, js_file, css_file=None, page_type='index', obs_id=0):
"""
This function creates the header for an html document
"""
if page_type == 'index':
page_title = 'APERTIF Quality Assessment Overview'
elif page_type == 'obs_page':
page_title = 'Observation {0:s}'.format(obs_id)
css_file = "../{0:s}".format(css_file)
js_file = "../{0:s}".format(js_file)
else:
page_title = '{0:s} {1:s}'.format(obs_id, page_type)
css_file = "../{0:s}".format(css_file)
js_file = "../{0:s}".format(js_file)
html_file = open(html_file_name, 'w')
# this is a quick fix to have the title of the qa pages below the nav bar
# need to find a better solution for this
if page_type != "index":
html_file.write("""<!DOCTYPE HTML>
<html lang="en">
<head>
<title>{0}</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="description" content="" />
<meta name="keywords" content="" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css">
<script src="{1}"></script>
<link rel="stylesheet" type="text/css" href="{2}" />
</head>
<body>
<br><br>
<div class="w3-container w3-center w3-margin-bottom w3-amber">
<h1>{0}</h1>
</div>\n""".format(page_title, js_file, css_file))
else:
html_file.write("""<!DOCTYPE HTML>
<html lang="en">
<head>
<title>{0}</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="description" content="" />
<meta name="keywords" content="" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css">
<script src="{1}"></script>
<link rel="stylesheet" type="text/css" href="{2}" />
</head>
<body>
<div class="w3-container w3-center w3-margin-bottom w3-amber">
<h1>{0}</h1>
</div>\n""".format(page_title, js_file, css_file))
html_file.close()
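# Illustrative usage sketch, not part of the original module: a stand-alone page
# is opened with write_html_header() and closed with write_html_end(). The file
# names below are assumptions made only for this example.
def _example_write_html_header():
    write_html_header('index.html', 'report.js', css_file='report.css',
                      page_type='index')
    write_html_end('index.html')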
def write_html_end(html_file_name):
"""
This function closes an html document
"""
try:
html_file = open(html_file_name, 'a')
html_file.write("""</body>\n</html>""")
html_file.close()
except Exception as e:
logger.error(e)
def write_html_obs_index(html_file_name, obs_id):
"""
This function creates an index for the list of observations
"""
# write the html content for the index of observations
obs_index = """
<div class="w3-container w3-center">
<h2> List of Observations </h2>
<p class="w3-center w3-container w3-large">Note: This website will allow you to go through the different qualitiy assessment products
in addition to the apercal logfile from each node. It will not give you access to fits
images and the source catalogue</p>
</div>\n"""
obs_index += """
<div class="w3-container w3-center w3-xlarge">
<b>{0:s}</b>
</div>
<div class="w3-container w3-center">
<div class="w3-bar w3-large w3-dark-gray">
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}/{0:s}_summary.html">summary</a>
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}/{0:s}_beamweights.html">beamweights</a>
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}/{0:s}_inspection_plots.html">inspection
plots</a>
<a class="w3-bar-item w3-button w3-hover-yellow"
href="{0:s}/{0:s}_preflag.html">preflag</a>
<a class="w3-bar-item w3-button w3-hover-yellow"
href="{0:s}/{0:s}_crosscal.html">crosscal</a>
<a class="w3-bar-item w3-button w3-hover-yellow"
href="{0:s}/{0:s}_selfcal.html">selfcal</a>
<a class="w3-bar-item w3-button w3-hover-yellow"
href="{0:s}/{0:s}_continuum.html">continuum</a>
<a class="w3-bar-item w3-button w3-hover-yellow"
href="{0:s}/{0:s}_polarisation.html">polarisation</a>
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}/{0:s}_line.html">line</a>
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}/{0:s}_mosaic.html">mosaic</a>
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}/{0:s}_apercal_log.html">apercal
log</a>
</div>
</div>\n""".format(obs_id)
try:
html_file = open(html_file_name, 'a')
html_file.write(obs_index)
html_file.close()
except Exception as e:
logger.error(e)
def write_html_navbar(html_file_name, links, page_type='preflag', obs_id=0):
"""
Function to add a navigation bar at the top of the website for each QA
"""
html_code = """
<div class="w3-top">
<div class="w3-container w3-dark-gray w3-large">
<div class="w3-bar">
"""
for page in links:
if page == page_type:
html_code += """
<a class="w3-bar-item w3-button w3-hover-yellow w3-amber" href="{0:s}_{1:s}.html">{2:s}</a>\n""".format(
obs_id, page, page.replace("_", " "))
else:
html_code += """
<a class="w3-bar-item w3-button w3-hover-yellow" href="{0:s}_{1:s}.html">{2:s}</a>\n""".format(
obs_id, page, page.replace("_", " "))
html_code += """
<a class="w3-bar-item w3-button w3-hover-yellow w3-right" href="../index.html">Overview of Observation</a>
<a class="w3-bar-item w3-button w3-hover-yellow w3-right" href="https://docs.google.com/document/d/1LBcx7MmfLeBlSxj7bFI_TRDFMLsQ3cFmFXXrrNf5xIc/edit?usp=sharing" target="_blank">OSA Guide</a>
</div>
</div>
</div>
\n"""
try:
html_file = open(html_file_name, 'a')
html_file.write(html_code)
html_file.close()
except Exception as e:
logger.error(e)
def write_obs_page(qa_report_path, obs_id, css_file, js_file, subpages=None, obs_info=None, osa_report=''):
"""
Function to create the subpages
"""
if subpages is not None:
for page in subpages:
logger.info("# Creating page {0:s}".format(page))
page_name = "{0:s}/{1:s}/{1:s}_{2:s}.html".format(
qa_report_path, obs_id, page)
# create the header
write_html_header(
page_name, js_file, css_file=css_file, page_type=page, obs_id=obs_id)
write_html_navbar(page_name, subpages,
page_type=page, obs_id=obs_id)
hrc.write_obs_content(page_name, qa_report_path,
page_type=page, obs_id=obs_id, obs_info=obs_info, osa_report=osa_report)
# Close the index file
write_html_end(page_name)
def create_main_html(qa_report_dir, obs_id, subpages, css_file=None, js_file=None, obs_info=None, osa_report=''):
"""
Function to create the main HTML file
Args:
qa_report_dir (str): Directory of report
obs_id (str): ID of observation
subpages (list(str)): The subpages of the report
css_file (str): The css file of the report (depracated)
js_file (str): The javascript file for the report
obs_info (dict): Information about the observation
add_osa_report (bool): Update web report to add only the osa report.
"""
# qa_report_dir = '{0:s}/report'.format(qa_report_dir)
# # Check that qa_report_dir and the other directories exists
# if not os.path.exists(qa_report_dir):
# logger.warning(
# "Directory {0:s} does not exists. Abort".format(qa_report_dir))
# logger.info("Creating directory {0:s}".format(qa_report_dir))
# os.mkdir(qa_report_dir)
# else:
# logger.info("Directory {0:s} exists".format(qa_report_dir))
# if continuum:
# if not os.path.exists('{0:s}/continuum'.format(qa_report_dir):
# logger.error("Directory for continuum does not exists")
# return -1
# if crosscal:
# if not os.path.exists('{0:s}/crosscal'.format(qa_report_dir):
# logger.error("Directory for crosscal does not exists")
# return -1
# if line:
# if not os.path.exists('{0:s}/line'.format(qa_report_dir):
# logger.error("Directory for line does not exists")
# return -1
# if mosaic:
# if not os.path.exists('{0:s}/mosaic'.format(qa_report_dir):
# logger.error("Directory for mosaic does not exists")
# return -1
# if selfcal:
# if not os.path.exists('{0:s}/selfcal'.format(qa_report_dir):
# logger.error("Directory for selfcal does not exists")
# return -1
# get a list of observations in this directory
# obs_dir_list = glob.glob('{0:s}/{1:s}'.format(qa_report_dir, '[0-9]'*9))
# if len(obs_dir_list) == 0:
# obs_dir_list =[obs_id]
# logger.error("No observation found in QA directory. Abort")
# obs_dir_list.sort()
# obs_ids = [os.path.basename(obs) for obs in obs_dir_list]
# number of obs_ids
# n_obs_ids = len(obs_dir_list)
# Create index file
# +++++++++++++++++
if osa_report == '':
index_file = '{0:s}/index.html'.format(qa_report_dir)
logging.info("## Creating index file: {0:s}".format(index_file))
# create the header
        # note: write_html_header expects the js file before the css file
        write_html_header(index_file, os.path.basename(js_file),
                          css_file=os.path.basename(css_file), page_type='index')
# Add a list of Observations
write_html_obs_index(index_file, obs_id)
# Close the index file
write_html_end(index_file)
# Creating subpages
# +++++++++++++++++
logging.info("## Writing subpages for observation {0:s}".format(obs_id))
# obs_report_path = '{0:s}/{1:s}'.format(qa_report_dir, obs_ids[k])
try:
write_obs_page(qa_report_dir, obs_id, os.path.basename(css_file),
os.path.basename(js_file), subpages=subpages, obs_info=obs_info, osa_report=osa_report)
except Exception as e:
logger.error(e)
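# Illustrative usage sketch, not part of the original module: building the full
# web report for one observation. The report directory, list of subpages and the
# css/js file names below are assumptions made only for this example.
def _example_create_main_html():
    subpages = ['summary', 'inspection_plots', 'preflag', 'crosscal',
                'selfcal', 'continuum', 'line', 'apercal_log']
    create_main_html('/data/apertif/190602049/qa/report', '190602049', subpages,
                     css_file='report.css', js_file='report.js')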
| 10,998 | 36.927586 | 211 | py |
dataqa | dataqa-master/report/html_report_content_beamweights.py | import os
import sys
from astropy.table import Table, join
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_beamweights(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for beamweights
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
        obs_info (dict): Information about the observation
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
if obs_info is not None:
obs_id = obs_info['Obs_ID'][0]
source_list = np.array(
[obs_info['Target'][0], obs_info['Flux_Calibrator'][0], obs_info['Pol_Calibrator'][0]])
else:
obs_id = os.path.basename(qa_report_obs_path)
source_list = ''
html_code += """
<div class="w3-container w3-large">
<p>
Here you can inspect the beamweights per beam for different subbands for the calibrator used in this observation: {0:s}. For each individual beam, you can click your way through the subbands using the back and forward arrows. The thin top arrows allow you to step through every single image while the thick bottom arrows change between every 10th image. When you reach the last image, it starts at the beginning.
These plots should be the same for observations which used the same calibrator.
</p>
<h4> Note: Due to issues with getting the script to extract the beam weights, creating the beamweights plots has been temporarily disabled </h4>
</div>\n
""".format(source_list[1])
# total number of beams
n_beams = 40
qa_report_obs_page_path = os.path.join(qa_report_obs_path, page_type)
# Create html code for image galleries
# ====================================
# get beams
beam_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
if len(beam_list) != 0:
html_code += """
<div class="w3-container w3-margin-top">\n"""
# get a list of beam numbers
#beam_nr_list = np.array([os.path.basename(beam) for beam in beam_list])
# get a list of reference beams
beam_nr_list = np.array(
["{0:02d}".format(beam) for beam in range(n_beams)])
# go through the beams
for beam_counter in range(n_beams):
# get a list of all images to make sure that at least one exists
image_list = glob.glob(
"{0:s}/{1:s}/{2:02d}/*.png".format(qa_report_obs_path, page_type, beam_counter))
# to properly make the gallery, open the row
if beam_counter % 4 == 0:
html_code += """<div class="w3-row w3-margin-top">\n"""
# check that there are images for this beam
if len(image_list) != 0:
# sort the list
image_list.sort()
# create the slideshow for this beam
# open the slideshow div
html_code += """
<div class="w3-content w3-display-container w3-quarter w3-border">\n"""
# go through each plot and add elements to the slideshow
# the first element gets a different class value
html_code += """
<a href="{0:s}/{1:02d}/{2:s}">
<img name="slideshow{1:d}" class="w3-show" src="{0:s}/{1:02d}/{2:s}" style="width:100%">
</a>\n""".format(page_type, beam_counter, os.path.basename(image_list[0]))
for image_counter in range(1, len(image_list)):
html_code += """
<a href="{0:s}/{1:02d}/{2:s}">
<img name="slideshow{1:d}" class="w3-hide" src="{0:s}/{1:02d}/{2:s}" style="width:100%">
</a>\n""".format(page_type, beam_counter, os.path.basename(image_list[image_counter]))
# write the caption
html_code += """
<div class="w3-container w3-center">
<h5 name="slideshow_label{0:d}">Beam {0:02d}, Subband {1:s}</h5>
</div>""".format(beam_counter, os.path.basename(image_list[0]).split("_")[3].replace("S", ""))
# write the buttons
html_code += """
<button class="w3-button w3-display-topleft" onclick="change_slide(-1, 'slideshow{0:d}', 'slideshow_label{0:d}')">❬</button>
<button class="w3-button w3-display-topright" onclick="change_slide(1, 'slideshow{0:d}', 'slideshow_label{0:d}')">❭</button>
<button class="w3-button w3-display-bottomleft" onclick="change_slide(-10, 'slideshow{0:d}', 'slideshow_label{0:d}')">❰</button>
<button class="w3-button w3-display-bottomright" onclick="change_slide(10, 'slideshow{0:d}', 'slideshow_label{0:d}')">❱</button>\n""".format(beam_counter)
# close the slideshow div
html_code += """</div>\n"""
else:
html_code += """
<div class="w3-content w3-display-container w3-quarter">
<img src="" alt="No image for beam {0:s}", width="100%">
</div>\n""".format(beam_nr_list[beam_counter])
# close the row
if beam_counter % 4 == 3 or beam_counter == len(beam_nr_list):
html_code += """</div>\n"""
html_code += """</div>\n"""
# html_code += """
# <div class="gallery" name="{0:s}">
# <p class="warning">
# No plots and validation tool were found for {1:s}
# </p>
# </div>\n""".format(button_html_name, page_type)
else:
logger.warning("No beams for beamweights found")
html_code += """
<div class="w3-container w3-large w3-text-red w3-margin-top">
<p>
                No beams were found for beamweights.
            </p>
        </div>\n"""
return html_code
| 6,448 | 42.281879 | 428 | py |
dataqa | dataqa-master/report/test_nptabel_summary.py | #!/usr/bin/env python
import glob
import os
import numpy as np
import logging
from make_nptabel_summary import extract_all_beams, find_sources, make_nptabel_csv
import csv
# -------------------------------------------------
beams_1 = '/data/apertif/190602049/'
obs_id = '190602049'
module = 'preflag'
#source = 'LH_GRG'
# -----------------------
# to extract a dictionary
# the QA directory is assumed to follow the usual <obs_dir>/qa/ layout
qa_dir = beams_1 + 'qa/'
beam_info = extract_all_beams(obs_id, module, qa_dir)
print(beam_info[1]['beam'])
print(len(beam_info))
# print(beam_info)
# ------------------------
# to extract a csv file
make_nptabel_csv(obs_id, module, qa_dir)
| 578 | 18.965517 | 82 | py |
dataqa | dataqa-master/report/test_merge.py | from merge_ccal_scal_plots import run_merge_plots
import numpy as np
import os
from apercal.libs import lib
import logging
lib.setup_logger('debug', logfile='test_merge_plots.log')
logger = logging.getLogger(__name__)
basedir = '/data/apertif/190602049_flag-strategy-test/qa'
do_ccal = True
do_scal = False
# file_list = np.array([os.path.join(basedir, img) for img in img_list])
# new_file_name = os.path.join(basedir, "merge_test.png")
run_merge_plots(basedir, do_ccal=do_ccal, do_scal=do_scal)
| 504 | 23.047619 | 72 | py |
dataqa | dataqa-master/report/html_report_dir.py | #!/usr/bin/python2.7
"""
This file contains functionality to create the directory structure for the report.
Instead of copying files, they are linked.
"""
import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
from shutil import copy2, copy
logger = logging.getLogger(__name__)
def create_report_dir_observing_log(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the observing log for the report
Note:
All necessary files will be linked to this directory
from the observing log QA directory
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
logger.info(
"## Creating report directory for observing logs. No files to link yet")
# # get the images in the subdirectory
# images_inspection_plots = glob.glob(
# os.path.join(qa_dir, "inspection_plots/*.png"))
# if len(images_inspection_plots) != 0:
# images_inspection_plots.sort()
# # go through all beams
# for image in images_inspection_plots:
# link_name = "{0:s}/{1:s}".format(
# qa_dir_report_obs_subpage, os.path.basename(image))
# # change to relative link when in trigger mode
# if trigger_mode or single_node:
# image = image.replace(
# qa_dir, "../../../")
# # check if link exists
# if not os.path.exists(link_name):
# os.symlink(image, link_name)
# else:
# os.unlink(link_name)
# os.symlink(image, link_name)
# else:
# logger.warning("No images found for inspection plots.")
# logger.info(
# "## Creating report directory for inspection plots and linking files. Done")
def create_report_dir_summary(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the summary directory for the report
Note:
All necessary files will be linked to this directory
from the QA directory. Currently, it only reads the
compound beam plots
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
logger.info(
"## Creating report directory for summary.")
# get the images in the subdirectory
images_summary = glob.glob(
os.path.join(qa_dir, "cb_plots/*.png"))
if len(images_summary) != 0:
images_summary.sort()
# go through all beams
for image in images_summary:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images found for summary.")
logger.info(
"## Creating report directory for summary and linking files. Done")
def create_report_dir_beamweights(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the beamweights directory for the report
Note:
All necessary files will be linked to this directory
from the QA directory. Currently, it only reads the
compound beam plots
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
logger.info(
"## Creating report directory for beam weights.")
default_qa_beamweights_dir = os.path.join(qa_dir, "beamweights")
if socket.gethostname() != 'happili-01' or trigger_mode:
qa_beamweights_dir_list = [default_qa_beamweights_dir]
else:
if "/data" in default_qa_beamweights_dir:
qa_beamweights_dir_list = [default_qa_beamweights_dir, default_qa_beamweights_dir.replace(
"data", "data2"), default_qa_beamweights_dir.replace("data", "data3"), default_qa_beamweights_dir.replace("data", "data4")]
else:
qa_beamweights_dir_list = [default_qa_beamweights_dir, default_qa_beamweights_dir.replace(
"tank", "tank2"), default_qa_beamweights_dir.replace("tank", "tank3"), default_qa_beamweights_dir.replace("tank", "tank4")]
# Get every single beamweights plot
# =============================
logger.info("Linking individual beamweights plots")
for qa_beamweights_dir in qa_beamweights_dir_list:
# get beams
qa_beamweights_dir_beam_list = glob.glob(
"{0:s}/[0-3][0-9]".format(qa_beamweights_dir))
# number of beams
n_beams = len(qa_beamweights_dir_beam_list)
if n_beams != 0:
qa_beamweights_dir_beam_list.sort()
# go through all beams
for qa_beamweights_dir_beam in qa_beamweights_dir_beam_list:
qa_dir_report_obs_subpage_beamweights_beam = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(qa_beamweights_dir_beam))
# create a subdirectory in the report dir
if not os.path.exists(qa_dir_report_obs_subpage_beamweights_beam):
try:
os.mkdir(qa_dir_report_obs_subpage_beamweights_beam)
except Exception as e:
logger.error(e)
# get the images in the beam directory and link them
images_in_beam = glob.glob(
"{0:s}/*png".format(qa_beamweights_dir_beam))
# check that there are images in there
if len(images_in_beam) != 0:
images_in_beam.sort()
# go through the images and link them
for image in images_in_beam:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_beamweights_beam, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
# link needs to be removed before it can be overwritten
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images in beam {0:s} found".format(
qa_beamweights_dir_beam))
else:
logger.warning(
"No beams found for beamweights in {0:s}".format(qa_beamweights_dir))
logger.info(
"## Creating report directory for beamweights and linking files. Done")
def create_report_dir_inspection_plots(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False, obs_info=None):
"""Function to create the inspection plot directory for the report
Note:
All necessary files will be linked to this directory
from the inspection plot QA directory.
It is not necessary to add the combine-parameter, because
the inspection plots are only created on happili-01 unless ran manually.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
obs_info (dict): Information about the observation such as the source names
"""
# get the images in the subdirectory
# without the additional obs information assume that the files are in the main dir
if obs_info is None:
logger.warning("No observing information provided. Will assume plots are in main directory")
images_inspection_plots = glob.glob(
os.path.join(qa_dir, "inspection_plots/*.png"))
if len(images_inspection_plots) != 0:
images_inspection_plots.sort()
# go through all beams
for image in images_inspection_plots:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images found for inspection plots.")
# otherwise go through the sources to get the info
else:
default_qa_plot_dir = os.path.join(qa_dir, "inspection_plots")
if obs_info['Pol_Calibrator'][0] != '':
src_list = [obs_info['Target'][0], obs_info['Flux_Calibrator']
[0], obs_info['Pol_Calibrator'][0]]
else:
src_list = [obs_info['Target'][0], obs_info['Flux_Calibrator'][0]]
# go through each of the sources
for src in src_list:
logger.info("Linking files for {}".format(src))
# this is necessary as plots for the calibrator are per beam
# and will be distributed among the different nodes
            if socket.gethostname() != 'happili-01' or trigger_mode:
qa_plot_dir_list = [default_qa_plot_dir]
# only check in one dir for the target plots
elif src == obs_info['Target'][0]:
qa_plot_dir_list = [default_qa_plot_dir]
else:
if "/data" in default_qa_plot_dir:
qa_plot_dir_list = [default_qa_plot_dir, default_qa_plot_dir.replace(
"data", "data2"), default_qa_plot_dir.replace("data", "data3"), default_qa_plot_dir.replace("data", "data4")]
else:
qa_plot_dir_list = [default_qa_plot_dir, default_qa_plot_dir.replace(
"tank", "tank2"), default_qa_plot_dir.replace("tank", "tank3"), default_qa_plot_dir.replace("tank", "tank4")]
# now go through each of the plot directories from the differen nodes
for qa_plot_dir in qa_plot_dir_list:
# set the source directory in the inspection plot dir
qa_plot_dir_src = os.path.join(
qa_plot_dir, "{}".format(src))
logger.info("Looking for plots in {}".format(qa_plot_dir_src))
# set the source directory where the link should be
qa_dir_report_obs_subpage_src = os.path.join(
qa_dir_report_obs_subpage, src)
# create it if it does not exists
if not os.path.exists(qa_dir_report_obs_subpage_src):
os.mkdir(qa_dir_report_obs_subpage_src)
# if it is the target the situation is simple
# as all plots will be in one place
if src == obs_info['Target'][0]:
# now get the images
images_inspection_plots = glob.glob(
os.path.join(qa_plot_dir_src, "*.png"))
if len(images_inspection_plots) != 0:
images_inspection_plots.sort()
# go through all beams
for image in images_inspection_plots:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_src, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning(
"No images found for inspection plots for target {}.".format(src))
# for the calibrators
# they are separated by beam
else:
# get the beams
qa_plot_dir_src_beam_list = glob.glob(os.path.join(qa_plot_dir_src, "[0-3][0-9]"))
# check that there are actually beams
if len(qa_plot_dir_src_beam_list) != 0:
# go through the beams:
for qa_plot_dir_src_beam in qa_plot_dir_src_beam_list:
# now get the images
images_inspection_plots = glob.glob(
os.path.join(qa_plot_dir_src_beam, "*.png"))
# continue only if there are images in the beam dir
if len(images_inspection_plots) != 0:
# set the beam directory where the link should be
qa_dir_report_obs_subpage_src_beam = os.path.join(
qa_dir_report_obs_subpage_src, os.path.basename(qa_plot_dir_src_beam))
# create it if it does not exists
if not os.path.exists(qa_dir_report_obs_subpage_src_beam):
os.mkdir(qa_dir_report_obs_subpage_src_beam)
# go through all images and link them
for image in images_inspection_plots:
link_name="{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_src_beam, os.path.basename(image))
logger.info("Linking {0} to {1}".format(image, link_name))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image=image.replace(
qa_dir, "../../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images found for inspection plots for calibrator {}.".format(src))
else:
logger.warning("No beam directories found for calibrator {}".format(src))
def create_report_dir_preflag(obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode = False, single_node = False):
"""Function to create the preflag directory for the report
Note:
All necessary files will be linked to this directory
from the preflag QA directory.
It is not necessary to add the combine-parameter, because
the preflag files are already distributed among the nodes.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
default_qa_preflag_dir=os.path.join(qa_dir, "preflag")
if socket.gethostname() != 'happili-01' or trigger_mode:
qa_preflag_dir_list=[default_qa_preflag_dir]
else:
if "/data" in default_qa_preflag_dir:
qa_preflag_dir_list=[default_qa_preflag_dir, default_qa_preflag_dir.replace(
"data", "data2"), default_qa_preflag_dir.replace("data", "data3"), default_qa_preflag_dir.replace("data", "data4")]
else:
qa_preflag_dir_list = [default_qa_preflag_dir, default_qa_preflag_dir.replace(
"tank", "tank2"), default_qa_preflag_dir.replace("tank", "tank3"), default_qa_preflag_dir.replace("tank", "tank4")]
# Get the summary file
# ====================
preflag_summary_file = os.path.join(default_qa_preflag_dir,"{0}_{1}_summary.csv".format(obs_id,"preflag"))
if os.path.exists(preflag_summary_file):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(preflag_summary_file))
# change to relative link when in trigger mode
if trigger_mode or single_node:
preflag_summary_file = preflag_summary_file.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(preflag_summary_file, link_name)
else:
os.unlink(link_name)
os.symlink(preflag_summary_file, link_name)
else:
logger.info("Did not find {} for linking".format(preflag_summary_file))
# Get the combined preflag plots when on happili-01
# =================================================
if socket.gethostname() == 'happili-01':
logger.info("Linking combined preflag plots")
# get the images in the subdirectory
images_preflag_combined=glob.glob(
os.path.join(default_qa_preflag_dir, "*.png"))
if len(images_preflag_combined) != 0:
images_preflag_combined.sort()
# go through all beams
for image in images_preflag_combined:
link_name="{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image=image.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images found for combined preflag plots.")
# Get every single preflag plot
# =============================
logger.info("Linking individual preflag plots")
for qa_preflag_dir in qa_preflag_dir_list:
# get beams
qa_preflag_dir_beam_list=glob.glob(
"{0:s}/[0-3][0-9]".format(qa_preflag_dir))
# number of beams
n_beams=len(qa_preflag_dir_beam_list)
if n_beams != 0:
qa_preflag_dir_beam_list.sort()
# go through all beams
for qa_preflag_dir_beam in qa_preflag_dir_beam_list:
qa_dir_report_obs_subpage_preflag_beam="{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(qa_preflag_dir_beam))
# create a subdirectory in the report dir
if not os.path.exists(qa_dir_report_obs_subpage_preflag_beam):
try:
os.mkdir(qa_dir_report_obs_subpage_preflag_beam)
except Exception as e:
logger.error(e)
# get the images in the beam directory and link them
images_in_beam=glob.glob(
"{0:s}/*png".format(qa_preflag_dir_beam))
# check that there are images in there
if len(images_in_beam) != 0:
images_in_beam.sort()
# go through the images and link them
for image in images_in_beam:
link_name="{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_preflag_beam, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image=image.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
# link needs to be removed before it can be overwritten
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images in beam {0:s} found".format(
qa_preflag_dir_beam))
else:
logger.warning(
"No beams found for preflag QA in {0:s}".format(qa_preflag_dir))
def create_report_dir_crosscal(obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False, do_combine=False):
"""Function to create the create directory for the report
Note:
All necessary files will be linked to this directory
from the crosscal QA directory.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
do_combine (bool): Set to combine the QA information from different happilis
"""
qa_crosscal_dir = os.path.join(qa_dir,"crosscal")
# if crosscal from different happilis should be combined
if do_combine:
logger.info("Nothing to combine. This is done in a separate step.")
# combine images
# try:
# logging.info("Combining crosscal plots")
# run_merge_plots(qa_crosscal_dir, do_ccal=True, do_scal=False):
# except Exception as e:
# logger.warning("Combining crosscal plots failed")
# logger.exception(e)
# else:
# logger.info("Combining crosscal plots ... Done")
# Get the summary file
# ====================
crosscal_summary_file = os.path.join(
qa_crosscal_dir, "{0}_{1}_summary.csv".format(obs_id, "crosscal"))
if os.path.exists(crosscal_summary_file):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(crosscal_summary_file))
# change to relative link when in trigger mode
if trigger_mode or single_node:
crosscal_summary_file = crosscal_summary_file.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(crosscal_summary_file, link_name)
else:
os.unlink(link_name)
os.symlink(crosscal_summary_file, link_name)
else:
logger.info("Did not find {} for linking".format(crosscal_summary_file))
# Get the crosscal images
# =======================
# get the images for crosscal
images_crosscal = glob.glob(
"{0:s}/*.png".format(qa_crosscal_dir))
# if there are any link them.
if len(images_crosscal) != 0:
images_crosscal.sort()
# go through all beams
for image in images_crosscal:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images found for crosscal.")
def create_report_dir_selfcal(obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False, do_combine=False):
"""Function to create the selfcal directory for the report
Note:
All necessary files will be linked to this directory
from the selfcal QA directory.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
do_combine (bool): Set to combine the QA information from different happilis
"""
default_qa_selfcal_dir = os.path.join(qa_dir, "selfcal")
# if crosscal from different happilis should be combined
# ======================================================
if do_combine:
pass
# combine images
# try:
# logging.info("Combining crosscal plots")
# run_merge_plots(default_qa_selfcal_dir, do_ccal=False, do_scal=True):
# except Exception as e:
# logger.warning("Combining crosscal plots failed")
# logger.exception(e)
# else:
# logger.info("Combining crosscal plots ... Done")
qa_selfcal_dir = os.path.join(qa_dir, "selfcal")
# Get the summary file
# ====================
selfcal_summary_file = os.path.join(
qa_selfcal_dir, "{0}_{1}_summary.csv".format(obs_id, "selfcal"))
if os.path.exists(selfcal_summary_file):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(selfcal_summary_file))
# change to relative link when in trigger mode
if trigger_mode or single_node:
selfcal_summary_file = selfcal_summary_file.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(selfcal_summary_file, link_name)
else:
os.unlink(link_name)
os.symlink(selfcal_summary_file, link_name)
else:
logger.info("Did not find {} for linking".format(
selfcal_summary_file))
# Getting selfcal images
# ======================
if socket.gethostname() != 'happili-01' or trigger_mode:
qa_selfcal_dir_list = [default_qa_selfcal_dir]
else:
if "/data" in default_qa_selfcal_dir:
qa_selfcal_dir_list = [default_qa_selfcal_dir, default_qa_selfcal_dir.replace(
"data", "data2"), default_qa_selfcal_dir.replace("data", "data3"), default_qa_selfcal_dir.replace("data", "data4")]
else:
qa_selfcal_dir_list = [default_qa_selfcal_dir, default_qa_selfcal_dir.replace(
"tank", "tank2"), default_qa_selfcal_dir.replace("tank", "tank3"), default_qa_selfcal_dir.replace("tank", "tank4")]
for qa_selfcal_dir in qa_selfcal_dir_list:
# get beams
beam_list = glob.glob(
"{0:s}/[0-3][0-9]".format(qa_selfcal_dir))
# number of beams
n_beams = len(beam_list)
if n_beams != 0:
beam_list.sort()
# go through all beams
for beam in beam_list:
qa_dir_report_obs_subpage_selfcal_beam = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(beam))
# create a subdirectory in the report dir
if not os.path.exists(qa_dir_report_obs_subpage_selfcal_beam):
try:
os.mkdir(qa_dir_report_obs_subpage_selfcal_beam)
except Exception as e:
logger.error(e)
# get the images in the beam directory and link them
images_in_beam = glob.glob(
"{0:s}/*png".format(beam))
if len(images_in_beam) != 0:
images_in_beam.sort()
for image in images_in_beam:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_selfcal_beam, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No selfcal images in beam {0:s} found".format(
beam))
else:
logger.warning("No beams for selfcal found")
# Get the phase plots and link them
# =================================
images_phase = glob.glob(
"{0:s}selfcal/SCAL_phase*.png".format(qa_dir))
if len(images_phase) != 0:
images_phase.sort()
# go through all antennas
for image in images_phase:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No selfcal phase plots found")
# Get the amplitude plots and link them
# =====================================
images_amp = glob.glob(
"{0:s}selfcal/SCAL_amp*.png".format(qa_dir))
if len(images_amp) != 0:
images_amp.sort()
# go through all antennas
for image in images_amp:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No selfcal amplitude plots found")
# # get beams
# beam_list = glob.glob(
# "{0:s}selfcal/[0-3][0-9]".format(qa_dir))
# # number of beams
# n_beams = len(beam_list)
# if n_beams != 0:
# beam_list.sort()
# # go through all beams
# for beam in beam_list:
# qa_dir_report_obs_subpage_line_beam = "{0:s}/{1:s}".format(
# qa_dir_report_obs_subpage, os.path.basename(beam))
# # create a subdirectory in the report dir
# try:
# os.mkdir(qa_dir_report_obs_subpage_line_beam)
# except Exception as e:
# logger.error(e)
# # get the images in the beam directory and link them
# images_in_beam = glob.glob(
# "{0:s}/*png".format(beam))
# if len(images_in_beam) != 0:
# images_in_beam.sort()
# for image in images_in_beam:
# link_name = "{0:s}/{1:s}".format(
# qa_dir_report_obs_subpage_line_beam, os.path.basename(image))
# # check if link exists
# if not os.path.exists(link_name):
# os.symlink(image, link_name)
# else:
# os.unlink(link_name)
# os.symlink(image, link_name)
# else:
# logger.warning("No images in beam {0:s} found".format(
# beam))
# else:
# logger.warning("No beams found for selfcal found")
#
# logger.info(
# "## Creating report directory for selfcal and linking files. Done")
def create_report_dir_continuum(obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the continuum directory for the report
Note:
All necessary files will be linked to this directory
from the continuum QA directory.
        No need to add the combine parameter as it will try to look
into all happili nodes.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
qa_continuum_dir = os.path.join(qa_dir, "continuum")
# Get the summary file
# ====================
continuum_summary_file = os.path.join(
qa_continuum_dir, "{0}_{1}_summary.csv".format(obs_id, "continuum"))
if os.path.exists(continuum_summary_file):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(continuum_summary_file))
# change to relative link when in trigger mode
if trigger_mode or single_node:
continuum_summary_file = continuum_summary_file.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(continuum_summary_file, link_name)
else:
os.unlink(link_name)
os.symlink(continuum_summary_file, link_name)
else:
logger.info("Did not find {} for linking".format(
continuum_summary_file))
# Get the image properties file
# =============================
continuum_image_properties = os.path.join(
qa_continuum_dir, "{0}_combined_continuum_image_properties.csv".format(obs_id))
if os.path.exists(continuum_image_properties):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(continuum_image_properties))
# change to relative link when in trigger mode
if trigger_mode or single_node:
continuum_image_properties = continuum_image_properties.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(continuum_image_properties, link_name)
else:
os.unlink(link_name)
os.symlink(continuum_image_properties, link_name)
else:
logger.info("Did not find {} for linking".format(
continuum_image_properties))
# Getting continuum images
# ======================
default_qa_continuum_dir = os.path.join(qa_dir, "continuum")
if socket.gethostname() != 'happili-01' or trigger_mode:
qa_continuum_dir_list = [default_qa_continuum_dir]
else:
if "/data" in default_qa_continuum_dir:
qa_continuum_dir_list = [default_qa_continuum_dir, default_qa_continuum_dir.replace(
"data", "data2"), default_qa_continuum_dir.replace("data", "data3"), default_qa_continuum_dir.replace("data", "data4")]
else:
qa_continuum_dir_list = [default_qa_continuum_dir, default_qa_continuum_dir.replace(
"tank", "tank2"), default_qa_continuum_dir.replace("tank", "tank3"), default_qa_continuum_dir.replace("tank", "tank4")]
for qa_continuum_dir in qa_continuum_dir_list:
# get beams
beam_list = glob.glob(
"{0:s}/[0-3][0-9]".format(qa_continuum_dir))
# number of beams
n_beams = len(beam_list)
if n_beams != 0:
beam_list.sort()
# go through all beams
for beam in beam_list:
qa_dir_report_obs_subpage_continuum_beam = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(beam))
# create a subdirectory in the report dir
if not os.path.exists(qa_dir_report_obs_subpage_continuum_beam):
try:
os.mkdir(qa_dir_report_obs_subpage_continuum_beam)
except Exception as e:
logger.error(e)
# get the images in the beam directory and link them
images_in_beam = glob.glob(
"{0:s}/*png".format(beam))
if len(images_in_beam) != 0:
images_in_beam.sort()
for image in images_in_beam:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_continuum_beam, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images in beam {0:s} found".format(
beam))
# link the validation tool
validation_tool_dir = glob.glob("{0:s}/*continuum_validation_pybdsf_snr5.0_int".format(
beam))
# check that the directory for the validation tool exists
if len(validation_tool_dir) == 1:
validation_tool_dir = validation_tool_dir[0]
if os.path.isdir(validation_tool_dir):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_continuum_beam, os.path.basename(validation_tool_dir))
# change to relative link when in trigger mode
if trigger_mode or single_node:
validation_tool_dir = validation_tool_dir.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(validation_tool_dir, link_name)
else:
os.unlink(link_name)
os.symlink(validation_tool_dir, link_name)
else:
logger.warning(
"No validation tool output found for continuum QA of beam {0:s}".format(beam))
else:
logger.warning(
"No validation tool output found for continuum QA of beam {0:s}".format(beam))
else:
logger.warning("No beams found for continuum found")
def create_report_dir_line(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the line directory for the report
Note:
All necessary files will be linked to this directory
from the line QA directory.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
# Getting line images
# ===================
default_qa_line_dir = os.path.join(qa_dir, "line")
if socket.gethostname() != 'happili-01' or trigger_mode:
qa_line_dir_list = [default_qa_line_dir]
else:
if "/data" in default_qa_line_dir:
qa_line_dir_list = [default_qa_line_dir, default_qa_line_dir.replace(
"data", "data2"), default_qa_line_dir.replace("data", "data3"), default_qa_line_dir.replace("data", "data4")]
else:
qa_line_dir_list = [default_qa_line_dir, default_qa_line_dir.replace(
"tank", "tank2"), default_qa_line_dir.replace("tank", "tank3"), default_qa_line_dir.replace("tank", "tank4")]
for qa_line_dir in qa_line_dir_list:
# get beams
beam_list = glob.glob(
"{0:s}/[0-3][0-9]".format(qa_line_dir))
# number of beams
n_beams = len(beam_list)
if n_beams != 0:
beam_list.sort()
# go through all beams
for beam in beam_list:
qa_dir_report_obs_subpage_line_beam = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(beam))
# create a subdirectory in the report dir
if not os.path.exists(qa_dir_report_obs_subpage_line_beam):
try:
os.mkdir(qa_dir_report_obs_subpage_line_beam)
except Exception as e:
logger.error(e)
# get the images in the beam directory and link them
images_in_beam = glob.glob(
"{0:s}/*png".format(beam))
if len(images_in_beam) != 0:
images_in_beam.sort()
for image in images_in_beam:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage_line_beam, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images in beam {0:s} found".format(
beam))
else:
logger.warning("No beams found for line found")
def create_report_dir_mosaic(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the mosaic directory for the report
Note:
All necessary files will be linked to this directory
from the mosaic QA directory.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
qa_mosaic_dir = "{0:s}mosaic".format(qa_dir)
# get the images in the subdirectory
images_mosaic = glob.glob("{0:s}/*.png".format(qa_mosaic_dir))
if len(images_mosaic) != 0:
images_mosaic.sort()
# go through all beams
for image in images_mosaic:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(image))
# change to relative link when in trigger mode
if trigger_mode or single_node:
image = image.replace(
qa_dir, "../../../moasic/")
# check if link exists
if not os.path.exists(link_name):
os.symlink(image, link_name)
else:
os.unlink(link_name)
os.symlink(image, link_name)
else:
logger.warning("No images found for mosaic")
# link the validation tool
validation_tool_dir = glob.glob("{0:s}/*continuum_validation_pybdsf_snr5.0_int".format(
qa_mosaic_dir))
# check that the directory for the validation tool exists
if len(validation_tool_dir) == 1:
validation_tool_dir = validation_tool_dir[0]
if os.path.isdir(validation_tool_dir):
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(validation_tool_dir))
# change to relative link when in trigger mode
if trigger_mode or single_node:
validation_tool_dir = validation_tool_dir.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(validation_tool_dir, link_name)
else:
os.unlink(link_name)
os.symlink(validation_tool_dir, link_name)
else:
logger.warning("No validation tool output found for mosaic")
else:
logger.warning("No validation tool output found for mosaic")
def create_report_dir_apercal_log(qa_dir, qa_dir_report_obs_subpage, trigger_mode=False, single_node=False):
"""Function to create the apercal log directory for the report
Note:
        All four apercal.log files will be linked to this directory, but
for better processing they will be renamed to .txt files.
This function already collects information from different
happilis.
Args:
qa_dir (str): Directory of the QA
qa_dir_report_obs_subpage (str): Directory of the subpage
trigger_mode (bool): Set for when automatically run after Apercal on a single node
"""
# check first on which happili we are:
host_name = socket.gethostname()
# this one does not take parallelisation into account
# only the code for running it on happili-01 does
if host_name != "happili-01" or trigger_mode or single_node:
logger.warning(
"Cannot account for parallalized log files unless running from happili-01 !!!")
# change to relative link when in trigger mode
apercal_log_file = qa_dir.replace("qa/", "apercal.log")
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(apercal_log_file))
if os.path.exists(apercal_log_file):
if trigger_mode or single_node:
apercal_log_file = apercal_log_file.replace(qa_dir.replace("qa/",""),"../../../../")
# rename the link to the log file according to host
if host_name == "happili-02":
link_name = link_name.replace(
".log", "_log_{0:s}.txt".format(host_name))
elif host_name == "happili-03":
link_name = link_name.replace(
".log", "_log_{0:s}.txt".format(host_name))
elif host_name == "happili-04":
link_name = link_name.replace(
".log", "_log_{0:s}.txt".format(host_name))
# check if link exists
if not os.path.exists(link_name):
os.symlink(apercal_log_file, link_name)
else:
os.unlink(link_name)
os.symlink(apercal_log_file, link_name)
else:
logger.warning("Could not find {0:s}".format(apercal_log_file))
else:
# apercal_log_file_list = [
# qa_dir.replace("qa/","apercal.log"), qa_dir.replace("qa/","apercal.log").replace("data", "data2"), qa_dir.replace("qa/","apercal.log").replace("data", "data3"), qa_dir.replace("qa/","apercal.log").replace("data", "data4")]
# get the data directories
data_dir_search_name = qa_dir.split("qa/")[0]
if "/data" in qa_dir:
data_dir_list = glob.glob(
data_dir_search_name) + glob.glob(
data_dir_search_name.replace("/data", "/data2")) + glob.glob(
data_dir_search_name.replace("/data", "/data3")) + glob.glob(
data_dir_search_name.replace("/data", "/data4"))
else:
data_dir_list = glob.glob(
data_dir_search_name) + glob.glob(
data_dir_search_name.replace("/tank", "/tank2")) + glob.glob(
data_dir_search_name.replace("/tank", "/tank3")) + glob.glob(
data_dir_search_name.replace("/tank", "/tank4"))
if len(data_dir_list) != 0:
data_dir_list.sort()
# go through the data directories
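            # the sorted data directories are assumed to map one-to-one onto
            # happili-01 to happili-04, which is why dir_counter+1 is used
            # below as the node number when renaming the linked log files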
for dir_counter in range(len(data_dir_list)):
# get the logfile for this data directory
apercal_log_file_list = glob.glob(
"{0:s}apercal*.log".format(data_dir_list[dir_counter]))
if len(apercal_log_file_list):
apercal_log_file_list.sort()
# go through the log file list
for log_file in apercal_log_file_list:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(log_file))
link_name = link_name.replace(
".log", "_log_happili-{0:02d}.txt".format(dir_counter+1))
# check if link exists
if not os.path.exists(link_name):
os.symlink(log_file, link_name)
else:
os.unlink(link_name)
os.symlink(log_file, link_name)
else:
logger.warning("Could not find any log files in {0:s}".format(
data_dir_list[dir_counter]))
else:
logger.warning("Did not fine any data directories in {0:s}".format(
data_dir_search_name))
# link the timing measurement files
apercal_timeinfo_files = glob.glob(
"{0:s}apercal_performance/*.csv".format(qa_dir))
if len(apercal_timeinfo_files) != 0:
# go through list and link files
for time_file in apercal_timeinfo_files:
link_name = "{0:s}/{1:s}".format(
qa_dir_report_obs_subpage, os.path.basename(time_file))
# change to relative link when in trigger mode
if trigger_mode or single_node:
time_file = time_file.replace(
qa_dir, "../../../")
# check if link exists
if not os.path.exists(link_name):
os.symlink(time_file, link_name)
else:
os.unlink(link_name)
os.symlink(time_file, link_name)
else:
logger.warning(
"Did not fine time measurement files in {0:s}apercal_performance/".format(qa_dir))
def create_report_dirs(obs_id, qa_dir, subpages, css_file='', js_file='', trigger_mode=False, single_node=False, do_combine=False, obs_info=None, osa_files=None):
"""Function to create the directory structure of the report document
Files that are required will be linked to there.
The function can create the directory for the pages:
summary, inspection plots, preflag, crosscal, selfcal, continuum,
line, mosaic and apercal log.
The option to combine QA information from different happilis does not
do anything with inspection plots and preflag. The former is only available
from happili-01 in triggered mode and the latter is already distributed.
Args:
obs_id (str): ID of observation (scan/task_id)
qa_dir (str): Directory of QA
subpages (list(str)): List of pages to be created
css_file (str): Path to local css file (deprecated as w3css is now used)
js_file (str): Path to javascript file
trigger_mode (bool): In trigger mode the report is created only for the data on the given happili
do_combine (bool): Combine the information from different happilis.
obs_info (dict): Additional information about the observation (target name, fluxcal, and polcal)
"""
# first check that the subdirectory report exists
qa_dir_report = os.path.join(qa_dir,"report")
# copy the js and css files
if js_file != '':
try:
copy(js_file, "{0:s}/{1:s}".format(qa_dir_report,
os.path.basename(js_file)))
logger.info("Copied {}".format(js_file))
except Exception as e:
logger.warning("Copying {} failed".format(js_file))
logger.exception(e)
if css_file != '':
try:
copy(css_file,
"{0:s}/{1:s}".format(qa_dir_report, os.path.basename(css_file)))
logger.info("Copied {}".format(css_file))
except Exception as e:
logger.warning("Copying {} failed".format(css_file))
logger.exception(e)
# copy the OSA files
if osa_files is not None:
if not trigger_mode or single_node:
for osa_file in osa_files:
try:
copy(osa_file, "{0:s}/{1:s}".format(qa_dir_report,
os.path.basename(osa_file)))
logger.info("Copied {}".format(osa_files))
except Exception as e:
logger.warning("Copied {}".format(osa_files))
logger.exception(e)
# create sub-directory for observation
# not necessary, but useful if multiple reports are combined
qa_dir_report_obs = "{0:s}/{1:s}".format(qa_dir_report, obs_id)
if os.path.exists(qa_dir_report_obs):
logger.info("Directory 'report' already exists")
else:
logger.warning(
"Directory 'report/{0:s}' does not exists and will be created".format(obs_id))
os.mkdir(qa_dir_report_obs)
# go through the subpages and create the directories for them
# also check for content and link the files
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for page in subpages:
logger.info(
"## Creating report directory for {0} and linking files...".format(page))
qa_dir_report_obs_subpage = "{0:s}/{1:s}".format(
qa_dir_report_obs, page)
if os.path.exists(qa_dir_report_obs_subpage):
logger.info("Directory {0:s} already exists".format(qa_dir_report_obs_subpage))
else:
logger.info(
"Directory '{0:s} does not exists and will be created".format(qa_dir_report_obs_subpage))
os.mkdir(qa_dir_report_obs_subpage)
# Create links for files from Observation log
# +++++++++++++++++++++++++++++++++++++++++++++++
if page == "observing_log":
try:
create_report_dir_observing_log(
qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
# Create links for files from summary
# +++++++++++++++++++++++++++++++++++
if page == "summary":
try:
create_report_dir_summary(
qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
# Create links for files from beamweights
# +++++++++++++++++++++++++++++++++++++++
if page == "beamweights":
try:
create_report_dir_beamweights(
qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
# Create links for files from inspection plot QA
# +++++++++++++++++++++++++++++++++++++++++++++++
if page == "inspection_plots":
try:
create_report_dir_inspection_plots(qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node, obs_info=obs_info)
except Exception as e:
logger.exception(e)
# Create links for files from preflag QA
# ++++++++++++++++++++++++++++++++++++++
if page == "preflag":
try:
create_report_dir_preflag(
obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
# Create links for files from crosscal QA
# +++++++++++++++++++++++++++++++++++++++
elif page == "crosscal":
try:
create_report_dir_crosscal(
obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node, do_combine=do_combine)
except Exception as e:
logger.exception(e)
# Create links for files from selfcal QA
# +++++++++++++++++++++++++++++++++++++++
elif page == "selfcal":
try:
create_report_dir_selfcal(
obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node, do_combine=do_combine)
except Exception as e:
logger.exception(e)
# Create links for files from continuum QA
# +++++++++++++++++++++++++++++++++++++++
elif page == "continuum":
try:
create_report_dir_continuum(
obs_id, qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
# Create links for files from line QA
# +++++++++++++++++++++++++++++++++++++++
elif page == "line":
try:
create_report_dir_line(
qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
# Create links for files from mosaic QA
# +++++++++++++++++++++++++++++++++++++++
elif page == "mosaic":
try:
create_report_dir_mosaic(
qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
        # Create links for files from apercal log
# ++++++++++++++++++++++++++++++++++++++
if page == "apercal_log":
try:
create_report_dir_apercal_log(
qa_dir, qa_dir_report_obs_subpage, trigger_mode=trigger_mode, single_node=single_node)
except Exception as e:
logger.exception(e)
logger.info(
"## Creating report directory for {0} and linking files... Done".format(page))
| 59,521 | 38.05643 | 236 | py |
dataqa | dataqa-master/report/html_report_content_preflag.py | import os
import sys
from astropy.table import Table
import logging
import glob
import time
import socket
import numpy as np
logger = logging.getLogger(__name__)
def write_obs_content_preflag(html_code, qa_report_obs_path, page_type, obs_info=None):
"""Function to create the html page for preflag
Args:
html_code (str): HTML code with header and title
qa_report_obs_path (str): Path to the report directory
page_type (str): The type of report page
        obs_info (dict): Information about the observation (target and calibrator names)
Return:
html_code (str): Body of HTML code for this page
"""
logger.info("Writing html code for page {0:s}".format(page_type))
html_code += """
<div class="w3-container w3-large w3-margin-bottom">
<p>This page provides information on the performance of the preflag module. You can find the following information here:</p>
<div class="w3-container w3-large">
1. Table of the preflag parameters for each source. In the current version of preflag, the parameters should all be identical for the calibrators and target. So, it is usually sufficient to look at the output from the target.<br>
            2. The preflag plots from the different beams combined into one plot per source and baseline.<br>
3. The preflag plots for each beam individually.<br>
</div>
</div>\n
"""
qa_report_obs_page_path = os.path.join(qa_report_obs_path, page_type)
# Create html code for summary table
# ==================================
if obs_info is not None:
obs_id = obs_info['Obs_ID'][0]
source_list = np.array(
[obs_info['Target'][0], obs_info['Flux_Calibrator'][0], obs_info['Pol_Calibrator'][0]])
else:
obs_id = os.path.basename(qa_report_obs_path)
source_list = None
# set the file name
preflag_summary_file = os.path.join(
qa_report_obs_page_path, "{0}_{1}_summary.csv".format(obs_id, page_type))
if os.path.exists(preflag_summary_file):
summary_table = Table.read(preflag_summary_file, format="ascii.csv")
# check if a source list already exists
if source_list is None:
source_list = np.unique(summary_table['source'])
else:
summary_table = None
# if there is a summary table
# create tables for each source
if summary_table is not None:
# get the keys for the table
table_keys = summary_table.keys()
# remove the source key as it is not necessary
table_keys.remove('source')
# button for table
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('gallery-2')">
Preflag summary table
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="gallery-2">\n"""
for source in source_list:
# get the rows for a given source
summary_table_src = summary_table[summary_table['source'] == source]
div_name = "gallery_preflag_{0}".format(source)
if len(summary_table_src) != 0:
beam_list = summary_table_src['beam']
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-dark-gray w3-hover-gray w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>
<div class="w3-container w3-margin-bottom w3-hide" name="{0}">\n""".format(div_name, source)
# beginning of table
html_code += """
<div class="w3-container w3-center">
<div class="w3-responsive">
<table class="w3-table-all">\n"""
# write the header
html_code += """
<tr class="w3-amber">\n"""
# fill header keys
for key in table_keys:
html_code += """<th>{}</th>\n""".format(
key.replace("preflag_", "").replace("targetbeams_", ""))
# close table header
html_code += """</tr>\n"""
# go through the list for each beam
for k in range(len(beam_list)):
# open row
html_code += """<tr>\n"""
# now go through keys and fill table
for key in table_keys:
element = summary_table_src[key][k]
# check whether it is masked
if np.ma.is_masked(element):
html_code += """<td>-</td>\n"""
else:
html_code += """<td>{0}</td>\n""".format(element)
# close row
html_code += """</tr>\n"""
# end of table
html_code += """
</table>
</div>
</div>\n"""
# closing the source button div
html_code += """</div>\n"""
else:
logger.warning(
"Could not find entries for source {}".format(source))
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
{1:s}
</button>
</div>\n""".format(div_name, source)
# closing the table button div
html_code += """</div>\n"""
else:
logger.warning("No summary table available")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('gallery-2')">
Preflag summary table
</button>
</div>\n"""
# Create html code for combined preflag plots
# ===========================================
# get images
image_list = glob.glob(
"{0:s}/{1:s}/*.png".format(qa_report_obs_path, page_type))
if len(image_list) != 0:
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('gallery-1')">
Combined plots
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="gallery-1">\n"""
img_counter = 0
for image in image_list:
if img_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}">
<img src="{0:s}/{1:s}" alt="No image" style="width:100%">
</a>
<!-- <div class="w3-container w3-center">
<h5>{1:s}</h5>
                    </div> -->
</div>\n""".format(page_type, os.path.basename(image))
if img_counter % 3 == 2 or img_counter == len(image_list)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
logger.warning("No combined preflag plots found")
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('gallery-1')">
Combined plots
</button>
</div>\n"""
# Create html code for individual beam plots
# ==========================================
# get beams
beam_list = glob.glob(
"{0:s}/{1:s}/[0-3][0-9]".format(qa_report_obs_path, page_type))
n_beams = len(beam_list)
# if there are beams go through them
if n_beams != 0:
beam_list.sort()
for k in range(n_beams):
# get the images
images_in_beam = glob.glob("{0:s}/*png".format(beam_list[k]))
div_name = "gallery{0:d}".format(k)
if len(images_in_beam) != 0:
images_in_beam.sort()
html_code += """
<div class="w3-container">
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>
</div>
<div class="w3-container w3-margin-top w3-hide" name="{0:s}">\n""".format(div_name, os.path.basename(beam_list[k]))
img_counter = 0
for image in images_in_beam:
if img_counter % 3 == 0:
html_code += """<div class="w3-row">\n"""
html_code += """
<div class="w3-third w3-border">
<a href="{0:s}/{1:s}/{2:s}">
<img src="{0:s}/{1:s}/{2:s}" alt="No image" style="width:100%">
</a>
<div class="w3-container w3-center">
<h5>{2:s}</h5>
</div>
</div>\n""".format(page_type, os.path.basename(beam_list[k]), os.path.basename(image))
if img_counter % 3 == 2 or img_counter == len(images_in_beam)-1:
html_code += """</div>\n"""
img_counter += 1
html_code += """</div>\n"""
else:
logger.warning("No images in beam {0:s} found".format(
beam_list[k]))
html_code += """
<button class="w3-btn w3-large w3-center w3-block w3-border-gray w3-amber w3-hover-yellow w3-margin-bottom w3-disabled" onclick="show_hide_plots('{0:s}')">
Beam {1:s}
</button>\n""".format(div_name, os.path.basename(beam_list[k]))
# html_code += """
# <div class="gallery" name="{0:s}">
# <p class="warning">
# No plots were found for {1:s}
# </p>
# </div>\n""".format(div_name, page_type)
else:
logger.warning("No beams found for preflag found")
html_code += """
<div class="w3-container w3-large w3-text-red">
<p>
No plots were found for preflag
</p>
</div>\n"""
return html_code
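# Minimal usage sketch (the path and observation ID are assumed examples); the
# function is meant to be called by the report generator with the HTML header
# already in place:
#
#   html_code = write_obs_content_preflag(
#       html_code, "/data/apertif/190505048/qa/report/190505048", "preflag",
#       obs_info=obs_info)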
| 11,334 | 37.686007 | 245 | py |
dataqa | dataqa-master/preflag/preflag_plots.py | # Module to merge preflag plots
import os
import numpy as np
#import pymp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import socket
import glob
import logging
logger = logging.getLogger(__name__)
def combine_preflag_plots(qa_preflag_dir, trigger_mode=False):
"""
Function to combine the plots generated by preflag
Args:
        qa_preflag_dir (str): Directory of the preflag QA
        trigger_mode (bool): For running this function as part of the automated dataQA
    Returns:
        (int) 1 or -1 if it was successful or if it aborted
"""
logging.info("Combining preflag plots")
# General set up
# ==============
# set the list of preflag directories to search
if socket.gethostname() != 'happili-01' or trigger_mode:
qa_preflag_dir_list = [qa_preflag_dir]
else:
qa_preflag_dir_list = [qa_preflag_dir, qa_preflag_dir.replace(
"data", "data2"), qa_preflag_dir.replace("data", "data3"), qa_preflag_dir.replace("data", "data4")]
# get a list of beam directories
qa_preflag_beam_dir_list = np.array([
glob.glob(os.path.join(preflag_dir, "[0-3][0-9]")) for preflag_dir in qa_preflag_dir_list])
    # combine the beam directory arrays
qa_preflag_beam_dir_list = np.concatenate(qa_preflag_beam_dir_list)
if len(qa_preflag_beam_dir_list) == 0:
logging.warning("No beam directories found. Abort")
return -1
else:
qa_preflag_beam_dir_list.sort()
# get a list of beams
beam_list = np.array([os.path.basename(beam_dir)
for beam_dir in qa_preflag_beam_dir_list])
# get a list of pngs:
qa_preflag_beam_png_list = np.array([glob.glob(os.path.join(
beam_dir, "*.png")) for beam_dir in qa_preflag_beam_dir_list])
# combine the list
qa_preflag_beam_png_list = np.concatenate(qa_preflag_beam_png_list)
if len(qa_preflag_beam_png_list) == 0:
logging.warning("No images found. Abort.")
return -1
else:
qa_preflag_beam_png_list.sort()
# get a list of unique png names
png_name_full_list = np.array([os.path.basename(png_name)
for png_name in qa_preflag_beam_png_list])
png_name_unique_list = np.unique(png_name_full_list)
#logging.info("Following plots were found {}".format(str(png_name_unique_list)))
# Going through the different pngs and combine them
# =================================================
for png_name in png_name_unique_list:
# get the source name
src_name = png_name.split("_")[0]
# name of new image
output_image_name = os.path.join(
qa_preflag_dir, png_name.replace(".png", "_combined.png"))
logging.info("Creating {}".format(output_image_name))
# get a list of indices which correspond to this png
png_indices = np.where(png_name_full_list == png_name)[0]
# number of pngs
n_png = np.size(png_indices)
# get a list of png files for the given type of png
png_path_list = qa_preflag_beam_png_list[png_indices]
# get a list of beams for this type of png
beam_png_list = np.array(
[os.path.basename(os.path.dirname(png_path)) for png_path in png_path_list])
# setting up the plot
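        # an 8 x 5 grid of panels gives one panel per beam (40 beams in total),
        # with each panel placed at the position given by its beam number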
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize, ysize))
plt.suptitle(
'{0}'.format(png_name.split(".png")[0]), size=30)
# go through the list of png
for k in range(n_png):
beam_nr = int(beam_png_list[k])
ax = plt.subplot(nx, ny, int(beam_png_list[k])+1)
plt_img = plt.imread(png_path_list[k])
ax.imshow(plt_img)
plt.title('{0}'.format(beam_nr))
ax.axis('tight')
ax.axis('off')
ax.set_aspect("equal")
#plt.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
# need to use negative space to move plots together
plt.subplots_adjust(wspace=-0.8, hspace=0.4)
plt.savefig(output_image_name, overwrite=True,
bbox_inches='tight', dpi=200)
plt.close("all")
logging.info("Combining preflag plots ... Done")
return 1
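# Minimal usage sketch (the path below is an assumed example, not a pipeline default):
#
#   combine_preflag_plots("/data/apertif/190505048/qa/preflag/", trigger_mode=False)
#
# The call returns 1 on success and -1 if no beam directories or plots were found.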
| 4,311 | 31.179104 | 111 | py |
dataqa | dataqa-master/preflag/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/mosaic/qa_mosaic.py | import numpy as np
import logging
import bdsf
import os
import time
import socket
from apercal.libs import lib
import sys
import glob
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from dataqa.continuum.validation_tool import validation
from dataqa.continuum.qa_continuum import qa_plot_pybdsf_images
logger = logging.getLogger(__name__)
# def qa_mosaic_plot_pybdsf_images(fits_file_list, plot_name_list, plot_format="png"):
# """This function creates quick plots of the diagnostic fits files
# Note:
# By default the images are created in png format with 400dpi, but it
# is possible to choose pdf
# Parameter:
# fits_file_list : list
# A list of strings with the file names of the fits files
# plot_name : list
# A list of strings with the names of the plots to save
# plot_format : str (default png)
# The format of the plot for matplotlib
# """
# # number of files
# n_fits_files = len(fits_file_list)
# print("Plotting PyBDSF diagnostic plots")
# # go through the types of images and plot them
# for k in range(n_fits_files):
# fits_hdulist = fits.open(fits_file_list[k])
# # get WCS header of cube
# wcs = WCS(fits_hdulist[0].header)
# if wcs.naxis == 4:
# wcs = wcs.dropaxis(3)
# wcs = wcs.dropaxis(2)
# img = fits_hdulist[0].data[0][0]
# elif wcs.naxis == 3:
# wcs = wcs.dropaxis(2)
# img = fits_hdulist[0].data[0]
# else:
# img = fits_hdulist[0].data
# # set up plot
# ax = plt.subplot(projection=wcs)
# # create image
# fig = ax.imshow(img * 1.e3, origin='lower')
# cbar = plt.colorbar(fig)
# cbar.set_label('Flux Density [mJy/beam]')
# ax.coords[0].set_axislabel('Right Ascension')
# ax.coords[1].set_axislabel('Declination')
# ax.coords[0].set_major_formatter('hh:mm')
# ax.set_title("{0:s}".format(os.path.basename(fits_file_list[k])))
# output = plot_name_list[k]
# if plot_format == "pdf":
# plt.savefig(output.replace(".png", ".pdf"),
# overwrite=True, bbox_inches='tight')
# else:
# plt.savefig(output, overwrite=True, bbox_inches='tight', dpi=400)
# plt.close("all")
# print("Plotting PyBDSF diagnostic plots. Done")
def qa_mosaic_run_validation(mosaic_name, qa_validation_dir, output_name='', overwrite=True):
"""This function runs pybdsf on a mosaic image.
It can also be used to run pybdsf on a single image.
Note:
The function assumes that the mosaic image is a fits file
Parameter:
mosaic_name : str
Name of the mosaic image fits
qa_validation_dir : str
The directory of the QA where the output will be saved.
Most likely this is /home/<user>/qa_science_demo_2019/mosaic/pybdsf/
output_name : str (default '')
Set the name of the output image
overwrite : bool (default True)
Set whether existing pybdsf files should be overwritten
Return:
run_mosaic_validation_status : int
Status of how well this function performed
"""
# # change the working directory to where the qa directory
# os.chdir(qa_validation_dir)
# # Create a link to the fits file so that the pybdsf log file is stored in the qa directory
# image_name = os.path.basename(mosaic_name)
# if not os.path.exists(image_name):
# os.symlink(mosaic_name, image_name)
image_name = mosaic_name
# try:
# os.symlink(mosaic_name, image_name)
# except Exception e:
# return -1
# # Check/create catalogue name
# if output_name == '':
# cat_file = "{0:s}/{1:s}".format(
# qa_validation_dir, os.path.basename(image_name).replace("fits", "_pybdsf_cat.fits"))
# else:
# cat_file = output_name
# assuming pybdsf will work
run_mosaic_validation_status = 1
# Run pybdsf on the input image
logging.info("#### Running pybdsf")
try:
# change into the directory where the QA products should be produced
# This is necessary for the current implementation of the validation tool
# Should it return to the initial directory?
os.chdir(qa_validation_dir)
# run validation tool and pybdsf combined
validation.run(image_name)
# img = bdsf.process_image(image_name, quiet=True)
# # img = bdsf.process_image(image_name, quiet=True, output_opts=True, plot_allgaus=True, plot_islands=True,
# # savefits_meanim=True, savefits_normim=True, savefits_rankim=True, savefits_residim=True, savefits_rmsim=True)
# # Write catalogue as csv file
# logging.info("#### Writing catalogue")
# img.write_catalog(outfile=cat_file, format='fits', clobber=True)
# # Save plots
# logging.info("#### Saving pybdsf plots")
# plot_type_list = ['rms', 'mean',
# 'gaus_model', 'gaus_resid', 'island_mask']
# fits_names = [cat_file.replace(
# ".fits", "_{0:s}.fits".format(plot)) for plot in plot_type_list]
# plot_names = [fits.replace(
# ".fits", ".png") for fits in fits_names]
# # plot_type_list = ['gaus_model', 'gaus_resid', 'island_mask']
# # number of plots
# n_plots = len(plot_type_list)
# for k in range(n_plots):
# img.export_image(outfile=fits_names[k],
# clobber=overwrite, img_type=plot_type_list[k])
except Exception as e:
logger.error(e)
logger.error(
"PyBDSF and validation tool failed on image {0:s}".format(image_name))
run_mosaic_validation_status = -1
plot_type_list = ['gaus_model', 'gaus_resid', 'rms', 'mean', 'island_mask']
fits_names = ["{0:s}/{1:s}".format(qa_validation_dir, os.path.basename(image_name)).replace(
".fits", "_pybdsf_{0:s}.fits".format(plot)) for plot in plot_type_list]
plot_names = [fits.replace(
".fits", ".png") for fits in fits_names]
# add the continuum image
fits_names.append(image_name)
plot_names.append("{0:s}/{1:s}".format(qa_validation_dir, os.path.basename(
image_name)).replace(".fits", ".png"))
plot_type_list.append("cont")
# create images without a lot of adjusting
try:
qa_plot_pybdsf_images(fits_names, plot_names, plot_type_list)
except Exception as e:
logger.error(e)
logger.error("Plotting PyBDSF diagnostic images failed")
return run_mosaic_validation_status
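# Minimal usage sketch (paths are assumed examples):
#
#   status = qa_mosaic_run_validation(
#       "/data/apertif/190505048/mosaic/190505048_mosaic.fits",
#       "/data/apertif/190505048/qa/mosaic")
#
# status is 1 if pybdsf and the validation tool ran, -1 if they failed.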
| 6,828 | 32.806931 | 146 | py |
dataqa | dataqa-master/mosaic/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/continuum/test_plot.py | """
Script to test plotting images
"""
import numpy as np
import os
import argparse
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.colors as mc
from astropy.wcs import WCS
#import qa_continuum
import time
def main():
start_time = time.time()
# Create and parse argument list
# ++++++++++++++++++++++++++++++
parser = argparse.ArgumentParser(
description='Create overview for QA')
# Input path to fits file
parser.add_argument("fits_file", type=str,
help='Input path to fits file')
parser.add_argument("--output_file", type=str, default='',
                        help='Output path for the plot. Default: derived from the input file name in the current working directory')
parser.add_argument("--vmin", type=float, default=0.05,
help='Min for logscale')
parser.add_argument("--vmax", type=float, default=1000.,
                        help='Max for logscale')
parser.add_argument("--symlog", action="store_true", default=False,
help='Enable sym log')
args = parser.parse_args()
fits_file = args.fits_file
print("Reading {}".format(fits_file))
if args.output_file == '':
output_file = os.path.basename(fits_file).replace(
".fits", "_{0:.2f}_{1:.0f}.png".format(args.vmin, args.vmax))
else:
output_file = args.output_file
output_file = output_file.replace(
".png", "_{0:.2f}_{1:.0f}.png".format(args.vmin, args.vmax))
# get hdus
fits_hdulist = fits.open(fits_file)
# get WCS header of cube
wcs = WCS(fits_hdulist[0].header)
# remove unnecessary axis
if wcs.naxis == 4:
wcs = wcs.dropaxis(3)
wcs = wcs.dropaxis(2)
img = fits_hdulist[0].data[0][0]
elif wcs.naxis == 3:
wcs = wcs.dropaxis(2)
img = fits_hdulist[0].data[0]
else:
img = fits_hdulist[0].data
print("Plotting image")
# set up plot
ax = plt.subplot(projection=wcs)
if args.symlog:
output_file = output_file.replace(".png", "_symlog.png")
fig = ax.imshow(
img * 1.e3, norm=mc.SymLogNorm(1.e-3, vmin=args.vmin, vmax=args.vmax, clip=False), origin='lower', interpolation="none")
else:
output_file = output_file.replace(".png", "_log.png")
img[np.where(img < 1.e-9)] = 1.e-9
fig = ax.imshow(
img * 1.e3, norm=mc.LogNorm(vmin=args.vmin, vmax=args.vmax, clip=False), origin='lower', interpolation="none")
cbar = plt.colorbar(fig)
cbar.set_label('Flux Density [mJy/beam]')
# legend
ax.coords[0].set_axislabel('Right Ascension')
ax.coords[1].set_axislabel('Declination')
ax.coords[0].set_major_formatter('hh:mm')
ax.set_title("{0:s}".format(output_file.replace(".png", "")))
print("Saving image as {}".format(output_file))
plt.savefig(output_file, overwrite=True, bbox_inches='tight', dpi=300)
if __name__ == "__main__":
main()
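# Example invocation from the command line (the fits path is an assumed example):
#
#   python test_plot.py /data/apertif/190505048/00/continuum/image_mf_00.fits \
#       --vmin 0.05 --vmax 1000 --symlog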
| 2,984 | 28.554455 | 133 | py |
dataqa | dataqa-master/continuum/qa_continuum.py | """
This script contains function to run pybdsf for the continuum QA.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as mc
from astropy.wcs import WCS
import numpy as np
import logging
import bdsf
import os
import time
import socket
from apercal.libs import lib
import sys
import glob
from dataqa.continuum.validation_tool import validation
import scipy
from astropy.io import fits
from astropy.table import Table
import pandas as pd
logger = logging.getLogger(__name__)
def get_image_from_fits(fits_file):
"""Function to get the image from a fits file
Parameter:
fits_file : str
File name of the image fits file
Return:
img : array
            The image from the fits file as an array
    """
    fits_hdulist = fits.open(fits_file)
    # check the number of image axes
    wcs = WCS(fits_hdulist[0].header)
if wcs.naxis == 4:
img = fits_hdulist[0].data[0][0]
elif wcs.naxis == 3:
img = fits_hdulist[0].data[0]
else:
img = fits_hdulist[0].data
# close the fits file
    fits_hdulist.close()
return img
def qa_check_image_gaussianity(fits_file, alpha=1.e-2):
"""Check if an image has gaussian distribution
Note:
Function was taken from apercal.subs.qa.checkimagegaussianity
Parameter:
fits_file : str
The name of the fits image to process
alpha : float (default 1e-2)
            Parameter to judge the gaussianity, default taken from apercal config
Returns:
True if image is ok, False otherwise
"""
img = get_image_from_fits(fits_file)
    # determine gaussianity
k2, p = scipy.stats.normaltest(img, nan_policy='omit', axis=None)
if p < alpha:
return True
else:
return False
def qa_get_image_dr(fits_file, rms):
"""Function to determine the image dynamic range
Note:
fits_file : str
The name of the fits image to process
rms : float
The noise level of the image
Returns:
image_dr : float
The image dynamic range
"""
img = get_image_from_fits(fits_file)
image_dr = np.max(img) / rms
return image_dr
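# Worked example (illustrative numbers only): for an image whose brightest pixel
# is 0.05 Jy/beam and whose rms noise is 2.5e-5 Jy/beam, qa_get_image_dr returns
# a dynamic range of 0.05 / 2.5e-5 = 2000.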
def qa_get_source_cat_dr(fits_file, rms, qa_validation_dir):
"""Function to determine the dynamic range in the source catalog
NOT FINISHED
Note:
fits_file : str
The name of the fits image to process
rms : float
The noise level of the image
qa_validation_dir : str
            The path to the directory where the QA (and pybdsf output) is stored
Returns:
source_cat_dr : float
The dynamic range in the source catalogue
"""
# get name of source catalogue
source_cat_name = "{0:s}/{1:s}".format(qa_validation_dir, os.path.basename(
fits_file).replace(".fits", "_pybdsf_cat.csv"))
# read pybdsf catalog
cat_data = Table.read(source_cat_name, format=source_cat_name.split(
".")[-1], header_start=4, data_start=5)
# search for dynamic range in catalogue
return 0
def qa_get_image_noise_dr_gaussianity(fits_file, qa_validation_dir):
"""This functions determines additional image QA informaiton.
NOT FINISHED
Note:
The function determines the image noise, the local dynamic range and gaussianity of an image
Parameter:
fits_file : str
The file name of the fits image to process
qa_validation_dir : str
The directory of the continuum or mosaic QA where the output should be saved to
"""
logger.info("# Performing additional QA tests...")
    # Checking noise
# +++++++++++++++++++
# get the residual continuum image
rms = 1
# Checking image dynamic range
# ++++++++++++++++++++++++++++
logger.info("Determining image dynamic range ...")
    image_dynamic_range = qa_get_image_dr(fits_file, rms)
    logger.info("Image dynamic range is: {0:.3g}".format(image_dynamic_range))
# Checking source dynamic range
# +++++++++++++++++++++++++++++
logger.info("Determining source catalogue dynamic range ...")
    source_cat_dynamic_range = qa_get_source_cat_dr(
        fits_file, rms, qa_validation_dir)
    logger.info("Source catalogue dynamic range is: {0:.3g}".format(
        source_cat_dynamic_range))
# Checking local dynamic range
# ++++++++++++++++++++++++++++
    # Checking gaussianity
# +++++++++++++++++++++
logger.info("Testing Gaussianity ...")
gaussianity_confirm = qa_check_image_gaussianity(fits_file)
logger.info("Image fullfills gaussianity: {0}".format(gaussianity_confirm))
# Write output file as xml
# ++++++++++++++++++++++++
output_file_name = "{0:s}/{1:s}".format(
qa_validation_dir, os.path.basename(fits_file).replace(".fits", "QA_info.xml"))
def qa_plot_pybdsf_images(fits_file_list, plot_name_list, plot_type_list, plot_format="png"):
"""This function creates quick plots of the diagnostic fits files
Note:
By default the images are created in png format with 400dpi, but it
is possible to choose pdf
Parameter:
fits_file_list : list
A list of strings with the file names of the fits files
plot_name_list : list
A list of strings with the names of the plots to save
plot_type_list : list
List of the types of plots which controls some plot settings.
plot_format : str (default png)
The format of the plot for matplotlib
"""
# number of files
n_fits_files = len(fits_file_list)
logger.info("Plotting PyBDSF diagnostic plots")
# go through the types of images and plot them
for k in range(n_fits_files):
logger.info("Plotting {0:s}".format(fits_file_list[k]))
fits_hdulist = fits.open(fits_file_list[k])
# get WCS header of cube
wcs = WCS(fits_hdulist[0].header)
# remove unnecessary axis
if wcs.naxis == 4:
wcs = wcs.dropaxis(3)
wcs = wcs.dropaxis(2)
img = fits_hdulist[0].data[0][0]
elif wcs.naxis == 3:
wcs = wcs.dropaxis(2)
img = fits_hdulist[0].data[0]
else:
img = fits_hdulist[0].data
# set up plot
ax = plt.subplot(projection=wcs)
# create image
if plot_type_list[k] == 'cont' or plot_type_list[k] == 'gaus_resid':
# using log norm here set to 0.02mJy/beam
fig = ax.imshow(
img * 1.e3, norm=mc.SymLogNorm(1.e-9, vmin=0.02, vmax=1), origin='lower', cmap="hot")
# fig = ax.imshow(
# img * 1.e3, norm=mc.SymLogNorm(1.e-9, vmin=0.02, vmax=1), origin='lower', cmap="hot")
elif plot_type_list[k] == 'gaus_model':
# using log norm here set to 0.02mJy/beam
fig = ax.imshow(
img * 1.e3, norm=mc.LogNorm(0.02), origin='lower', cmap="hot")
elif plot_type_list[k] == 'island_mask':
fig = ax.imshow(img, origin='lower')
else:
fig = ax.imshow(img * 1.e3, origin='lower')
cbar = plt.colorbar(fig)
if not plot_type_list[k] == 'island_mask':
cbar.set_label('Flux Density [mJy/beam]')
ax.coords[0].set_axislabel('Right Ascension')
ax.coords[1].set_axislabel('Declination')
ax.coords[0].set_major_formatter('hh:mm')
ax.set_title("{0:s}".format(os.path.basename(fits_file_list[k])))
output = plot_name_list[k]
if plot_format == "pdf":
plt.savefig(output.replace(".png", ".pdf"),
overwrite=True, bbox_inches='tight')
else:
plt.savefig(output, overwrite=True, bbox_inches='tight', dpi=300)
plt.close("all")
logger.info("Plotting PyBDSF diagnostic plots. Done")
def get_continuum_fits_images(data_basedir_list, qa_validation_dir, save_table=True):
"""Function that gets the list of existing continuum fits images for each beam.
Note:
        The function goes through the four data directories and collects all fits images
it finds for the beams. It accounts for missing fits images.
It always looks for 40 beams no matter on which happili node.
Therefore, the output table contains the two columns "beam_exists"
and "fits_image_exists". The first one will be True if a beam is
available on a particular node and the last one if a continuum image
exists. "fits_image_exists" can be False if "beam_exists" is True if
no continuum image was found for an existing beam.
Parameter:
data_basedir_list : list
The data directories on happili
qa_validation_dir : str
Directory where the QA is stored
save_table : bool (default True)
Save the list of fits files to the QA directory
Return:
fits_file_table : Astropy Table
The beams and path to the continuum image
"""
# this is how many beams there should be
n_beams_total = 40
# create table with columns of a beam id, the actual beam name, and the fits image path
beam_id = np.arange(n_beams_total)
beam_names = np.array(['{0:02d}'.format(beam)
for beam in range(n_beams_total)])
beam_exists = np.array([False for beam in range(n_beams_total)])
fits_image_exists = np.array([False for beam in range(n_beams_total)])
fits_file_path = np.array([str for beam in range(n_beams_total)])
# create astropy table
fits_file_table = Table([beam_id, beam_names, fits_file_path, beam_exists, fits_image_exists], names=(
'beam_id', 'beam_name', 'fits_image_path', 'beam_exists', 'fits_image_exists'))
# count how many beams and fits files were found
n_beams_found_total = 0
n_fits_images_found = 0
logger.info("Getting a list of continuum fits images")
# Go through all the different data directories
for data_basedir in data_basedir_list:
# get the beams in this directory
beam_data_dir_list = glob.glob("{0:s}/[0-3][0-9]".format(data_basedir))
beam_data_dir_list.sort()
# number of beams
n_beams = len(beam_data_dir_list)
# total number of beams found
n_beams_found_total += n_beams
# check that beams exists
if n_beams == 0:
logger.error(
"No beams found in {0:s}. Go to next data directory".format(data_basedir))
continue
else:
logger.info("Found {0:d} beams in {1:s}".format(
n_beams, data_basedir))
# Now go through each beam
for beam_dir in beam_data_dir_list:
# get the beam from the directory
beam = os.path.basename(beam_dir)
# get the index for the table where path should be stored
table_beam_index = np.where(
fits_file_table['beam_name'] == beam)[0]
fits_file_table['beam_exists'][table_beam_index] = True
# directory of continuum images
continuum_image_dir = "{0:s}/continuum".format(beam_dir)
# Get the fits image
fits_image = glob.glob(
"{0:s}/image_mf_*.fits".format(continuum_image_dir))
            # check whether no fits file, one fits file, or more than one was found
            # the latter case should not exist, but it should not stop the QA
if len(fits_image) == 0:
fits_file_table['fits_image_path'][table_beam_index] = ''
logger.warning(
"Did not find any fits image for beam {0:s}".format(beam))
continue
elif len(fits_image) == 1:
fits_image = fits_image[0]
n_fits_images_found += 1
else:
fits_image.sort()
logger.warning(
"Found more than one fits image for beam {0:s}. Take the last one".format(beam))
fits_image = fits_image[-1]
n_fits_images_found += 1
# there should always be a match, but just in case
try:
fits_file_table['fits_image_path'][table_beam_index] = fits_image
fits_file_table['fits_image_exists'][table_beam_index] = True
except Exception as e:
logger.error(e)
logger.error(
"Could not match beam {0:s} to table of fits images".format(beam))
# set all image paths to "-" for which no image exists
fits_file_table['fits_image_path'][np.where(
fits_file_table['fits_image_exists'] == False)] = ""
# Check how many beams failed
if n_beams_found_total < n_beams_total:
logger.info("Found {0:d} out of {1:d} beams".format(
n_beams_found_total, n_beams_total))
else:
logger.info("Found all {0:d} beams".format(n_beams_found_total))
# check how many fits files were found
if n_fits_images_found < n_beams_found_total:
logger.info(
"Found {0:d} fits images out of {1:d} available beams".format(n_fits_images_found, n_beams_found_total))
else:
logger.info("Found a fits file for each of the {0:d}".format(
n_beams_found_total))
# save the file
if save_table:
save_name = "{0:s}/fits_image_list.csv".format(qa_validation_dir)
logger.info("Saving table with fits images to {0:s}".format(save_name))
fits_file_table.write(save_name, format="csv")
# return the table
return fits_file_table
def print_summary(sdict):
beams = ['{:02d}'.format(i) for i in range(40)]
df = pd.DataFrame(columns=['desc'] + beams)
df['desc'] = ['RMS', 'IDR', 'LDR', 'BMAJ', 'BMIN', 'BPA']
for beam in beams:
        if beam not in sdict.keys():
df[beam] = [0, 0, 0, 0, 0, 0]
else:
df[beam] = sdict[beam]
# print(df.to_csv(sys.stdout, index=False))
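    # transpose so that beams become rows and the metrics become columns; the
    # first row then holds the metric names (the former 'desc' column), so it
    # is promoted to the column header and dropped afterwards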
df = df.T
df.reset_index(level=0, inplace=True)
    df.columns = df.iloc[0]
df = df.drop(index=0)
df.columns = ['beam', u'RMS', u'IDR', u'LDR', u'BMAJ', u'BMIN', u'BPA']
df.insert(loc=1, column='Success', value=True)
    df.loc[df.RMS == 0, 'Success'] = False
df['BMAJ'] = df['BMAJ'].map('{:.1f}'.format)
df['BMIN'] = df['BMIN'].map('{:.1f}'.format)
df['BPA'] = df['BPA'].map('{:.2f}'.format)
df.to_csv('../../continuum_image_properties.csv', index=False)
def qa_continuum_run_validation(data_basedir_list, qa_validation_dir, overwrite=True):
"""This function runs pybdsf on the continuum image of each beam
This function will create a new directory for each beam. In this sub-directory
the fits image and the pybdsf output will be saved.
In the end the function will provide information on how many directories, beams,
or pybdsf runs failed.
Note:
The function will always overwrite existing files.
Parameter:
data_basedir_list : list
List of data directories on the happili node
qa_validation_dir : str
The directory of the QA where the output will be saved.
Most likely this is /home/<user>/qa_science_demo_2019/continuum/
Return:
run_pybdsf_status : int
Status of how well this function performed
"""
logger.info("#### Running validation for each beam")
# get the available fits images for the available beams
fits_file_table = get_continuum_fits_images(
data_basedir_list, qa_validation_dir)
    # # Get only the rows of the table for which beams exist
# fits_file_table = fits_file_table[np.where(fits_file_table['beam_exists']==True)]
summary = dict()
for beam_index in fits_file_table['beam_id']:
# if a beam does not exists go directly to the next one
if fits_file_table['beam_exists'][beam_index]:
logger.info("Found beam {0:s}".format(
fits_file_table['beam_name'][beam_index]))
else:
logger.info("No beam {0:s}".format(
fits_file_table['beam_name'][beam_index]))
continue
# create a subdirectory for the beam in the qa directory
qa_validation_beam_dir = "{0:s}/{1:s}".format(
qa_validation_dir, fits_file_table['beam_name'][beam_index])
if not os.path.exists(qa_validation_beam_dir):
logger.info("Creating {0:s}".format(qa_validation_beam_dir))
os.mkdir(qa_validation_beam_dir)
# get the path to the fits image
fits_image = fits_file_table['fits_image_path'][beam_index]
if fits_image == '':
logger.warning("No fits image for beam {0:s}".format(
fits_file_table['beam_name'][beam_index]))
else:
# run pybdsf
logger.info("## Running validation tool and pybdsf")
try:
# change into the directory where the QA products should be produced
# This is necessary for the current implementation of the validation tool
# Should it return to the initial directory?
os.chdir(qa_validation_beam_dir)
# run validation tool and pybdsf combined
img, cat, rep = validation.run(fits_image)
img_rms = int(cat.img_rms)
idr = int(cat.dynamic_range)
ldr_min, _ = cat.local_dynrange
ldr_min = int(ldr_min)
bmaj = img.bmaj
bmin = img.bmin
bpa = img.bpa
summary.update({'{:02d}'.format(beam_index)
: [img_rms, idr, ldr_min, bmaj, bmin, bpa]})
logger.info("## Running validation tool. Done")
except Exception as e:
logger.error(e)
logger.error("## Running validation tool failed.")
img_rms = 0
idr = 0
ldr_min, _ = 0, 0
bmaj = bmin = bpa = 0
summary.update({'{:02d}'.format(beam_index)
: [img_rms, idr, ldr_min, bmaj, bmin, bpa]})
plot_type_list = ['gaus_model', 'gaus_resid',
'rms', 'mean', 'island_mask']
fits_names = ["{0:s}/{1:s}".format(qa_validation_beam_dir, os.path.basename(fits_image)).replace(
".fits", "_pybdsf_{0:s}.fits".format(plot)) for plot in plot_type_list]
plot_names = [fits.replace(
".fits", ".png") for fits in fits_names]
# add the continuum image
fits_names.append(fits_image)
plot_names.append("{0:s}/{1:s}".format(qa_validation_beam_dir, os.path.basename(
fits_image)).replace(".fits", ".png"))
plot_type_list.append("cont")
# create images without a lot of adjusting
try:
qa_plot_pybdsf_images(fits_names, plot_names, plot_type_list)
except Exception as e:
logger.error(e)
logger.error("## Plotting PyBDSF diagnostic images failed")
print_summary(summary)
| 19,282 | 33.068905 | 116 | py |
dataqa | dataqa-master/continuum/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/continuum/continuum_tables.py | # This module contains functionality to merge the image properties tables
import os
import glob
from astropy.table import Table, vstack
import numpy as np
import logging
logger = logging.getLogger(__name__)
def merge_continuum_image_properties_table(obs_id, qa_dir, single_node=False):
"""
    This function combines the image properties tables from the different happili nodes into a single table.
"""
# the original tables
cont_table_file_1 = os.path.join(
qa_dir, "continuum/continuum_image_properties.csv")
if "/data" in qa_dir:
cont_table_file_2 = os.path.join(
qa_dir.replace("/data/", "/data2/"), "continuum/continuum_image_properties.csv")
cont_table_file_3 = os.path.join(
qa_dir.replace("/data/", "/data3/"), "continuum/continuum_image_properties.csv")
cont_table_file_4 = os.path.join(
qa_dir.replace("/data/", "/data4/"), "continuum/continuum_image_properties.csv")
else:
cont_table_file_2 = os.path.join(
qa_dir.replace("/tank/", "/tank2/"), "continuum/continuum_image_properties.csv")
cont_table_file_3 = os.path.join(
qa_dir.replace("/tank/", "/tank3/"), "continuum/continuum_image_properties.csv")
cont_table_file_4 = os.path.join(
qa_dir.replace("/tank/", "/tank4/"), "continuum/continuum_image_properties.csv")
# read the content and get only the relevant beams
combined_table = []
# check that table exists, then get content
if os.path.exists(cont_table_file_1):
cont_data_1 = Table.read(cont_table_file_1, format="ascii.csv")
# if everything is on one node, the next line is not necessary
if not single_node:
cont_data_1_beams = cont_data_1[np.where(
(cont_data_1['beam'] >= 0) & (cont_data_1['beam'] <= 9))]
combined_table.append(cont_data_1_beams)
else:
combined_table.append(cont_data_1)
else:
logger.warning("Could not find {}".format(cont_table_file_1))
# check that table exists, then get content
if os.path.exists(cont_table_file_2):
cont_data_2 = Table.read(cont_table_file_2, format="ascii.csv")
cont_data_2_beams = cont_data_2[np.where(
(cont_data_2['beam'] >= 10) & (cont_data_2['beam'] <= 19))]
combined_table.append(cont_data_2_beams)
else:
logger.warning("Could not find {}".format(cont_table_file_2))
# check that table exists, then get content
if os.path.exists(cont_table_file_3):
cont_data_3 = Table.read(cont_table_file_3, format="ascii.csv")
cont_data_3_beams = cont_data_3[np.where(
(cont_data_3['beam'] >= 20) & (cont_data_3['beam'] <= 29))]
combined_table.append(cont_data_3_beams)
else:
logger.warning("Could not find {}".format(cont_table_file_3))
# check that table exists, then get content
if os.path.exists(cont_table_file_4):
cont_data_4 = Table.read(cont_table_file_4, format="ascii.csv")
cont_data_4_beams = cont_data_4[np.where(
(cont_data_4['beam'] >= 30) & (cont_data_4['beam'] <= 39))]
combined_table.append(cont_data_4_beams)
else:
logger.warning("Could not find {}".format(cont_table_file_4))
# check the length of the new table to make sure it is not empty
if len(combined_table) != 0:
new_table = vstack(combined_table)
new_table_name = os.path.join(
qa_dir, "continuum/{}_combined_continuum_image_properties.csv".format(obs_id))
new_table.write(new_table_name, format="ascii.csv", overwrite=True)
else:
logger.warning("No tables with continuum image properties found")
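# Minimal usage sketch (hypothetical obs id and path):
#   merge_continuum_image_properties_table("190428055", "/data/apertif/190428055/qa/")
# reads continuum/continuum_image_properties.csv from the /data, /data2, /data3 and /data4
# (or /tank*) copies of the qa directory, keeps beams 00-09, 10-19, 20-29 and 30-39
# respectively, and writes continuum/<obs_id>_combined_continuum_image_properties.csv.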
| 3,695 | 41.482759 | 92 | py |
dataqa | dataqa-master/continuum/validation_tool/validation.py | #!/usr/bin/env python2
from __future__ import division
import os
# from datetime import datetime
from inspect import currentframe, getframeinfo
#Set my own obvious warning output
cf = currentframe()
WARN = '\n\033[91mWARNING: \033[0m' + getframeinfo(cf).filename
from functions import find_file, config2dic, changeDir
from radio_image import radio_image
from catalogue import catalogue
from report import report
def run(fits_image, finder='pybdsf', snr=5.0, verbose=True, refind=False, redo=False,
config_files=['FIRST_config.txt', 'NVSS_config.txt', 'TGSS_config.txt'],
use_peak=False, ncores=8, nbins=50, filter_config=None, write_all=True,
aegean_params='--floodclip=3', pybdsf_params=dict()):
#find directory that contains all the necessary files
main_dir, _ = os.path.split(os.path.realpath(__file__))
    #Set parameters passed in by user
img = os.path.abspath(fits_image)
suffix = '{0}_snr{1}_'.format(finder, snr)
if use_peak:
suffix += 'peak'
else:
suffix += 'int'
changeDir(img, suffix, verbose=verbose)
#Load image
IMG = radio_image(img, verbose=verbose, finder=finder, SNR=snr)
    #Run the requested source finder (Aegean or PyBDSF) to produce the component catalogue
if finder == 'aegean':
IMG.run_BANE(ncores=ncores,redo=refind)
IMG.run_Aegean(ncores=ncores, redo=refind, params=aegean_params, write=write_all)
elif finder == 'pybdsf':
IMG.run_PyBDSF(ncores=ncores, redo=refind, pybdsf_params=pybdsf_params, write=write_all)
main_cat = IMG.cat_comp
#Create catalogue object
CAT = catalogue(main_cat, 'APERTIF', finder=finder, image=IMG, SNR=snr,
verbose=verbose, autoload=False, use_peak=use_peak)
#Filter out sources below input SNR, set specs and create report object before filtering
#catalogue further so specs and source counts can be written for all sources above input SNR
CAT.filter_sources(SNR=snr, flags=True, redo=redo, write=write_all,
verbose=verbose, file_suffix='_snr{0}'.format(snr))
CAT.set_specs(IMG)
REPORT = report(CAT, main_dir, img=IMG, verbose=verbose, plot_to='html', redo=redo,
src_cnt_bins=nbins, write=write_all)
# use config file for filtering sources if it exists
if filter_config is not None:
if verbose:
print "Using config file '{0}' for filtering.".format(filter_config)
filter_dic = config2dic(filter_config,main_dir,verbose=verbose)
filter_dic.update({'redo' : redo, 'write' : write_all, 'verbose' : verbose})
CAT.filter_sources(**filter_dic)
else:
# otherwise use default criteria, selecting reliable point sources for comparison
CAT.filter_sources(flux_lim=1e-3, ratio_frac=1.4, ratio_sigma=0,
reject_blends=True,
flags=False, psf_tol=1.5, resid_tol=3,
redo=redo, write=write_all, verbose=verbose)
#process each catalogue object according to list of input catalogue config files
#this will cut out a box, cross-match to this instance, and derive the spectral indices.
for config_file in config_files:
if verbose:
print "Using config file '{0}' for catalogue.".format(config_file)
config_file = config_file.strip() #in case user put a space
config_file = find_file(config_file,main_dir, verbose=verbose)
CAT.process_config_file(config_file, main_dir, redo=redo,
verbose=verbose,
write_all=write_all, write_any=write_all)
    # Fit radio SED models using all cross-matched fluxes and derive the flux at the frequency of the main catalogue
if len(CAT.cat_list) > 1:
CAT.fit_spectra(redo=redo, models=None, GLEAM_subbands='int',
GLEAM_nchans=4, cat_name=None, write=write_all)
print "----------------------------"
print "| Running validation tests |"
print "----------------------------"
#Produce validation report for each cross-matched catalogue
for cat_name in CAT.cat_list[1:]:
REPORT.validate(CAT.name, cat_name, redo=redo)
#write file with RA/DEC offsets for pipeline
#and append validation metrics to html file and then close it
REPORT.write_pipeline_offset_params()
REPORT.write_html_end()
return IMG, CAT, REPORT
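# Minimal usage sketch (hypothetical image name): running the validation tool and
# PyBDSF on a single restored continuum image, as done by the DataQA beam loop:
#   import validation
#   img, cat, rep = validation.run('image_mf_00.fits', finder='pybdsf', snr=5.0)
# Note that run() changes into a '<image>_continuum_validation_<suffix>' output
# directory, so call it from the directory where the QA products should be written.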
if __name__ == "__main__":
print("Hello!") | 4,420 | 41.104762 | 96 | py |
dataqa | dataqa-master/continuum/validation_tool/functions.py | from __future__ import division
import os
import numpy as np
import scipy.optimize as opt
import scipy.special as special
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from matplotlib import ticker
def changeDir(filepath, suffix, verbose=False):
"""Derive a directory name from an input file to store all output files, create it, and then change to it.
Arguments:
----------
filepath : string
A path to a fits image or catalogue.
suffix : string
A suffix to append to the end of the created directory.
Keyword arguments:
------------------
verbose : bool
Verbose output."""
#derive directrory name for output files
filename = filepath.split('/')[-1]
basename = remove_extn(filename)
dir = '{0}_continuum_validation_{1}'.format(basename,suffix)
#create it if it doesn't exist
if not os.path.exists(dir):
if verbose:
print "Making directory for output files - {0}.".format(dir)
os.mkdir(dir)
#move to that directory and update the filepath
if verbose:
print "Changing to directory for output files - '{0}'.".format(dir)
os.chdir(dir)
###The following are radio SED models as a function of frequency and several fitted parameters###
def powlaw(freq,S_norm,alpha):
return S_norm*freq**alpha
def curve(freq, S_max, nu_m, alpha_thick, alpha_thin):
return S_max/(1 -np.exp(-1))*((freq/nu_m)**alpha_thick)*(1 - np.exp(-(freq/nu_m)**(alpha_thin-alpha_thick)))
def pow_CIbreak(freq, S_norm, alpha, nu_br):
return S_norm*(freq/nu_br)**(alpha+0.5 + 0.5*(1 + (freq/nu_br)**4)**-1)
def pow_CIbreak2(freq, S_norm, alpha, nu_br):
alpha,freq = CIbreak(freq,alpha,nu_br)
return S_norm*freq**alpha
def pow_JPbreak(freq, S_norm, alpha, nu_br):
return S_norm*(freq**alpha)*JPbreak(freq,nu_br)
def SSA(freq,S_norm,beta,nu_m):
return S_norm*((freq/nu_m)**(-(beta-1)/2))*(1-np.exp(-(freq/nu_m)**(-(beta+4)/2)))/((freq/nu_m)**(-(beta+4)/2))
def SSA_JPbreak(freq,S_norm,beta,nu_m,nu_br):
return SSA(freq,S_norm,beta,nu_m)*JPbreak(freq,nu_br)
def SSA_CIbreak(freq,S_norm,beta,nu_m,nu_br):
dummyalpha,dummyfreq = CIbreak(freq,beta,nu_br)
return SSA(freq,S_norm,beta,nu_m)*dummyfreq**dummyalpha
def FFA(freq,S_norm,alpha,nu_m):
return S_norm*(freq**(alpha))*np.exp(-(freq/nu_m)**(-2.1))
def Bic98_FFA(freq,S_norm,alpha,p,nu_m):
return S_norm*(p+1)*((freq/nu_m)**(2.1*(p+1)+alpha))*special.gammainc((p+1),((freq/nu_m)**(-2.1)))*special.gamma(p+1)
def Bic98_FFA_CIbreak(freq,S_norm,alpha,p,nu_m,nu_br):
dummyalpha,dummyfreq = CIbreak(freq,alpha,nu_br)
return Bic98_FFA(freq,S_norm,alpha,p,nu_m)*dummyfreq**dummyalpha
def Bic98_FFA_JPbreak(freq,S_norm,alpha,p,nu_m,nu_br):
return Bic98_FFA(freq,S_norm,alpha,p,nu_m)*JPbreak(freq,nu_br)
def CIbreak(freq,alpha,nu_br):
alpha = np.where(freq <= nu_br, alpha, alpha-0.5)
dummyfreq = freq / nu_br
return alpha,dummyfreq
def JPbreak(freq,nu_br):
return np.exp(-freq/nu_br)
def flux_at_freq(freq,known_freq,known_flux,alpha):
"""Get the flux of a source at a given frequency, according to a given power law.
Arguments:
----------
freq : float
The frequency at which to measure the flux.
known_freq : float
A frequency at which the flux is known.
known_flux : float
The flux at the known frequency.
alpha : float
The spectral index.
Returns:
--------
flux : float
The flux at the given frequency."""
return 10**(alpha*(np.log10(freq) - np.log10(known_freq)) + np.log10(known_flux))
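# Worked example (hypothetical values): a 2.5 Jy source at 150 MHz with a spectral
# index of -0.8 gives flux_at_freq(1400., 150., 2.5, -0.8) ~= 0.42 Jy at 1400 MHz.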
def ticks_format_flux(value, index):
"""Return flux density ticks in mJy"""
value = value*1e3
return ticks_format(value, index)
def ticks_format_freq(value, index):
"""Return frequency ticks in GHz"""
value = value/1e3
return ticks_format(value, index)
def ticks_format(value, index):
    """Return matplotlib ticks in LaTeX format, writing the value as an integer (for 1-9999), a 2-decimal float (for values below 1), or otherwise as n*10^m.
Arguments:
----------
value : float
The value of the tick.
index : float
The index of the tick.
Returns:
--------
tick : string
The tick at that value, in LaTeX format."""
#get the exponent and base
exp = np.floor(np.log10(value))
base = value/10**exp
#format according to values
if exp >= 0 and exp <= 3:
return '${0:d}$'.format(int(value))
elif exp <= -1:
return '${0:.2f}$'.format(value)
else:
return '${0:d}\\times10^{{{1:d}}}$'.format(int(base), int(exp))
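# e.g. ticks_format(2000, 0) -> '$2000$', ticks_format(0.5, 0) -> '$0.50$' and
# ticks_format(20000, 0) -> '$2\times10^{4}$' (the index argument is unused here).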
def sig_figs(value, n=2):
"""Return a string of the input value with n significant figures.
Arguments:
----------
value : float
The input value
Keyword arguments:
------------------
n : int
The number of significant figures.
Returns:
--------
value : string
The value to n significant figures."""
return ("{0:.%d}" % (n)).format(value)
def plot_spectra(freqs, fluxes, errs, models, names, params, param_errs, rcs, BICs, colours, labels, figname, annotate=True, model_selection='better'):
"""Plot a figure of the radio spectra of an individual source, according to the input data and models.
Arguments:
----------
freqs : list
A list of frequencies in MHz.
fluxes : list
A list of fluxes in Jy.
errs : list
A list of flux uncertainties in Jy.
models : list
A list of functions corresponding to models of the radio spectrum.
names : 2D list
A list of fitted parameter names corresponding to each model above.
params : 2D list
A list of fitted parameter values corresponding to each model above.
param_errs : 2D list
A list of uncertainties on the fitted parameters corresponding to each model above.
rcs : list
A list of reduced chi squared values corresponding to each model above.
BICs : list
A list of Bayesian Information Criteria (BIC) values corresponding to each model above.
colours : list
A list of colours corresponding to each model above.
labels : list
A list of labels corresponding to each model above.
figname : string
The filename to give the figure when writing to file.
Keyword arguments:
------------------
annotate : bool
Annotate fit info onto figure.
model_selection : string
How to select models for plotting, based on the BIC values. Options are:
'best' - only plot the best model.
'all' - plot all models.
'better' - plot each model better than the previous, chronologically."""
#create SEDs directory if doesn't already exist
if not os.path.exists('SEDs'):
os.mkdir('SEDs')
fig=plt.figure()
ax=plt.subplot()
#plot frequency axis 20% beyond range of values
xlin = np.linspace(min(freqs)*0.8,max(freqs)*1.2,num=5000)
plt.ylabel(r'Flux Density $S$ (mJy)')
plt.xlabel(r'Frequency $\nu$ (GHz)')
plt.xscale('log')
plt.yscale('log')
#adjust the tick values and add grid lines at minor tick locations
subs = [1.0, 2.0, 5.0]
ax.xaxis.set_major_locator(ticker.LogLocator(subs=subs))
ax.yaxis.set_major_locator(ticker.LogLocator(subs=subs))
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
ax.xaxis.set_major_formatter(ticker.FuncFormatter(ticks_format_freq))
ax.yaxis.set_major_formatter(ticker.FuncFormatter(ticks_format_flux))
ax.grid(b=True, which='minor', color='w', linewidth=0.5)
#plot flux measurements
plt.errorbar(freqs,fluxes,yerr=errs,linestyle='none',marker='.',c='r',zorder=15)
best_bic = 0
dBIC = 3
offset = 0
plotted_models = 0
#plot each model
for i in range (len(models)):
ylin = models[i](xlin,*params[i])
txt = "{0}:\n {1}".format(labels[i],r'$\chi^2_{\rm red} = %.1f$' % rcs[i])
#compare BIC values
bic = BICs[i]
if i > 0:
dBIC = best_bic - bic
if model_selection != 'best':
txt += ', {0}'.format(r'$\Delta{\rm BIC} = %.1f$' % (dBIC))
if dBIC >= 3:
best_bic = bic
#plot model if selected according to input
if model_selection == 'all' or (model_selection == 'better' and dBIC >= 3) or (model_selection == 'best' and BICs[i] == min(BICs)):
plotted_models += 1
plt.plot(xlin,ylin,c=colours[i],linestyle='--',zorder=i+1,label=labels[i])
plt.legend(scatterpoints=1,fancybox=True,frameon=True,shadow=True)
txt += '\n'
#add each fitted parameter to string (in LaTeX format)
for j,param in enumerate(names[i]):
units = ''
tokens = param.split('_')
if len(tokens[0]) > 1:
tokens[0] = "\\" + tokens[0]
if len(tokens) > 1:
param = r'%s_{\rm %s}' % (tokens[0],tokens[1])
else:
param = tokens[0]
val = params[i][j]
err = param_errs[i][j]
if param.startswith('S'):
units = 'Jy'
if val < 0.01:
val = val*1e3
err = err*1e3
units = 'mJy'
elif 'nu' in param:
units = 'MHz'
if val > 100:
val = val/1e3
err = err/1e3
units = 'GHz'
val = sig_figs(val)
err = sig_figs(err)
txt += ' ' + r'${0}$ = {1} $\pm$ {2} {3}'.format(param,val,err,units) + '\n'
#annotate all fit info if it will fit on figure
if annotate and plotted_models <= 3:
plt.text(offset,0,txt,horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
offset += 0.33
#write figure and close
plt.savefig('SEDs/{0}'.format(figname))
plt.close()
def likelihood(ydata,ymodel,yerrs):
"""Return the likelihood for a given model of a single source.
Arguments:
----------
ydata : list
The flux values at each frequency.
ymodel : list
The values of the model at each frequency.
yerrs : list
The uncertainty on the flux at each frequency.
Returns:
--------
likelihood : float
The likelihood."""
return np.prod( ( 1 / (yerrs*np.sqrt(2*np.pi)) ) * np.exp( (-1/(2*yerrs**2)) * (ydata-ymodel)**2 ) )
def fit_info(ydata,ymodel,yerrs,deg):
"""Return the reduced chi squared and BIC values for a given model of a single source.
Arguments:
----------
ydata : list
The flux values at each frequency.
ymodel : list
The values of the model at each frequency.
yerrs : list
The uncertainty on the flux at each frequency.
deg : int
The degrees of freedom.
Returns:
--------
red_chi_sq : float
The reduced chi squared value.
BIC : float
The Bayesian Information Criteria."""
chi_sq=np.sum(((ydata-ymodel)/yerrs)**2)
DOF=len(ydata)-deg
red_chi_sq = chi_sq/DOF
BIC = -2*np.log(likelihood(ydata,ymodel,yerrs)) + deg * np.log(len(ydata))
return red_chi_sq,BIC
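# Sketch (hypothetical data): for fluxes measured at three frequencies and a fitted
# two-parameter power law, fit_info(fluxes, powlaw(freqs, *popt), errs, deg=2)
# returns the chi-squared divided by 1 degree of freedom and the corresponding BIC.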
def two_freq_power_law(freq, freqs, fluxes, errs):
"""Derive a two-frequency spectral index, uncertainty and fitted flux at the input frequency.
Arguments:
----------
freq : float
The frequency at which to calculate the flux.
freqs : list
A list of frequencies in the same units.
fluxes : list
A list of fluxes in the same units.
errs : list
A list of flux uncertainties in the same units.
Returns:
--------
alpha : float
The spectral index.
alpha_err : float
The uncertainty of the spectral index.
flux : float
The fitted flux at the input frequency."""
#directly derive alpha and error from two fluxes
alpha = np.log10(fluxes[0]/fluxes[1]) / np.log10(freqs[0]/freqs[1])
alpha_err = np.sqrt((errs[0]/fluxes[0])**2 + (errs[1]/fluxes[1])**2)/np.log10(freqs[0]/freqs[1])
flux = flux_at_freq(freq,freqs[0],fluxes[0],alpha)
return alpha,alpha_err,flux
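# Worked example (hypothetical fluxes): two_freq_power_law(1400., [150., 1400.],
# [1.0, 0.2], [0.1, 0.02]) gives alpha ~= -0.72 and a fitted 1400 MHz flux of 0.2 Jy.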
def SED(freq, freqs, fluxes, errs, models='pow', figname=None):
"""Fit SED models to an individual source and return the model params and errors along with the expected flux at a given frequency, for each input model.
Lists must be the same length and contain at least two elements, all with the same units (ideally MHz and Jy).
Arguments:
----------
freq : float
The frequency at which to calculate the flux.
freqs : list
A list of frequencies in the same units.
fluxes : list
A list of fluxes in the same units.
errs : list
A list of flux uncertainties in the same units.
Keyword arguments:
------------------
models : string or list
A single model or list of models to fit (e.g. ['pow','FFA','SSA']).
figname : string
Write a figure of the radio spectra and model to file, using this filename. Use None to not write to file.
Returns:
--------
fit_models : list
A list of fitted models.
names : 2D list
A list of lists of names of fitted parameters, for each input model.
params : 2D list
A list of lists of fitted parameters, for each input model.
errors : 2D list
A list of lists of uncertainties on the fitted parameters, for each input model.
fitted_fluxes : list
A list of fitted fluxes at the input frequency, for each input model.
rcs : list
A list of reduced chi squared values, for each input model.
BICs : list
A list of Bayesian Information Criteria (BIC) values, for each input model."""
#initial guesses of different params
S_max = max(fluxes)
nu_max = freqs[fluxes == S_max][0]
alpha = -0.8
beta = 1-2*alpha
nu_br = np.mean(freqs)
p = 0.5
#initial guesses of different models
params = { 'pow' : [S_max,alpha],
'powcibreak' : [S_max, alpha, nu_br],
'powjpbreak' : [S_max, alpha, nu_br],
'curve' : [S_max, nu_max, 1, alpha],
'ssa' : [S_max, beta, nu_max],
'ssacibreak' : [S_max, beta, nu_max, nu_br],
'ssajpbreak' : [S_max, beta, nu_max, nu_br],
'ffa' : [S_max, alpha, nu_max],
'bicffa' : [S_max, alpha, p, nu_max],
'bicffacibreak' : [S_max, alpha, p, nu_max, nu_br],
'bicffajpbreak' : [S_max, alpha, p, nu_max, nu_br]}
#different SED models from functions above
funcs = { 'pow' : powlaw,
'powcibreak' : pow_CIbreak,
'powjpbreak' : pow_JPbreak,
'curve' : curve,
'ssa' : SSA,
'ssacibreak' : SSA_CIbreak,
'ssajpbreak' : SSA_JPbreak,
'ffa' : FFA,
'bicffa' : Bic98_FFA,
'bicffacibreak' : Bic98_FFA_CIbreak,
'bicffajpbreak' : Bic98_FFA_JPbreak}
#matplotlib colours
colours = { 'pow' : 'black',
'powcibreak' : 'b',
'powjpbreak' : 'violet',
'curve' : 'r',
'ssa' : 'g',
'ssacibreak' : 'r',
'ssajpbreak' : 'g',
'ffa' : 'orange',
'bicffa' : 'r',
'bicffacibreak' : 'b',
'bicffajpbreak' : 'r'}
#labels
labels = { 'pow' : 'Power law',
'powcibreak' : 'Power law\n + CI break',
'powjpbreak' : 'Power law\n + JP break',
'curve' : 'Tschager+03 Curve',
'ssa' : 'Single SSA',
'ssacibreak' : 'Single SSA\n + CI break',
'ssajpbreak' : 'Single SSA\n + JP break',
'ffa' : 'Single FFA',
'bicffa' : 'Bicknell+98 FFA',
'bicffacibreak' : 'Bicknell+98 FFA\n + CI break',
'bicffajpbreak' : 'Bicknell+98 FFA\n + JP break'}
#store used models, fitted parameters and errors, fitted fluxes, reduced chi squared and BIC
fit_models,fit_params,fit_param_errors,fitted_fluxes,rcs,BICs = [],[],[],[],[],np.array([])
#convert single model to list
if type(models) is str:
models = [models]
for model in models:
model = model.lower()
#fit model if DOF >= 1
if len(freqs) >= len(params[model])+1:
try:
#perform a least squares fit
popt, pcov = opt.curve_fit(funcs[model], freqs, fluxes, p0 = params[model], sigma = errs, maxfev = 10000)
#add all fit info to lists
fit_models.append(model)
fit_params.append(popt)
fit_param_errors.append(np.sqrt(np.diag(pcov)))
RCS,bic = fit_info(fluxes,funcs[model](freqs,*popt),errs,len(popt))
rcs.append(RCS)
BICs = np.append(BICs,bic)
fitted_fluxes.append(funcs[model](freq,*popt))
except (ValueError,RuntimeError),e:
print "Couldn't find good fit for {0} model.".format(model)
print e
#get lists of names, functions, colours and labels for all used models
names = [funcs[model].func_code.co_varnames[1:funcs[model].func_code.co_argcount] for model in fit_models]
funcs = [funcs[model] for model in fit_models]
colours = [colours[model] for model in fit_models]
labels = [labels[model] for model in fit_models]
#write figure for this source
if figname is not None and len(fit_models) > 0:
plot_spectra(freqs,fluxes,errs,funcs,names,fit_params,fit_param_errors,rcs,BICs,colours,labels,figname,model_selection='all')
return fit_models,names,fit_params,fit_param_errors,fitted_fluxes,rcs,BICs
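# Minimal usage sketch (hypothetical measurements): fit a plain power law to three
# fluxes and predict the flux at 1400 MHz, without writing a figure:
#   models, names, params, errs, fluxes_1400, rcs, bics = SED(
#       1400., np.array([150., 843., 1400.]), np.array([1.0, 0.3, 0.2]),
#       np.array([0.1, 0.03, 0.02]), models='pow', figname=None)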
def get_pixel_area(fits,flux=0,nans=False,ra_axis=0,dec_axis=1,w=None):
"""For a given image, get the area and solid angle of all non-nan pixels or all pixels below a certain flux (doesn't count pixels=0).
The RA and DEC axes follow the WCS convention (i.e. starting from 0).
Arguments:
----------
fits : astropy.io.fits
The primary axis of a fits image.
Keyword arguments:
------------------
flux : float
The flux in Jy, below which pixels will be selected.
nans : bool
Derive the area and solid angle of all non-nan pixels.
ra_axis : int
The index of the RA axis (starting from 0).
dec_axis : int
The index of the DEC axis (starting from 0).
w : astropy.wcs.WCS
A wcs object to use for reading the pixel sizes.
Returns:
--------
area : float
The area in square degrees.
solid_ang : float
The solid angle in steradians.
See Also:
---------
astropy.io.fits
astropy.wcs.WCS"""
if w is None:
w = WCS(fits.header)
#count the pixels and derive area and solid angle of all these pixels
if nans:
count = fits.data[(~np.isnan(fits.data)) & (fits.data != 0)].shape[0]
else:
count = fits.data[(fits.data < flux) & (fits.data != 0)].shape[0]
area = (count*np.abs(w.wcs.cdelt[ra_axis])*np.abs(w.wcs.cdelt[dec_axis]))
solid_ang = area*(np.pi/180)**2
return area,solid_ang
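# Sketch (hypothetical r.m.s. map): for the primary HDU of a noise image in Jy,
#   area_deg2, solid_ang_sr = get_pixel_area(rms_hdu, flux=5 * 50e-6)
# returns the sky area and solid angle of all pixels with r.m.s. below 250 uJy,
# i.e. where a 250 uJy source would be detected at 5 sigma.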
def axis_lim(data,func,perc=10):
"""Return an axis limit value a certain % beyond the min/max value of a dataset.
Arguments:
----------
data : list-like
A list-like object input into min() or max(). Usually this will be a numpy array or pandas Series.
func : function
max or min.
Keyword Arguments:
------------------
perc : float
The percentage beyond the limit of a dataset.
Returns:
--------
lim : float
A value the input % beyond the limit.
See Also:
--------
numpy.array
pandas.Series"""
lim = func(data)
if (lim < 0 and func is min) or (lim > 0 and func is max):
lim *= (1 + (perc/100))
else:
lim *= (1 - (perc/100))
return lim
def get_stats(data):
"""Return the median, mean, standard deviation, standard error and rms of the median absolute deviation (mad) of the non-nan values in a list.
Arguments:
----------
data : list-like (numpy.array or pandas.Series)
The data used to calculate the statistics.
Returns:
--------
med : float
The median.
mean : float
The mean.
std : float
The standard deviation.
err : float
The standard error.
rms_mad : float
The rms of the mad
See Also
--------
numpy.array
pandas.Series"""
#remove nan indices, as these affect the calculations
values = data[~np.isnan(data)]
med = np.median(values)
mean = np.mean(values)
std = np.std(values)
sterr = std / np.sqrt(len(values))
rms_mad = np.median(np.abs(values-np.median(values)))/0.6745
return med,mean,std,sterr,rms_mad
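# Worked example: get_stats(np.array([1.0, 2.0, 3.0, np.nan])) ignores the NaN and
# returns median 2.0, mean 2.0, std ~0.82, standard error ~0.47 and r.m.s. of the
# median absolute deviation ~1.48.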
def remove_extn(filename):
"""Return a file name without its extension.
Arguments:
----------
filename : string
The file name.
Returns:
--------
filename : string
The file name without its extension."""
#do this in case more than one '.' in file name
return '.'.join(filename.split('.')[:-1])
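# e.g. remove_extn('image_mf_00.restored.fits') -> 'image_mf_00.restored'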
def config2dic(filepath, main_dir, verbose=False):
"""
    Read a configuration file and create a dictionary of arguments from its contents,
which will usually be passed into a new object instance.
Arguments:
----------
filepath : string
The absolute filepath of the config file.
main_dir : string
Main directory that contains all the necessary files.
Keyword Arguments:
------------------
verbose : bool
Verbose output.
Returns:
--------
args_dict : dict
A dictionary of arguments, to be passed into some function, usually a new object instance."""
#open file and read contents
config_file = open(filepath)
txt = config_file.read()
args_dict = {}
#set up dictionary of arguments based on their types
for line in txt.split('\n'):
if len(line) > 0 and line.replace(' ','')[0] != '#':
#use '=' as delimiter and strip whitespace
split = line.split('=')
key = split[0].strip()
val = split[1].strip()
val = parse_string(val)
#if parameter is filename, store the filepath
if key == 'filename':
val = find_file(val,main_dir,verbose=verbose)
args_dict.update({key : val})
config_file.close()
return args_dict
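# Sketch (hypothetical config file contents): a file containing the lines
#   filename = NVSS_catalogue.fits
#   search_rad = 10.0
# is parsed into {'filename': <path resolved via find_file>, 'search_rad': 10.0},
# with each value converted from string by parse_string().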
def parse_string(val):
"""Parse a string to another data type, based on its value.
Arguments:
----------
val : string
The string to parse.
Returns:
--------
val : string or NoneType or bool or float
The parsed string."""
if val.lower() == 'none':
val = None
elif val.lower() in ('true','false'):
val = (val.lower() == 'true')
elif val.replace('.','',1).replace('e','').replace('-','').isdigit():
val = float(val)
return val
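# e.g. parse_string('None') -> None, parse_string('true') -> True,
# parse_string('5e-3') -> 0.005, and parse_string('NVSS') is returned unchanged.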
def new_path(filepath):
"""For a given input filepath, return the path after having moved into a new directory. This will add '../' to the beginning of relative filepaths.
Arguments:
----------
filepath : string
The filepath.
Returns:
--------
filepath : string
The updated filepath."""
#add '../' to filepath if it's a relative filepath
if filepath is not None and filepath[0] != '/':
filepath = '../' + filepath
return filepath
def find_file(filepath,main_dir,verbose=True):
"""Look for a file in specific paths. Look one directory up if filepath is relative, otherwise look in main directory, otherwise raise exception.
Arguments:
----------
filepath : string
An absolute or relative filepath.
main_dir : string
Main directory that contains all the necessary files.
Returns:
--------
filepath : string
The path to where the file was found."""
#raise exception if file still not found
if not (os.path.exists(filepath) or os.path.exists('{0}/{1}'.format(main_dir,filepath)) or os.path.exists(new_path(filepath))):
raise Exception("Can't find file - {0}. Ensure this file is in input path or --main-dir.\n".format(filepath))
#otherwise update path to where file exists
elif not os.path.exists(filepath):
#look in main directory if file doesn't exist in relative filepath
if os.path.exists('{0}/{1}'.format(main_dir,filepath)):
if verbose:
print "Looking in '{0}' for '{1}'.".format(main_dir,filepath)
filepath = '{0}/{1}'.format(main_dir,filepath)
#update directory path if file is relative path
else:
filepath = new_path(filepath)
return filepath
| 25,247 | 30.718593 | 157 | py |
dataqa | dataqa-master/continuum/validation_tool/report.py | from __future__ import division
from functions import get_pixel_area, get_stats, flux_at_freq, axis_lim
import os
import collections
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt, mpld3
from matplotlib import cm, ticker, colors
from mpld3 import plugins
from matplotlib.patches import Ellipse
import matplotlib.image as image
#import seaborn
from astropy.io import fits as f
from astropy.io import votable
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS
import warnings
from inspect import currentframe, getframeinfo
#ignore annoying astropy warnings and set my own obvious warning output
warnings.simplefilter('ignore', category=AstropyWarning)
cf = currentframe()
WARN = '\n\033[91mWARNING: \033[0m' + getframeinfo(cf).filename
class report(object):
def __init__(self, cat, main_dir, img=None, plot_to='html', css_style=None,
src_cnt_bins=50, redo=False, write=True, verbose=True,
fig_font={'fontname':'Serif', 'fontsize' : 18},
fig_size={'figsize' : (8,8)}, cmap='plasma', cbins=20,
label_size={'labelsize' : 12},
markers={'s' : 20, 'linewidth' : 1, 'marker' : 'o', 'color' : 'b'},
colour_markers={'marker' : 'o', 's' : 30, 'linewidth' : 0},
arrows={'color' : 'r', 'width' : 0.04, 'scale' : 20}):
"""Initialise a report object for writing a html report of the image and cross-matches, including plots.
Arguments:
----------
cat : catalogue
Catalogue object with the data for plotting.
main_dir : string
Main directory that contains all the necessary files.
Keyword arguments:
------------------
img : radio_image
Radio image object used to write report table. If None, report will not be written, but plots will be made.
plot_to : string
Where to show or write the plot. Options are:
'html' - save as a html file using mpld3.
'screen' - plot to screen [i.e. call plt.show()].
'extn' - write file with this extension (e.g. 'pdf', 'eps', 'png', etc).
css_style : string
A css format to be inserted in <head>.
fig_font : dict
Dictionary of kwargs for font name and size for title and axis labels of matplotlib figure.
fig_size : dict
Dictionary of kwargs to pass into pyplot.figure.
label_size : dict
Dictionary of kwargs for tick params.
markers : dict
Dictionary of kwargs to pass into pyplot.figure.scatter, etc (when single colour used).
colour_markers : dict
Dictionary of kwargs to pass into pyplot.figure.scatter, etc (when colourmap used).
arrows : dict
Dictionary of kwargs to pass into pyplot.figure.quiver.
redo: bool
Produce all plots and save them, even if the files already exist.
write : bool
Write the source counts and figures to file. Input False to only write report.
verbose : bool
Verbose output.
See Also:
---------
matplotlib.pyplot
mpld3"""
self.cat = cat
self.img = img
self.plot_to = plot_to
self.fig_font = fig_font
self.fig_size = fig_size
self.label_size = label_size
self.markers = markers
self.colour_markers = colour_markers
self.arrows = arrows
self.cmap = plt.get_cmap(cmap,cbins)
self.src_cnt_bins = src_cnt_bins
self.main_dir = main_dir
self.redo = redo
self.write = write
self.verbose = verbose
self.apercal_version, self.apercal_path = self.apercal_specs()
#set name of directory for figures and create if doesn't exist
self.figDir = 'figures'
if self.write and not os.path.exists(self.figDir):
os.mkdir(self.figDir)
#use css style passed in or default style for CASS web server below
if css_style is not None:
self.css_style = css_style
else:
self.css_style = """<?php include("base.inc"); ?>
<meta name="DCTERMS.Creator" lang="en" content="" />
<meta name="DC.Title" lang="en" content="Continuum Validation Report" />
<meta name="DC.Description" lang="en" content="Continuum validation report summarising science readiness of data via several metrics" />
<?php standard_head(); ?>
<style>
.reportTable {
border-collapse: collapse;
width: 100%;
}
.reportTable th, .reportTable td {
padding: 15px;
text-align: middle;
border-bottom: 1px solid #ddd;
vertical-align: top;
}
.reportTable tr {
text-align:center;
vertical-align:middle;
}
.reportTable tr:hover{background-color:#f5f5f5}
#good {
background-color:#00FA9A;
}
#uncertain {
background-color:#FFA500;
}
#bad {
background-color:#FF6347;
}
</style>\n"""
        self.css_style += "<title>{0} Continuum Validation Report</title>\n".format(self.cat.name)
#filename of html report
self.name = 'index.html'
#Open file html file and write css style, title and heading
self.write_html_head()
#write table summary of observations and image if radio_image object passed in
if img is not None:
self.write_html_img_table(img)
rms_map = f.open(img.rms_map)[0]
solid_ang = 0
#otherwise assume area based on catalogue RA/DEC limits
else:
rms_map = None
solid_ang = self.cat.area*(np.pi/180)**2
self.write_html_cat_table()
#plot the int/peak flux as a function of peak flux
self.int_peak_flux(usePeak=True)
#write source counts to report using rms map to measure solid angle or approximate solid angle
if self.cat.name in self.cat.flux.keys():
self.source_counts(self.cat.flux[self.cat.name],self.cat.freq[self.cat.name],
rms_map=rms_map,solid_ang=solid_ang,write=self.write)
else:
self.sc_red_chi_sq = -1
#write cross-match table header
self.write_html_cross_match_table()
#store dictionary of metrics, where they come from, how many matches they're derived from, and their level (0,1 or 2)
#spectral index defaults to -99, as there is a likelihood it will not be needed (if Taylor-term imaging is not done)
#RA and DEC offsets used temporarily and then dropped before final metrics computed
key_value_pairs = [ ('Flux Ratio' , 0),
('Flux Ratio Uncertainty' , 0),
('Positional Offset' , 0),
('Positional Offset Uncertainty' , 0),
('Resolved Fraction' , self.cat.resolved_frac),
('Spectral Index' , 0),
('RMS', self.cat.img_rms),
('Source Counts Reduced Chi-squared' , self.sc_red_chi_sq),
('RA Offset' , 0),
('DEC Offset' , 0)]
self.metric_val = collections.OrderedDict(key_value_pairs)
self.metric_source = self.metric_val.copy()
self.metric_count = self.metric_val.copy()
self.metric_level = self.metric_val.copy()
def apercal_specs(self):
try:
import apercal
aver = apercal.__version__
apath = apercal.__file__
except:
aver = ''
apath = ''
return aver, apath
# def get_dynamic_range(self, radius=30, box=50):
# """
# get source and local dynamic range for the sources
# within the radius [arcmin] of the beam center
# The box [pixels] is the box to estimate local DR.
# """
def write_html_head(self):
"""Open the report html file and write the head."""
self.html = open(self.name,'w')
self.html.write("""<!DOCTYPE HTML>
<html lang="en">
<head>
{0}
</head>
<?php title_bar("atnf"); ?>
<body>
<h1 align="middle">{1} Continuum Data Validation Report</h1>""".format(self.css_style,self.cat.name))
def write_html_img_table(self, img):
"""Write an observations and image and catalogue report tables derived from fits image and header.
Arguments:
----------
img : radio_image
A radio image object used to write values to the html table."""
#generate link to confluence page for each project code
project = img.project
# if project.startswith('AS'):
# project = self.add_html_link("https://confluence.csiro.au/display/askapsst/{0}+Data".format(img.project),img.project,file=False)
#Write observations report table
self.html.write("""
<h2 align="middle">Observations</h2>
<table class="reportTable">
<tr>
<th>SBID</th>
<th>Project</th>
<th>Date</th>
<th>Duration<br>(hours)</th>
<th>Field Centre</th>
<th>Central Frequency<br>(MHz)</th>
</tr>
<tr>
<td>{0}</td>
<td>{1}</td>
<td>{2}</td>
<td>{3}</td>
<td>{4}</td>
<td>{5:.2f}</td>
</tr>
</table>""".format( img.sbid,
project,
img.date,
img.duration,
img.centre,
img.freq))
#Write image report table
self.html.write("""
<h2 align="middle">Image</h2>
        <h4 align="middle"><i>File: '{0}'</i></h4>
<table class="reportTable">
<tr>
<th>IMAGE SIZE<br>(pixels)</th>
<th>PIXEL SIZE<br>(arcsec)</th>
<th>Synthesised Beam<br>(arcsec)</th>
<th>Median r.m.s.<br>(uJy)</th>
<th>Image peak<br>(Jy)</th>
<th>Image DR</th>
<th>Source DR</th>
<th>Local DR</th>
<th>Sky Area<br>(deg<sup>2</sup>)</th>
<th>Normaltest</th>
</tr>
<tr>
<td>{1}</td>
<td>{2}</td>
<td>{3:.1f} x {4:.1f}</td>
<td>{5}</td>
<td>{6:.2f}</td>
<td>{7:.0f} </td>
<td>{8:.0f} - {9:.0f} </td>
<td>{10:.0f} - {11:.0f} </td>
<td>{12:.2f}</td>
<td>{13:s}</td>
</tr>
</table>""".format( img.name,
img.imsizestr,
img.pixsizestr,
img.bmaj,
img.bmin,
self.cat.img_rms,
self.cat.img_peak,
self.cat.dynamic_range,
self.cat.source_dynrange[0], self.cat.source_dynrange[1],
self.cat.local_dynrange[0], self.cat.local_dynrange[1],
self.cat.area,
img.gaussianity))
def write_html_cat_table(self):
"""Write an observations and image and catalogue report tables derived from fits image, header and catalogue."""
flux_type = 'integrated'
if self.cat.use_peak:
flux_type = 'peak'
if self.cat.med_si == -99:
med_si = ''
else:
med_si = '{0:.2f}'.format(self.cat.med_si)
#Write catalogue report table
self.html.write("""
<h2 align="middle">Catalogue</h2>
        <h4 align="middle"><i>File: '{0}'</i></h4>
<table class="reportTable">
<tr>
<th>Source Finder</th>
<th>Flux Type</th>
<th>Number of<br>sources (≥{1}σ)</th>
<th>Multi-component<br>islands</th>
<th>Sum of image flux vs.<br>sum of catalogue flux</th>
<th>Median in-band spectral index</th>
<th>Median int/peak flux</th>
<th>Source Counts<br>χ<sub>red</sub><sup>2</sup></th>
</tr>
<tr>
<td>{2}</td>
<td>{3}</td>
<td>{4}</td>
<td>{5}</td>
<td>{6:.1f} Jy vs. {7:.1f} Jy</td>
<td>{8}</td>""".format( self.cat.filename,
self.cat.SNR,
self.cat.finder,
flux_type,
self.cat.initial_count,
self.cat.blends,
self.cat.img_flux,
self.cat.cat_flux,
med_si))
def write_html_cross_match_table(self):
"""Write the header of the cross-matches table."""
self.html.write("""
<h2 align="middle">Cross-matches</h2>
<table class="reportTable">
<tr>
<th>Survey</th>
<th>Frequency<br>(MHz)</th>
<th>Cross-matches</th>
<th>Median offset<br>(arcsec)</th>
<th>Median flux ratio</th>
<th>Median spectral index</th>
</tr>""")
def get_metric_level(self,good_condition,uncertain_condition):
"""Return metric level 1 (good), 2 (uncertain) or 3 (bad), according to the two input conditions.
Arguments:
----------
good_condition : bool
Condition for metric being good.
uncertain_condition : bool
Condition for metric being uncertain."""
if good_condition:
return 1
if uncertain_condition:
return 2
return 3
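    # e.g. for a median flux ratio of 1.08 the 'Flux Ratio' metric calls
    # get_metric_level(0.08 < 0.05, 0.08 < 0.1), which returns 2 (uncertain).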
    def assign_metric_levels(self):
        """Assign level 1 (good), 2 (uncertain) or 3 (bad) to each metric, depending on specific tolerance values.
See https://confluence.csiro.au/display/askapsst/Continuum+validation+metrics"""
for metric in self.metric_val.keys():
# Remove keys that don't have a valid value (value=-99 or -1111)
if self.metric_val[metric] == -99 or self.metric_val[metric] == -111:
self.metric_val.pop(metric)
self.metric_source.pop(metric)
self.metric_level.pop(metric)
else:
#flux ratio within 5/10%?
if metric == 'Flux Ratio':
val = np.abs(self.metric_val[metric]-1)
good_condition = val < 0.05
uncertain_condition = val < 0.1
self.metric_source[metric] = 'Median flux density ratio [APERTIF / {0}]'.format(self.metric_source[metric])
#uncertainty on flux ratio less than 10/20%?
elif metric == 'Flux Ratio Uncertainty':
good_condition = self.metric_val[metric] < 0.1
uncertain_condition = self.metric_val[metric] < 0.2
self.metric_source[metric] = 'R.M.S. of median flux density ratio [APERTIF / {0}]'.format(self.metric_source[metric])
self.metric_source[metric] += ' (estimated from median absolute deviation from median)'
#positional offset < 1/5 arcsec
elif metric == 'Positional Offset':
good_condition = self.metric_val[metric] < 1
uncertain_condition = self.metric_val[metric] < 5
self.metric_source[metric] = 'Median positional offset (arcsec) [APERTIF-{0}]'.format(self.metric_source[metric])
#uncertainty on positional offset < 1/5 arcsec
elif metric == 'Positional Offset Uncertainty':
good_condition = self.metric_val[metric] < 5
uncertain_condition = self.metric_val[metric] < 10
self.metric_source[metric] = 'R.M.S. of median positional offset (arcsec) [APERTIF-{0}]'.format(self.metric_source[metric])
self.metric_source[metric] += ' (estimated from median absolute deviation from median)'
#reduced chi-squared of source counts < 3/50?
elif metric == 'Source Counts Reduced Chi-squared':
good_condition = self.metric_val[metric] < 3
uncertain_condition = self.metric_val[metric] < 50
self.metric_source[metric] = 'Reduced chi-squared of source counts'
#resolved fraction of sources between 5-20%?
elif metric == 'Resolved Fraction':
good_condition = self.metric_val[metric] > 0.05 and self.metric_val[metric] < 0.2
uncertain_condition = self.metric_val[metric] < 0.3
self.metric_source[metric] = 'Fraction of sources resolved according to int/peak flux densities'
#spectral index less than 0.2 away from -0.8?
elif metric == 'Spectral Index':
val = np.abs(self.metric_val[metric]+0.8)
good_condition = val < 0.2
uncertain_condition = False
self.metric_source[metric] = 'Median in-band spectral index'
elif metric == 'RMS':
good_condition = self.metric_val[metric] < 100
uncertain_condition = self.metric_val[metric] < 500
self.metric_source[metric] = 'Median image R.M.S. (uJy) from noise map'
#if unknown metric, set it to 3 (bad)
else:
good_condition = False
uncertain_condition = False
#assign level to metric
self.metric_level[metric] = self.get_metric_level(good_condition,uncertain_condition)
if self.img is not None:
self.write_CASDA_xml()
def write_pipeline_offset_params(self):
"""Write a txt file with offset params for soft pipeline for user to easily import into config file, and then drop them from metrics.
See http://www.atnf.csiro.au/computing/software/askapsoft/sdp/docs/current/pipelines/ScienceFieldContinuumImaging.html?highlight=offset"""
txt = open('offset_pipeline_params.txt','w')
txt.write("DO_POSITION_OFFSET=true\n")
txt.write("RA_POSITION_OFFSET={0:.2f}\n".format(-self.metric_val['RA Offset']))
txt.write("DEC_POSITION_OFFSET={0:.2f}\n".format(-self.metric_val['DEC Offset']))
txt.close()
for metric in ['RA Offset','DEC Offset']:
self.metric_val.pop(metric)
self.metric_source.pop(metric)
self.metric_level.pop(metric)
self.metric_count.pop(metric)
def write_CASDA_xml(self):
"""Write xml table with all metrics for CASDA."""
tmp_table = Table( [self.metric_val.keys(),self.metric_val.values(),self.metric_level.values(),self.metric_source.values()],
names=['metric_name','metric_value','metric_status','metric_description'],
dtype=[str,float,np.int32,str])
vot = votable.from_table(tmp_table)
vot.version = 1.3
table = vot.get_first_table()
table.params.extend([votable.tree.Param(vot, name="project", datatype="char", arraysize="*", value=self.img.project)])
valuefield=table.fields[1]
valuefield.precision='2'
prefix = ''
if self.img.project != '':
prefix = '{0}_'.format(self.img.project)
xml_filename = '{0}CASDA_continuum_validation.xml'.format(prefix)
votable.writeto(vot, xml_filename)
def write_html_end(self):
"""Write the end of the html report file (including table of metrics) and close it."""
#Close cross-matches table and write header of validation summary table
self.html.write("""
</td>
</tr>
</table>
<h2 align="middle">{0} continuum validation metrics</h2>
<table class="reportTable">
<tr>
<th>Flux Ratio<br>({0} / {1})</th>
<th>Flux Ratio Uncertainty<br>({0} / {1})</th>
<th>Positional Offset (arcsec)<br>({0} — {2})</th>
<th>Positional Offset Uncertainty (arcsec)<br>({0} — {2})</th>
<th>Resolved Fraction from int/peak Flux<br>({0})</th>
<th>Source Counts χ<sub>red</sub><sup>2</sup><br>({0})</th>
<th>r.m.s. (uJy)<br>({0})</th>
""".format(self.cat.name,self.metric_source['Flux Ratio'],self.metric_source['Positional Offset']))
#assign levels to each metric
self.assign_metric_levels()
#flag if in-band spectral indices not derived
spec_index = False
if 'Spectral Index' in self.metric_val:
spec_index = True
if spec_index:
self.html.write('<th>Median in-band<br>spectral index</th>')
#Write table with values of metrics and colour them according to level
self.html.write("""</tr>
<tr>
<td {0}>{1:.2f}</td>
<td {2}>{3:.2f}</td>
<td {4}>{5:.2f}</td>
<td {6}>{7:.2f}</td>
<td {8}>{9:.2f}</td>
<td {10}>{11:.2f}</td>
<td {12}>{13}</td>
""".format(self.html_colour(self.metric_level['Flux Ratio']),self.metric_val['Flux Ratio'],
self.html_colour(self.metric_level['Flux Ratio Uncertainty']),self.metric_val['Flux Ratio Uncertainty'],
self.html_colour(self.metric_level['Positional Offset']),self.metric_val['Positional Offset'],
self.html_colour(self.metric_level['Positional Offset Uncertainty']),self.metric_val['Positional Offset Uncertainty'],
self.html_colour(self.metric_level['Resolved Fraction']),self.metric_val['Resolved Fraction'],
self.html_colour(self.metric_level['Source Counts Reduced Chi-squared']),self.metric_val['Source Counts Reduced Chi-squared'],
self.html_colour(self.metric_level['RMS']),self.metric_val['RMS']))
if spec_index:
self.html.write('<td {0}>{1:.2f}</td>'.format(self.html_colour(self.metric_level['Spectral Index']),
self.metric_val['Spectral Index']))
# by = ''
# if self.cat.name != 'ASKAP':
# by = """ by <a href="mailto:[email protected]">Jordan Collier</a>"""
#Close table, write time generated, and close html file
self.html.write("""</tr>
</table>
<p><i>Generated at {0}</i></p>
<?php footer(); ?>
</body>
</html>""".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) #,by))
self.html.close()
print "Continuum validation report written to '{0}'.".format(self.name)
def add_html_link(self,target,link,file=True,newline=False):
"""Return the html for a link to a URL or file.
Arguments:
----------
target : string
The name of the target (a file or URL).
link : string
The link to this file (thumbnail file name or string to list as link name).
Keyword Arguments:
------------------
file : bool
The input link is a file (e.g. a thumbnail).
newline : bool
Write a newline / html break after the link.
Returns:
--------
html : string
The html link."""
html = """<a href="{0}">""".format(target)
if file:
html += """<IMG SRC="{0}"></a>""".format(link)
else:
html += "{0}</a>".format(link)
if newline:
html += "<br>"
return html
def text_to_html(self,text):
"""Take a string of text that may include LaTeX, and return the html code that will generate it as LaTeX.
Arguments:
----------
text : string
A string of text that may include LaTeX.
Returns:
--------
html : string
The same text readable as html."""
#this will allow everything between $$ to be generated as LaTeX
html = """
<script type="text/x-mathjax-config">
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']]}});
</script>
<script type="text/javascript"
src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>
<br>
"""
#write a newline / break for each '\n' in string
for line in text.split('\n'):
html += line + '<br>'
return html
def html_colour(self,level):
"""Return a string representing green, yellow or red in html if level is 1, 2 or 3.
Arguments:
----------
level : int
A validation level.
Returns:
--------
colour : string
The html for green, yellow or red."""
if level == 1:
colour = "id='good'"
elif level == 2:
colour = "id='uncertain'"
else:
colour = "id='bad'"
return colour
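    # e.g. html_colour(1) -> "id='good'" and html_colour(3) -> "id='bad'", matching
    # the CSS ids defined in the default style.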
def int_peak_flux(self,usePeak=False):
"""Plot the int/peak fluxes as a function of peak flux.
Keyword Arguments:
------------------
usePeak : bool
Use peak flux as x axis, instead of SNR."""
ratioCol = '{0}_int_peak_ratio'.format(self.cat.name)
self.cat.df[ratioCol] = self.cat.df[self.cat.flux_col] / self.cat.df[self.cat.peak_col]
SNR = self.cat.df[self.cat.peak_col]/self.cat.df[self.cat.rms_val]
ratio = self.cat.df[ratioCol]
peak = self.cat.df[self.cat.peak_col]
xaxis = SNR
if usePeak:
xaxis = peak
#plot the int/peak flux ratio
fig = plt.figure(**self.fig_size)
title = "{0} int/peak flux ratio".format(self.cat.name)
if self.plot_to == 'html':
if usePeak:
xlabel = 'Peak flux ({0})'.format(self.cat.flux_unit.replace('j','J'))
else:
xlabel = 'S/N'
ylabel = 'Int / Peak Flux Ratio'
else:
xlabel = r'${\rm S_{peak}$'
if usePeak:
xlabel += ' ({0})'.format(self.cat.flux_unit.replace('j','J'))
else:
xlabel += r'$ / \sigma_{rms}}$'
ylabel = r'${\rm S_{int} / S_{peak}}$'
if self.plot_to != 'screen':
filename = '{0}/{1}_int_peak_ratio.{2}'.format(self.figDir,self.cat.name,self.plot_to)
else:
filename = ''
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(xaxis,yaxis=ratio)#,caxis=self.cat.dec[self.cat.name])
plt.loglog()
plt.gca().grid(b=True, which='minor', color='w', linewidth=0.5)
#hack to overlay resolved sources in red
xres,yres= xaxis[self.cat.resolved],ratio[self.cat.resolved]
markers = self.markers.copy()
markers['color'] = 'r'
markers.pop('s')
data, = plt.plot(xres,yres,'o',zorder=50,**markers)
leg_labels = ['Resolved','Unresolved']
#derive the statistics of y and store in string
ymed,ymean,ystd,yerr,ymad = get_stats(ratio)
txt = '$\widetilde{Ratio}$: %.2f\n' % ymed
txt += '$\overline{Ratio}$: %.2f\n' % ymean
txt += '$\sigma_{Ratio}$: %.2f\n' % ystd
txt += '$\sigma_{\overline{Ratio}}$: %.2f' % yerr
#store median int/peak flux ratio and write to report table
self.int_peak_ratio = ymed
self.html.write('<td>{0:.2f}<br>'.format(ymed))
#plot the int/peak flux ratio
self.plot(x,
y=y,
c=c,
figure=fig,
line_funcs=[self.y1],
title=title,
xlabel=xlabel,
ylabel=ylabel,
text=txt,
loc='tl',
axis_perc=0,
filename=filename,
leg_labels=leg_labels,
handles=[data],
redo=self.redo)
def source_counts(self,fluxes,freq,rms_map=None,solid_ang=0,write=True):
"""Compute and plot the (differential euclidean) source counts based on the input flux densities.
Arguments:
----------
fluxes : list-like
A list of fluxes in Jy.
freq : float
The frequency of these fluxes in MHz.
Keyword arguments:
------------------
rms_map : astropy.io.fits
A fits image of the local rms in Jy.
solid_ang : float
A fixed solid angle over which the source counts are computed. Only used when rms_map is None.
write : bool
Write the source counts to file."""
#derive file names based on user input
filename = 'screen'
counts_file = '{0}_source_counts.csv'.format(self.cat.basename)
if self.plot_to != 'screen':
filename = '{0}/{1}_source_counts.{2}'.format(self.figDir,self.cat.name,self.plot_to)
#read the log of the source counts from Norris+11 from same directory of this script
df_Norris = pd.read_table('{0}/all_counts.txt'.format(self.main_dir),sep=' ')
x = df_Norris['S']-3 #convert from log of flux in mJy to log of flux in Jy
y = df_Norris['Counts']
yerr = (df_Norris['ErrDown'],df_Norris['ErrUp'])
#fit 6th degree polynomial to Norris+11 data
deg = 6
poly_paras = np.polyfit(x,y,deg)
f = np.poly1d(poly_paras)
xlin = np.linspace(min(x)*1.2,max(x)*1.2)
ylin = f(xlin)
#perform source counts if not already written to file or user specifies to re-do
if not os.path.exists(counts_file) or self.redo:
#warn user if they haven't input an rms map or fixed solid angle
if rms_map is None and solid_ang == 0:
warnings.warn_explicit("You must input a fixed solid angle or an rms map to compute the source counts!\n",UserWarning,WARN,cf.f_lineno)
return
#get the number of bins from the user
nbins = self.src_cnt_bins
print "Deriving source counts for {0} using {1} bins.".format(self.cat.name,nbins)
#Normalise the fluxes to 1.4 GHz
fluxes = flux_at_freq(1400,freq,fluxes,-0.8)
#Correct for Eddington bias for every flux, assuming Hogg+98 model
r = self.cat.df[self.cat.flux_col] / self.cat.df[self.cat.rms_val]
slope = np.polyder(f)
q = 1.5 - slope(fluxes)
bias = 0.5 + 0.5*np.sqrt(1 - (4*q+4)/(r**2))
#q is derived in log space, so correct for the bias in log space
fluxes = 10**(np.log10(fluxes)/bias)
if rms_map is not None:
w = WCS(rms_map.header)
if self.verbose:
print "Using rms map '{0}' to derive solid angle for each flux bin.".format(self.img.rms_map)
total_area = get_pixel_area(rms_map, flux=100, w=w)[0]
else:
total_area = 0
#add one more bin and then discard it, since this is dominated by the few brightest sources
#we also add one more to the bins since there's one more bin edge than number of bins
edges = np.percentile(fluxes,np.linspace(0,100,nbins+2))
dN,edges,patches=plt.hist(fluxes,bins=edges)
dN = dN[:-1]
edges = edges[:-1]
#derive the lower and upper edges and dS
lower = edges[:-1]
upper = edges[1:]
dS = upper-lower
S = np.zeros(len(dN))
solid_angs = np.zeros(len(dN))
for i in range(len(dN)):
#derive the mean flux from all fluxes in current bin
indices = (fluxes > lower[i]) & (fluxes < upper[i])
S[i] = np.mean(fluxes[indices])
#Get the pixels from the r.m.s. map where SNR*r.m.s. < flux
if rms_map is not None:
solid_angs[i] = get_pixel_area(rms_map, flux=S[i]/self.cat.SNR, w=w)[1]
# print solid_angs
#otherwise use the fixed value passed in
else:
solid_angs[i] = solid_ang
            #compute the differential Euclidean source counts and uncertainties in linear space
# FIXME: this workaround looks ugly
solid_angs[solid_angs == 0] = 1e-19
counts = (S**2.5)*dN/dS/solid_angs
err = (S**2.5)*np.sqrt(dN)/dS/solid_angs
#Store these and the log of these values in pandas data frame
df = pd.DataFrame()
df['dN'] = dN
df['area'] = solid_angs/((np.pi/180)**2)
df['S'] = S
df['logS'] = np.log10(S)
df['logCounts'] = np.log10(counts)
df['logErrUp'] = np.log10(counts+err) - np.log10(counts)
df['logErrDown'] = np.abs(np.log10(counts-err) - np.log10(counts))
#remove all bins with less than 10% of total solid angle
bad_bins = df['area'] / total_area < 0.1
output = ['Solid angle for bin S={0:.2f} mJy less than 10% of total image. Removing bin.'.\
format(S) for S in S[np.where(bad_bins)]*1e3]
if self.verbose:
for line in output:
print line
df = df[~bad_bins]
if write:
if self.verbose:
print "Writing source counts to '{0}'.".format(counts_file)
df.to_csv(counts_file,index=False)
#otherwise simply read in source counts from file
else:
print "File '{0}' already exists. Reading source counts from this file.".format(counts_file)
df = pd.read_csv(counts_file)
#create a figure for the source counts
plt.close()
fig=plt.figure(**self.fig_size)
        title = '{0} 1.4 GHz source counts'.format(self.cat.name)
#write axes using unicode (for html) or LaTeX
if self.plot_to == 'html':
ylabel = u"log\u2081\u2080 S\u00B2\u22C5\u2075 dN/dS [Jy\u00B9\u22C5\u2075 sr\u207B\u00B9]"
xlabel = u"log\u2081\u2080 S [Jy]"
else:
ylabel = r"$\log_{10}$ S$^{2.5}$ dN/dS [Jy$^{1.5}$ sr$^{-1}$]"
xlabel = r"$\log_{10}$ S [Jy]"
#for html plots, add labels for the bin centre, count and area for every data point
labels = [u'S: {0:.2f} mJy, dN: {1:.0f}, Area: {2:.2f} deg\u00B2'.format(bin,count,area) for bin,count,area in zip(df['S']*1e3,df['dN'],df['area'])]
#derive the square of the residuals (chi squared), and their sum
#divided by the number of data points (reduced chi squared)
chi = ((df['logCounts']-f(df['logS']))/df['logErrDown'])**2
red_chi_sq = np.sum(chi)/len(df)
#store reduced chi squared value
self.sc_red_chi_sq = red_chi_sq
#Plot Norris+11 data
data = plt.errorbar(x,y,yerr=yerr,linestyle='none',marker='.',c='r')
line, = plt.plot(xlin,ylin,c='black',linestyle='--',zorder=5)
txt = ''
if self.plot_to == 'html':
txt += 'Data from <a href="http://adsabs.harvard.edu/abs/2011PASA...28..215N">Norris+11</a>'
txt += ' (updated from <a href="http://adsabs.harvard.edu/abs/2003AJ....125..465H">Hopkins+03</a>)\n'
txt += '$\chi^2_{red}$: %.2f' % red_chi_sq
#Legend labels for the Norris data and line, and the data
xlab = 'Norris+11'
leg_labels = [xlab,'{0}th degree polynomial fit to {1}'.format(deg,xlab),self.cat.name]
#write reduced chi squared to report table
self.html.write('</td><td>{0:.2f}<br>'.format(red_chi_sq))
#Plot data on top of Norris+11 data
self.plot(df['logS'],
y=df['logCounts'],
yerr=(df['logErrDown'],df['logErrUp']),
figure=fig,
title=title,
labels=labels,
xlabel=xlabel,
ylabel=ylabel,
axis_perc=0,
text=txt,
loc='br',
leg_labels=leg_labels,
handles=[data,line],
filename=filename,
redo=self.redo)
self.html.write("""</td>
</tr>
</table>""")
def x(self,x,y):
"""For given x and y data, return a line at y=x.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
The same list of x values.
y : list-like
The list of x values."""
return x,x
def y0(self,x,y):
"""For given x and y data, return a line at y=0.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
The same list of x values.
y : list-like
A list of zeros."""
return x,x*0
def y1(self,x,y):
"""For given x and y data, return a line at y=1.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
The same list of x values.
y : list-like
A list of ones."""
return x,[1]*len(x)
def x0(self,x,y):
"""For given x and y data, return a line at x=0.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
A list of zeros.
y : list-like
The same list of y values."""
return y*0,y
def ratio_err_max(self,SNR,ratio):
"""For given x and y data (flux ratio as a function of S/N), return the maximum uncertainty in flux ratio.
Arguments:
----------
SNR : list-like
A list of S/N ratios.
ratio : list-like
A list of flux ratios.
Returns:
--------
SNR : list-like
All S/N values > 0.
ratio : list-like
The maximum uncertainty in the flux ratio for S/N values > 0."""
return SNR[SNR > 0],1+3*np.sqrt(2)/SNR[SNR > 0]
def ratio_err_min(self,SNR,ratio):
"""For given x and y data (flux ratio as a function of S/N), return the minimum uncertainty in flux ratio.
Arguments:
----------
SNR : list-like
A list of S/N ratios.
ratio : list-like
A list of flux ratios.
Returns:
--------
SNR : list-like
All S/N values > 0.
ratio : list-like
The minimum uncertainty in the flux ratio for S/N values > 0."""
return SNR[SNR > 0],1-3*np.sqrt(2)/SNR[SNR > 0]
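    # Together ratio_err_max and ratio_err_min trace the expected +/- 3 sigma envelope
    # of the flux ratio, e.g. at S/N = 10 the envelope is 1 +/- 3*sqrt(2)/10,
    # i.e. roughly 0.58 to 1.42.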
def axis_to_np(self,axis):
"""Return a numpy array of the non-nan data from the input axis.
Arguments:
----------
axis : string or numpy.array or pandas.Series or list
The data for a certain axis. Strings are interpreted as column names from the catalogue object passed into the constructor.
Returns:
--------
axis : numpy.array
All non-nan values of the data.
See Also
--------
numpy.array
pandas.Series"""
#convert input to numpy array
if type(axis) is str:
axis = self.cat.df[axis].values
elif isinstance(axis, pd.Series):
axis = axis.values
return axis
def shared_indices(self,xaxis,yaxis=None,caxis=None):
"""Return a list of non-nan indices shared between all used axes.
Arguments:
----------
xaxis : string or numpy.array or pandas.Series or list
A list of the x axis data. Strings are interpreted as column names from the catalogue object passed into the constructor.
yaxis : string or numpy.array or pandas.Series or list
A list of the y axis data. Strings are interpreted as column names from the catalogue object passed into the constructor.
If this is None, yaxis and caxis will be ignored.
caxis : string or numpy.array or pandas.Series or list
A list of the colour axis data. Strings are interpreted as column names from the catalogue object passed into the constructor.
If this is None, caxis will be ignored.
Returns:
--------
x : list
The non-nan x data shared between all used axes.
y : list
The non-nan y data shared between all used axes. None returned if yaxis is None.
c : list
The non-nan colour data shared between all used axes. None returned if yaxis or caxis are None.
indices : list
The non-nan indices.
See Also
--------
numpy.array
pandas.Series"""
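#usage sketch (hypothetical column names from the catalogue dataframe):
#   x,y,c,indices = self.shared_indices('SNR', yaxis='flux_ratio', caxis='logSNR')
#returns the aligned, non-nan values of each axis plus the row indices they came from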
#convert each axis to numpy array (or leave as None)
x = self.axis_to_np(xaxis)
y = self.axis_to_np(yaxis)
c = self.axis_to_np(caxis)
#get all shared indices from used axes that aren't nan
if yaxis is None:
indices = np.where(~np.isnan(x))[0]
return x[indices],None,None,indices
elif caxis is None:
indices = np.where((~np.isnan(x)) & (~np.isnan(y)))[0]
return x[indices],y[indices],None,indices
else:
indices = np.where((~np.isnan(x)) & (~np.isnan(y)) & (~np.isnan(c)))[0]
return x[indices],y[indices],c[indices],indices
def plot(self, x, y=None, c=None, yerr=None, figure=None, arrows=None, line_funcs=None,
title='', labels=None, text=None, reverse_x=False,
xlabel='', ylabel='', clabel='', leg_labels='', handles=[], loc='bl',
ellipses=None, axis_perc=10, filename='screen', redo=False):
"""Create and write a scatter plot of the data from an input x axis, and optionally, a y and colour axis.
This function assumes shared_indices() has already been called and all input axes
are equal in length and the same data type.
Arguments:
----------
x : numpy.array
The data to plot on the x axis.
Keyword arguments:
------------------
y : numpy.array or pandas.Series
The data to plot on the y axis. Use None to plot a histogram.
c : numpy.array or pandas.Series
The data to plot as the colour axis. Use None for no colour axis.
yerr : numpy.array or pandas.Series
The data to plot as the uncertainty on the y axis. Use None for no uncertainties.
figure : pyplot.figure
Use this matplotlib figure object.
arrows : tuple
A 2-element tuple of the arrow lengths to plot at each (x, y) position, i.e. (dx, dy), where dx and dy are usually lists.
line_funcs : list-like
A list of functions for drawing lines (e.g. [self.x0(), self.y1()]).
title : string
The title of the plot.
labels : list
A list of string labels to give each data point. Length must be the same as all used axes.
text : string
Annotate this text on the figure (written to bottom of page for html figures).
reverse_x : bool
Reverse the x-axis?
xlabel : string
The label of the x axis.
ylabel : string
The label of the y axis.
clabel : string
The label of the colour axis.
leg_labels : list
A list of labels to include as a legend.
handles : list
A list of pre-defined handles associated with the legend labels.
loc : string
Location of the annotated text (not used for html plots). Options are 'bl', 'br', 'tl' and 'tr'.
ellipses : list of matplotlib.patches.Ellipse objects
Draw these ellipses on the figure.
axis_perc : float
The percentage beyond the data range at which to set the axis limits. Use 0 to leave the limits unchanged.
filename : string
Write the plot to this file name. If string contains 'html', file will be written to html using mpld3.
If it is 'screen', it will be shown on screen. Otherwise, it will attempt to write an image file.
redo: bool
Produce this plot and write it, even if the file already exists.
See Also
--------
numpy.array
pandas.Series
matplotlib.patches.Ellipse"""
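#minimal usage sketch (hypothetical data and file name; shared_indices() must be called
#first so x, y and c are aligned, non-nan arrays of equal length):
#   x,y,c,indices = self.shared_indices('SNR', yaxis='flux_ratio', caxis='logSNR')
#   self.plot(x, y=y, c=c, xlabel='S/N', ylabel='Flux ratio', clabel='log10 S/N',
#             axis_perc=10, filename='flux_ratio.png')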
#only write figure if user wants it
if self.write:
#derive name of thumbnail file
thumb = '{0}_thumb.png'.format(filename[:-1-len(self.plot_to)])
#don't produce plot if file exists and user didn't specify to re-do
if os.path.exists(filename) and not redo:
if self.verbose:
print 'File already exists. Skipping plot.'
else:
#open html file for plot
if 'html' in filename:
html_fig = open(filename,'w')
#use figure passed in or create new one
if figure is not None:
fig = figure
else:
fig = plt.figure(**self.fig_size)
ax = plt.subplot(111)
norm = None
#plot histogram
if y is None:
edges = np.linspace(-3,2,11) #specific to spectral index
err_data = ax.hist(x,bins=edges)
#plot scatter of data points with fixed colour
elif c is None:
markers = self.markers.copy()
markers.pop('s')
#hack to display markers in legend (https://github.com/mpld3/mpld3/issues/290)
#have to use ax.plot() since ax.scatter() has problems (https://github.com/mpld3/mpld3/issues/227)
#hack to display html labels when line or ellipse overlaid
ax.plot(x,y,'o',zorder=20,alpha=0.0,**markers)
data, = ax.plot(x,y,'o',**markers)
handles.append(data)
#plot scatter of data points with colour axis
else:
#normalise the colour bar so each bin contains an equal number of data points
norm = colors.BoundaryNorm(np.percentile(c,np.linspace(0,100,self.cmap.N+1)),self.cmap.N)
data = ax.scatter(x,y,c=c,cmap=self.cmap,norm=norm,**self.colour_markers)
cbar = plt.colorbar(data)
cbar.ax.tick_params(**self.label_size)
cbar.set_label(clabel,**self.fig_font)
data = ax.scatter(x,y,c=c,cmap=self.cmap,zorder=20,alpha=0.0,norm=norm,**self.colour_markers) #same hack as above
#plot error bars and add to list of handles
if yerr is not None:
err_data = ax.errorbar(x, y, yerr=yerr, zorder=4,
linestyle='none', marker=self.markers['marker'],
color=self.markers['color'])
handles.append(err_data)
#set default min and max axis limits, which may change
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
#derive limits of x and y axes axis_perc % beyond their current limit
if axis_perc > 0:
xmin = axis_lim(x, min, perc=axis_perc)
xmax = axis_lim(x, max, perc=axis_perc)
ymin = axis_lim(y, min, perc=axis_perc)
ymax = axis_lim(y, max, perc=axis_perc)
#plot each line according to the input functions
if line_funcs is not None:
xlin = np.linspace(xmin, xmax, num=1000)
ylin = np.linspace(ymin, ymax, num=1000)
for func in line_funcs:
xline,yline = func(xlin, ylin)
_ = plt.plot(xline,yline, lw=2, color='black', linestyle='-', zorder=12)
#doing this here forces the lines in html plots to not increase the axis limits
if reverse_x:
plt.xlim(xmax,xmin)
else:
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
#overlay the title and labels according to given fonts and sizes
plt.tick_params(**self.label_size)
plt.title(title,**self.fig_font)
plt.xlabel(xlabel,**self.fig_font)
plt.ylabel(ylabel,**self.fig_font)
#overlay arrows on each data point
if arrows is not None:
if not (type(arrows) is tuple and len(arrows) == 2):
warnings.warn_explicit('Arrows not formatted correctly for plt.quiver(). Input a 2-element tuple.\n',UserWarning,WARN,cf.f_lineno)
elif c is None:
plt.quiver(x,y,arrows[0],arrows[1],units='x',**self.arrows)
else:
plt.quiver(x,y,arrows[0],arrows[1],c,units='x',cmap=self.cmap,norm=norm,**self.arrows)
#annotate input text
if text is not None and 'html' not in filename:
#write to given location on plot
kwargs = self.fig_font.copy()
if loc == 'tl':
args = (xmin,ymax,text)
kwargs.update({'horizontalalignment' : 'left', 'verticalalignment' : 'top'})
elif loc == 'tr':
args = (xmax,ymax,text)
kwargs.update({'horizontalalignment' : 'right', 'verticalalignment' : 'top'})
elif loc == 'br':
args = (xmax,ymin,text)
kwargs.update({'horizontalalignment' : 'right', 'verticalalignment' : 'bottom'})
else:
args = (xmin,ymin,text)
kwargs.update({'horizontalalignment' : 'left', 'verticalalignment' : 'bottom'})
plt.text(*args,**kwargs)
#write a legend
if len(leg_labels) > 0:
plt.legend(handles,leg_labels,fontsize=self.fig_font['fontsize']//1.5)
#BELOW NOT CURRENTLY WORKING WELL
#if 'html' in filename:
#interactive_legend = plugins.InteractiveLegendPlugin(handles,leg_labels)
#plugins.connect(fig, interactive_legend)
#overlay ellipses on figure
if ellipses is not None:
for e in ellipses:
ax.add_patch(e)
if self.verbose:
print "Writing figure to '{0}'.".format(filename)
#write thumbnail of this figure
if filename != 'screen':
plt.savefig(thumb)
image.thumbnail(thumb,thumb,scale=0.05)
#write html figure
if 'html' in filename:
#include label for every datapoint
if labels is not None:
tooltip = plugins.PointHTMLTooltip(data, labels=labels)
plugins.connect(fig, tooltip)
#print coordinates of mouse as it moves across figure
plugins.connect(fig, plugins.MousePosition(fontsize=self.fig_font['fontsize']))
html_fig.write(mpld3.fig_to_html(fig))
#write annotations to end of html file if user wants html plots
if text is not None:
html_fig.write(self.text_to_html(text))
#otherwise show figure on screen
elif filename == 'screen':
plt.show()
#otherwise write with given extension
else:
plt.savefig(filename)
#Add link and thumbnail to html report table
self.html.write(self.add_html_link(filename,thumb))
plt.close()
def validate(self, name1, name2, redo=False):
"""Produce a validation report between two catalogues, and optionally produce plots.
Arguments:
----------
name1 : string
The dictionary key / name of a catalogue from the main catalogue object used to compare other data.
name2 : string
The dictionary key / name of a catalogue from the main catalogue object used as a comparison.
Keyword Arguments:
------------------
redo: bool
Produce this plot and write it, even if the file already exists.
Returns:
--------
ratio_med : float
The median flux density ratio. -111 if this is not derived.
sep_med : float
The median sky separation between the two catalogues.
alpha_med : float
The median spectral index. -111 if this is not derived.
print 'Validating {0} with {1}...'.format(name1,name2)
filename = 'screen'
#write survey and number of matched to cross-matches report table
self.html.write("""<tr>
<td>{0}</td>
<td>{1}</td>
<td>{2}""".format(name2,
self.cat.freq[name2],
self.cat.count[name2]))
#plot the positional offsets
fig = plt.figure(**self.fig_size)
title = u"{0} \u2014 {1} positional offsets".format(name1,name2)
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_astrometry.{3}'.format(self.figDir,name1,name2,self.plot_to)
#compute the S/N and its log based on main catalogue
if name1 in self.cat.flux.keys():
self.cat.df['SNR'] = self.cat.flux[name1] / self.cat.flux_err[name1]
self.cat.df['logSNR'] = np.log10(self.cat.df['SNR'])
caxis = 'logSNR'
else:
caxis = None
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(self.cat.dRA[name2],yaxis=self.cat.dDEC[name2],caxis=caxis)
#derive the statistics of x and y and store in string to annotate on figure
dRAmed,dRAmean,dRAstd,dRAerr,dRAmad = get_stats(x)
dDECmed,dDECmean,dDECstd,dDECerr,dDECmad = get_stats(y)
txt = '$\widetilde{\Delta RA}$: %.2f\n' % dRAmed
txt += '$\overline{\Delta RA}$: %.2f\n' % dRAmean
txt += '$\sigma_{\Delta RA}$: %.2f\n' % dRAstd
txt += '$\sigma_{\overline{\Delta RA}}$: %.2f\n' % dRAerr
txt += '$\widetilde{\Delta DEC}$: %.2f\n' % dDECmed
txt += '$\overline{\Delta DEC}$: %.2f\n' % dDECmean
txt += '$\sigma_{\Delta DEC}$: %.2f\n' % dDECstd
txt += '$\sigma_{\overline{\Delta DEC}}$: %.2f' % dDECerr
#create an ellipse at the position of the median with axes of standard deviation
e1 = Ellipse((dRAmed,dDECmed),width=dRAstd,height=dDECstd,color='black',
fill=False,linewidth=3,zorder=10,alpha=0.9)
#force axis limits of the search radius
radius = max(self.cat.radius[name1],self.cat.radius[name2])
plt.axis('equal')
plt.xlim(-radius,radius)
plt.ylim(-radius,radius)
#create an ellipse at 0,0 with width 2 x search radius
e2 = Ellipse((0,0),width=radius*2,height=radius*2,color='grey',fill=False,
linewidth=3,linestyle='--',zorder=1,alpha=0.9)
#format labels according to destination of figure
if self.plot_to == 'html':
xlabel = u'\u0394RA (arcsec)'
ylabel = u'\u0394DEC (arcsec)'
clabel = u'log\u2081\u2080 S/N'
else:
xlabel = '$\Delta$RA (arcsec)'
ylabel = '$\Delta$DEC (arcsec)'
clabel = r'$\log_{10}$ S/N'
#for html plots, add S/N and separation labels for every data point
if caxis is not None:
labels = ['S/N = {0:.2f}, separation = {1:.2f}\"'.format(cval,totSep)\
for cval,totSep in zip(self.cat.df.loc[indices,'SNR'],self.cat.sep[name2][indices])]
else:
labels = ['Separation = {0:.2f}\"'.format(cval) for cval in self.cat.sep[name2][indices]]
#get median separation in arcsec
c1 = SkyCoord(ra=0,dec=0,unit='arcsec,arcsec')
c2 = SkyCoord(ra=dRAmed,dec=dDECmed,unit='arcsec,arcsec')
sep_med = c1.separation(c2).arcsec
#get mad of separation in arcsec
c1 = SkyCoord(ra=0,dec=0,unit='arcsec,arcsec')
c2 = SkyCoord(ra=dRAmad,dec=dDECmad,unit='arcsec,arcsec')
sep_mad = c1.separation(c2).arcsec
#write the dRA and dDEC to html table
self.html.write("""</td>
<td>{0:.2f} ± {1:.2f} (RA)<br>{2:.2f} ± {3:.2f} (Dec)<br>""".\
format(dRAmed,dRAmad,dDECmed,dDECmad))
#plot the positional offsets
self.plot(x,
y=y,
c=c,
figure=fig,
line_funcs=(self.x0,self.y0),
title=title,
xlabel=xlabel,
ylabel=ylabel,
clabel=clabel,
text=txt,
ellipses=(e1,e2),
axis_perc=0,
loc='tr',
filename=filename,
labels=labels,
redo=redo)
#plot the positional offsets across the sky
title += " by sky position"
xlabel = 'RA (deg)'
ylabel = 'DEC (deg)'
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_astrometry_sky.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(self.cat.ra[name2],yaxis=self.cat.dec[name2],caxis=caxis)
#for html plots, add S/N and separation labels for every data point
if caxis is not None:
labels = [u'S/N = {0:.2f}, \u0394RA = {1:.2f}\", \u0394DEC = {2:.2f}\"'.format(cval,dra,ddec) for cval,dra,ddec\
in zip(self.cat.df.loc[indices,'SNR'],self.cat.dRA[name2][indices],self.cat.dDEC[name2][indices])]
else:
labels = [u'\u0394RA = {0:.2f}\", \u0394DEC = {1:.2f}\"'.format(dra,ddec) for dra,ddec\
in zip(self.cat.dRA[name2][indices],self.cat.dDEC[name2][indices])]
#plot the positional offsets across the sky
self.plot(x,
y=y,
c=c,
title=title,
xlabel=xlabel,
ylabel=ylabel,
reverse_x=True,
arrows=(self.cat.dRA[name2][indices],self.cat.dDEC[name2][indices]),
clabel=clabel,
axis_perc=0,
filename=filename,
labels=labels,
redo=redo)
#derive column names and check if they exist
freq = int(round(self.cat.freq[name1]))
fitted_flux_col = '{0}_extrapolated_{1}MHz_flux'.format(name2,freq)
fitted_ratio_col = '{0}_extrapolated_{1}MHz_{2}_flux_ratio'.format(name2,freq,name1)
ratio_col = '{0}_{1}_flux_ratio'.format(name2,name1)
#only plot flux ratio if it was derived
if ratio_col not in self.cat.df.columns and (fitted_ratio_col not in self.cat.df.columns or np.all(np.isnan(self.cat.df[fitted_ratio_col]))):
print "Can't plot flux ratio since you haven't derived the fitted flux density at this frequency."
ratio_med = -111
ratio_mad = -111
flux_ratio_type = ''
self.html.write('<td>')
else:
#compute flux ratio based on which one exists and rename variable for figure title
if ratio_col in self.cat.df.columns:
ratio = self.cat.df[ratio_col]
flux_ratio_type = name2
elif fitted_ratio_col in self.cat.df.columns:
ratio = self.cat.df[fitted_ratio_col]
flux_ratio_type = '{0}-extrapolated'.format(name2)
logRatio = np.log10(ratio)
#plot the flux ratio as a function of S/N
fig = plt.figure(**self.fig_size)
title = "{0} / {1} flux ratio".format(name1,flux_ratio_type)
xlabel = 'S/N'
ylabel = 'Flux Density Ratio'
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_ratio.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices('SNR',yaxis=ratio)#,caxis=self.cat.dec[name1])
plt.loglog()
plt.gca().grid(b=True, which='minor', color='w', linewidth=0.5)
#derive the ratio statistics and store in string to append to plot
ratio_med,ratio_mean,ratio_std,ratio_err,ratio_mad = get_stats(y)
txt = '$\widetilde{Ratio}$: %.2f\n' % ratio_med
txt += '$\overline{Ratio}$: %.2f\n' % ratio_mean
txt += '$\sigma_{Ratio}$: %.2f\n' % ratio_std
txt += '$\sigma_{\overline{Ratio}}$: %.2f' % ratio_err
#for html plots, add flux labels for every data point
if flux_ratio_type == name2:
labels = ['{0} flux = {1:.2f} mJy, {2} flux = {3:.2f} mJy'.format(name1,flux1,name2,flux2)\
for flux1,flux2 in zip(self.cat.flux[name1][indices]*1e3,self.cat.flux[name2][indices]*1e3)]
else:
labels = ['{0} flux = {1:.2f} mJy, {2} flux = {3:.2f} mJy'.format(name1,flux1,flux_ratio_type,flux2)\
for flux1,flux2 in zip(self.cat.flux[name1][indices]*1e3,self.cat.df[fitted_flux_col][indices]*1e3)]
#write the ratio to html report table
if flux_ratio_type == name2:
type = 'measured'
else:
type = 'extrapolated'
self.html.write("""</td>
<td>{0:.2f} ± {1:.2f} ({2})<br>""".format(ratio_med,ratio_mad,type))
#plot the flux ratio as a function of S/N
self.plot(x,
y=y,
c=c,
figure=fig,
line_funcs=(self.y1,self.ratio_err_min,self.ratio_err_max),
title=title,
xlabel=xlabel,
ylabel=ylabel,
text=txt,
loc='tr',
axis_perc=0,
filename=filename,
labels=labels,
redo=redo)
#plot the flux ratio across the sky
fig = plt.figure(**self.fig_size)
title += " by sky position"
xlabel = 'RA (deg)'
ylabel = 'DEC (deg)'
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_ratio_sky.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(self.cat.ra[name2],yaxis=self.cat.dec[name2],caxis=logRatio)
#format labels according to destination of figure
if self.plot_to == 'html':
clabel = u'log\u2081\u2080 Flux Ratio'
else:
clabel = r'$\log_{10}$ Flux Ratio'
#for html plots, add flux ratio labels for every data point
labels = [u'{0} = {1:.2f}'.format('Flux Ratio',cval) for cval in ratio[indices]]
#plot the flux ratio across the sky
self.plot(x,
y=y,
c=c,
figure=fig,
title=title,
xlabel=xlabel,
ylabel=ylabel,
clabel=clabel,
reverse_x=True,
axis_perc=0,
filename=filename,
labels=labels,
redo=redo)
#derive spectral index column name and check if exists
si_column = '{0}_{1}_alpha'.format(name1,name2)
if not si_column in self.cat.df.columns:
print "Can't plot spectral index between {0} and {1}, since it was not derived.".format(name1,name2)
alpha_med = -111 #null flag
self.html.write('<td>')
else:
#plot the spectral index
fig = plt.figure(**self.fig_size)
plt.xlim(-3,2)
title = "{0}-{1} Spectral Index".format(name1,name2)
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_spectal_index.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(si_column)
#format labels according to destination of figure
freq1 = int(round(min(self.cat.freq[name1],self.cat.freq[name2])))
freq2 = int(round(max(self.cat.freq[name1],self.cat.freq[name2])))
if self.plot_to == 'html':
xlabel = u'\u03B1 [{0}-{1} MHz]'.format(freq1,freq2)
else:
xlabel = r'$\alpha_{%s}^{%s}$' % (freq1,freq2)
#derive the statistics of x and store in string
alpha_med,alpha_mean,alpha_std,alpha_err,alpha_mad = get_stats(x)
txt = '$\widetilde{\\alpha}$: %.2f\n' % alpha_med
txt += '$\overline{\\alpha}$: %.2f\n' % alpha_mean
txt += '$\sigma_{\\alpha}$: %.2f\n' % alpha_std
txt += '$\sigma_{\overline{\\alpha}}$: %.2f' % alpha_err
#write the ratio to html report table
self.html.write("""</td>
<td>{0:.2f} ± {1:.2f}<br>""".format(alpha_med,alpha_mad))
#plot the spectral index
self.plot(x,
figure=fig,
title=title,
xlabel=xlabel,
ylabel='N',
axis_perc=0,
filename=filename,
text=txt,
loc='tl',
redo=redo)
#write the end of the html report table row
self.html.write("""</td>
</tr>""")
alpha_med = self.cat.med_si
alpha_type = '{0}'.format(name1)
#create dictionary of validation metrics and where they come from
metric_val = { 'Flux Ratio' : ratio_med,
'Flux Ratio Uncertainty' : ratio_mad,
'RA Offset' : dRAmed,
'DEC Offset' : dDECmed,
'Positional Offset' : sep_med,
'Positional Offset Uncertainty' : sep_mad,
'Spectral Index' : alpha_med}
metric_source = {'Flux Ratio' : flux_ratio_type,
'Flux Ratio Uncertainty' : flux_ratio_type,
'RA Offset' : name2,
'DEC Offset' : name2,
'Positional Offset' : name2,
'Positional Offset Uncertainty' : name2,
'Spectral Index' : alpha_type}
count = self.cat.count[name2]
#overwrite values if they are valid and come from a larger catalogue
for key in metric_val.keys():
if count > self.metric_count[key] and metric_val[key] != -111:
self.metric_count[key] = count
self.metric_val[key] = metric_val[key]
self.metric_source[key] = metric_source[key]
| 69,888 | 39.63314 | 156 | py |
dataqa | dataqa-master/continuum/validation_tool/dynamic_range.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 14:09:37 2019
@author: AK
"""
import pandas as pd
import astropy.io.fits as fits
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astropy.wcs import WCS
import numpy as np
import matplotlib.pyplot as plt
def get_image_center_beam(image):
fts = fits.open(image)[0]
center = SkyCoord(ra=fts.header['CRVAL1'], dec=fts.header['CRVAL2'], unit='deg,deg')
# TODO: is the BPA in degrees?
beam = Angle([fts.header['BMAJ'], fts.header['BMIN'], fts.header['BPA']], unit=u.deg)
return center, beam
def sources_within_radius(pybdsfcat, image, radius=30):
"""
Return the DataFrame of sources within the given radius (arcmin)
of the image centre, based on the PyBDSF catalogue.
"""
a = pd.read_csv(pybdsfcat, skip_blank_lines=True, skiprows=5, skipinitialspace=True)
center, _ = get_image_center_beam(image)
coords = SkyCoord(ra=a.RA, dec=a.DEC, unit='deg')
seps = center.separation(coords).to('arcmin').value
a['Center_sep'] = seps
res = a.query('Center_sep < @radius')
return res
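# usage sketch (hypothetical file names):
#   bright = sources_within_radius('image_pybdsf_comp.csv', 'image.fits', radius=30)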
def source_dynamic_range(pybdsfcat, image, radius=30):
"""
Get the dynamic range (Peak_flux / Resid_Isl_rms) of the brightest sources
within the given radius (arcmin) of the image centre, based on the PyBDSF
catalogue. Returns the minimum and maximum values.
"""
d = sources_within_radius(pybdsfcat, image, radius=radius)
# take the 5 brightest sources:
d = d.sort_values('Peak_flux', ascending=False)[:5]
dr = d.Peak_flux/d.Resid_Isl_rms
return dr.min(), dr.max()
def local_dynamic_range(pybdsfcat, resimage, radius=30, box=50):
"""
Get the peak-to-artefact ratio within a box of +/-@box pixels around each of the
brightest sources within @radius (arcmin) of the image centre, based on the
PyBDSF catalogue and the residual image. Returns the minimum and maximum values.
"""
d = sources_within_radius(pybdsfcat, resimage, radius=radius)
# take the 5 brightest sources:
d = d.sort_values('Peak_flux', ascending=False)[:5]
center, beam = get_image_center_beam(resimage)
fts = fits.open(resimage)[0]
data = fts.data
wcs = WCS(fts.header).celestial
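#keep only the two celestial axes (assumes the first two array axes are degenerate,
#e.g. Stokes and frequency, as in a standard 4-axis radio FITS image)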
data = data[0,0,:,:]
res = []
for ra, dec, peak in zip(d.RA, d.DEC, d.Peak_flux):
pxra, pxdec = wcs.wcs_world2pix([[ra, dec]], 1)[0]
boxdata = data[int(pxdec-box):int(pxdec+box), int(pxra-box):int(pxra+box)]
res.append(peak/np.max(abs(boxdata)))
return min(res), max(res)
if __name__ == "__main__":
cat = '/home/kutkin/tmp/tmp_validation/image_mf_00_pybdsf_comp.csv'
resimage = '/home/kutkin/tmp/tmp_validation/image_mf_00_pybdsf_gaus_resid.fits'
dr1 = source_dynamic_range(cat, resimage)
print dr1
dr2 = local_dynamic_range(cat, resimage)
print dr2
# d.hist(range=[0,500], bins=30)
| 2,743 | 29.153846 | 89 | py |
dataqa | dataqa-master/continuum/validation_tool/__init__.py | 0 | 0 | 0 | py |