prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
from sktime.transformations.panel.rocket import MiniRocket as MiniRKT
from sktime.classification.shapelet_based import MrSEQLClassifier
from convst.utils import load_sktime_arff_file_resample_id, return_all_dataset_names, UCR_stratified_resample
from convst.transformers import ConvolutionalShapeletTransformer
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_validate
from sklearn.metrics import f1_score, make_scorer
from sklearn.metrics import accuracy_score
from wildboar.ensemble import ShapeletForestClassifier
from numba import set_num_threads
# Can use this to resume from the last dataset if a problem occurred
resume = False
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Set this to the path of your UCR resamples; see the README for how to obtain them.
# Another splitter is provided in dataset_utils to make random resamples.
base_UCR_resamples_path = r"/home/prof/guillaume/Shapelets/resamples/"
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
print("Imports OK")
# Set n_cv = 1 to test on the original train/test split.
n_cv=30
n_trees=200
max_ft=1.0
P=80
n_bins=11
random_state = None
run_RKT = True
run_CST = True
run_MrSEQL = True
run_SFC = True
# Machine parameters; adjust these to your hardware.
available_memory_bytes = 60 * 1e9
n_cores = 32
def get_n_jobs_n_threads(nbytes, size_mult=3000):
nbytes *= size_mult
n_jobs = min(max(available_memory_bytes//nbytes,1),n_cv//2)
n_threads = min(max(n_cores//n_jobs,1),n_cores//2)
return int(n_jobs), int(n_threads)
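# Illustrative sizing (hypothetical numbers): for a resample of ~1e6 bytes, size_mult=3000
# gives ~3e9 bytes per job, so available_memory_bytes // 3e9 = 20 candidate jobs, capped at
# n_cv // 2 = 15 -> n_jobs = 15, and n_threads = min(max(32 // 15, 1), 32 // 2) = 2.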
csv_name = 'CV_{}_results_({},{})_{}_{}.csv'.format(
n_cv, n_trees, max_ft, n_bins, P)
dataset_names = return_all_dataset_names()
if resume:
df = pd.read_csv(csv_name)
df = df.set_index('Unnamed: 0')
df = df.drop(df.index[np.where(~df.index.isin(dataset_names))[0]], axis=0)
df.to_csv(csv_name)
else:
df = pd.DataFrame(index=dataset_names)
df['CST_f1_mean'] = pd.Series(0, index=df.index)
df['CST_f1_std'] = pd.Series(0, index=df.index)
df['CST_acc_mean'] = pd.Series(0, index=df.index)
df['CST_acc_std'] = pd.Series(0, index=df.index)
df['CST_runtime'] = pd.Series(0, index=df.index)
df['MiniRKT_f1_mean'] = | pd.Series(0, index=df.index) | pandas.Series |
from os import listdir
from os.path import isfile, join
import Orange
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from parameters import order, alphas, regression_measures, datasets, rank_dir, output_dir, graphics_dir, result_dir
from regression_algorithms import regression_list
results_dir = './../results/'
class Performance:
def __init__(self):
pass
def average_results(self, rfile, release):
'''
Calculates average results
:param rfile: filename with results
:param release: release identifier used to name the output file
:return: average results written to another csv file
'''
df = pd.read_csv(rfile)
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER',
'ALPHA', 'R2score', 'MAE', 'MSE', 'MAX'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'R2score'] = group['R2score'].mean()
dfr.at[i, 'MAE'] = group['MAE'].mean()
dfr.at[i, 'MSE'] = group['MSE'].mean()
dfr.at[i, 'MAX'] = group['MAX'].mean()
i = i + 1
print('Total lines in a file: ', i)
dfr.to_csv(results_dir + 'regression_average_results_' + str(release) + '.csv', index=False)
def run_rank_choose_parameters(self, filename, release):
df_best_dto = pd.read_csv(filename)
df_B1 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline1'].copy()
df_B2 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline2'].copy()
df_GEO = df_best_dto[df_best_dto['PREPROC'] == '_Geometric_SMOTE'].copy()
df_SMOTE = df_best_dto[df_best_dto['PREPROC'] == '_SMOTE'].copy()
df_SMOTEsvm = df_best_dto[df_best_dto['PREPROC'] == '_smoteSVM'].copy()
df_original = df_best_dto[df_best_dto['PREPROC'] == '_train'].copy()
for o in order:
for a in alphas:
GEOMETRY = '_dto_smoter_' + o + '_' + str(a)
df_dto = df_best_dto[df_best_dto['PREPROC'] == GEOMETRY].copy()
df = pd.concat([df_B1, df_B2, df_GEO, df_SMOTE, df_SMOTEsvm, df_original, df_dto])
self.rank_by_algorithm(df, o, str(a), release)
self.rank_dto_by(o + '_' + str(a), release)
def rank_by_algorithm(self, df, order, alpha, release, smote=False):
'''
Computes the rank of each preprocessing method, per algorithm and measure
:param df: results DataFrame
:param order: geometry/order label
:param alpha: alpha value
:param release: release identifier
:param smote: whether to write the SMOTE variant of the output files
:return:
'''
df_table = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE', 'DTO', 'RANK_DTO', 'GEOMETRY',
'ALPHA', 'unit'])
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
if smote == False:
df.to_csv(rank_dir + release + '_' + order + '_' + str(alpha) + '.csv', index=False)
else:
df.to_csv(rank_dir + release + '_smote_' + order + '_' + str(alpha) + '.csv', index=False)
j = 0
measures = regression_measures
for d in datasets:
for m in measures:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_table.at[j, 'DATASET'] = d
df_table.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_table.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_table.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_table.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_table.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_table.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_table.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_table.at[j, 'DTO'] = aux.at[indice, m]
df_table.at[j, 'GEOMETRY'] = order
df_table.at[j, 'ALPHA'] = alpha
df_table.at[j, 'unit'] = m
j += 1
df_r2 = df_table[df_table['unit'] == 'R2score']
df_mae = df_table[df_table['unit'] == 'MAE']
df_mse = df_table[df_table['unit'] == 'MSE']
df_max = df_table[df_table['unit'] == 'MAX']
r2 = df_r2[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
mae = df_mae[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
mse = df_mse[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
max = df_max[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
r2 = r2.reset_index()
r2.drop('index', axis=1, inplace=True)
mae = mae.reset_index()
mae.drop('index', axis=1, inplace=True)
mse = mse.reset_index()
mse.drop('index', axis=1, inplace=True)
max = max.reset_index()
max.drop('index', axis=1, inplace=True)
# compute the rank row by row
r2_rank = r2.rank(axis=1, ascending=False)
mae_rank = mae.rank(axis=1, ascending=True)
mse_rank = mse.rank(axis=1, ascending=True)
max_rank = max.rank(axis=1, ascending=True)
df_r2 = df_r2.reset_index()
df_r2.drop('index', axis=1, inplace=True)
df_r2['RANK_ORIGINAL'] = r2_rank['ORIGINAL']
df_r2['RANK_SMOTE'] = r2_rank['SMOTE']
df_r2['RANK_SMOTE_SVM'] = r2_rank['SMOTE_SVM']
df_r2['RANK_BORDERLINE1'] = r2_rank['BORDERLINE1']
df_r2['RANK_BORDERLINE2'] = r2_rank['BORDERLINE2']
df_r2['RANK_GEOMETRIC_SMOTE'] = r2_rank['GEOMETRIC_SMOTE']
df_r2['RANK_DTO'] = r2_rank['DTO']
df_mae = df_mae.reset_index()
df_mae.drop('index', axis=1, inplace=True)
df_mae['RANK_ORIGINAL'] = mae_rank['ORIGINAL']
df_mae['RANK_SMOTE'] = mae_rank['SMOTE']
df_mae['RANK_SMOTE_SVM'] = mae_rank['SMOTE_SVM']
df_mae['RANK_BORDERLINE1'] = mae_rank['BORDERLINE1']
df_mae['RANK_BORDERLINE2'] = mae_rank['BORDERLINE2']
df_mae['RANK_GEOMETRIC_SMOTE'] = mae_rank['GEOMETRIC_SMOTE']
df_mae['RANK_DTO'] = mae_rank['DTO']
df_mse = df_mse.reset_index()
df_mse.drop('index', axis=1, inplace=True)
df_mse['RANK_ORIGINAL'] = mse_rank['ORIGINAL']
df_mse['RANK_SMOTE'] = mse_rank['SMOTE']
df_mse['RANK_SMOTE_SVM'] = mse_rank['SMOTE_SVM']
df_mse['RANK_BORDERLINE1'] = mse_rank['BORDERLINE1']
df_mse['RANK_BORDERLINE2'] = mse_rank['BORDERLINE2']
df_mse['RANK_GEOMETRIC_SMOTE'] = mse_rank['GEOMETRIC_SMOTE']
df_mse['RANK_DTO'] = mse_rank['DTO']
df_max = df_max.reset_index()
df_max.drop('index', axis=1, inplace=True)
df_max['RANK_ORIGINAL'] = max_rank['ORIGINAL']
df_max['RANK_SMOTE'] = max_rank['SMOTE']
df_max['RANK_SMOTE_SVM'] = max_rank['SMOTE_SVM']
df_max['RANK_BORDERLINE1'] = max_rank['BORDERLINE1']
df_max['RANK_BORDERLINE2'] = max_rank['BORDERLINE2']
df_max['RANK_GEOMETRIC_SMOTE'] = max_rank['GEOMETRIC_SMOTE']
df_max['RANK_DTO'] = max_rank['DTO']
# average rank
media_r2_rank = r2_rank.mean(axis=0)
media_mae_rank = mae_rank.mean(axis=0)
media_mse_rank = mse_rank.mean(axis=0)
media_max_rank = max_rank.mean(axis=0)
media_r2_rank_file = media_r2_rank.reset_index()
media_r2_rank_file = media_r2_rank_file.sort_values(by=0)
media_mae_rank_file = media_mae_rank.reset_index()
media_mae_rank_file = media_mae_rank_file.sort_values(by=0)
media_mse_rank_file = media_mse_rank.reset_index()
media_mse_rank_file = media_mse_rank_file.sort_values(by=0)
media_max_rank_file = media_max_rank.reset_index()
media_max_rank_file = media_max_rank_file.sort_values(by=0)
if smote == False:
# Save the important files
df_r2.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv', index=False)
df_mae.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv', index=False)
df_mse.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv', index=False)
df_max.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv', index=False)
media_r2_rank_file.to_csv(
rank_dir + release + '_' + 'media_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv',
index=False)
media_mae_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv',
index=False)
media_mse_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv',
index=False)
media_max_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv',
index=False)
GEOMETRY = order + '_' + str(alpha)
# CD (critical difference) diagram
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
'DTO']
avranks = list(media_r2_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_r2.pdf')
plt.close()
avranks = list(media_mae_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_mae.pdf')
plt.close()
avranks = list(media_mse_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_mse.pdf')
plt.close()
avranks = list(media_max_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_max.pdf')
plt.close()
print('Delaunay Type= ', GEOMETRY)
print('Algorithm= ', name)
else:
# Save the important files
df_r2.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv', index=False)
df_mae.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv', index=False)
df_mse.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv', index=False)
df_max.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv', index=False)
media_r2_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv',
index=False)
media_mae_rank_file.to_csv(
rank_dir + release + '_smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv',
index=False)
media_mse_rank_file.to_csv(
rank_dir + release + 'smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv',
index=False)
media_max_rank_file.to_csv(
rank_dir + release + 'smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv',
index=False)
GEOMETRY = order + '_' + str(alpha)
# CD (critical difference) diagram
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
GEOMETRY]
avranks = list(media_r2_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_r2.pdf')
plt.close()
avranks = list(media_mae_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_mae.pdf')
plt.close()
avranks = list(media_mse_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_mse.pdf')
plt.close()
avranks = list(media_max_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_max.pdf')
plt.close()
print('SMOTE Delaunay Type= ', GEOMETRY)
print('SMOTE Algorithm= ', name)
def rank_dto_by(self, geometry, release, smote=False):
M = ['_r2.csv', '_mae.csv', '_mse.csv', '_max.csv']
df_media_rank = pd.DataFrame(columns=['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE',
'RANK_SMOTE_SVM', 'RANK_BORDERLINE1', 'RANK_BORDERLINE2',
'RANK_GEOMETRIC_SMOTE', 'RANK_DTO', 'unit'])
if smote == False:
name = rank_dir + release + '_total_rank_' + geometry + '_'
else:
name = rank_dir + release + '_smote_total_rank_' + geometry + '_'
for m in M:
i = 0
for c in regression_list:
df = pd.read_csv(name + c + m)
rank_original = df.RANK_ORIGINAL.mean()
rank_smote = df.RANK_SMOTE.mean()
rank_smote_svm = df.RANK_SMOTE_SVM.mean()
rank_b1 = df.RANK_BORDERLINE1.mean()
rank_b2 = df.RANK_BORDERLINE2.mean()
rank_geo_smote = df.RANK_GEOMETRIC_SMOTE.mean()
rank_dto = df.RANK_DTO.mean()
df_media_rank.loc[i, 'ALGORITHM'] = df.loc[0, 'ALGORITHM']
df_media_rank.loc[i, 'RANK_ORIGINAL'] = rank_original
df_media_rank.loc[i, 'RANK_SMOTE'] = rank_smote
df_media_rank.loc[i, 'RANK_SMOTE_SVM'] = rank_smote_svm
df_media_rank.loc[i, 'RANK_BORDERLINE1'] = rank_b1
df_media_rank.loc[i, 'RANK_BORDERLINE2'] = rank_b2
df_media_rank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = rank_geo_smote
df_media_rank.loc[i, 'RANK_DTO'] = rank_dto
df_media_rank.loc[i, 'unit'] = df.loc[0, 'unit']
i += 1
dfmediarank = df_media_rank.copy()
dfmediarank = dfmediarank.sort_values('RANK_DTO')
dfmediarank.loc[i, 'ALGORITHM'] = 'average'
dfmediarank.loc[i, 'RANK_ORIGINAL'] = df_media_rank['RANK_ORIGINAL'].mean()
dfmediarank.loc[i, 'RANK_SMOTE'] = df_media_rank['RANK_SMOTE'].mean()
dfmediarank.loc[i, 'RANK_SMOTE_SVM'] = df_media_rank['RANK_SMOTE_SVM'].mean()
dfmediarank.loc[i, 'RANK_BORDERLINE1'] = df_media_rank['RANK_BORDERLINE1'].mean()
dfmediarank.loc[i, 'RANK_BORDERLINE2'] = df_media_rank['RANK_BORDERLINE2'].mean()
dfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = df_media_rank['RANK_GEOMETRIC_SMOTE'].mean()
dfmediarank.loc[i, 'RANK_DTO'] = df_media_rank['RANK_DTO'].mean()
dfmediarank.loc[i, 'unit'] = df.loc[0, 'unit']
i += 1
dfmediarank.loc[i, 'ALGORITHM'] = 'std'
dfmediarank.loc[i, 'RANK_ORIGINAL'] = df_media_rank['RANK_ORIGINAL'].std()
dfmediarank.loc[i, 'RANK_SMOTE'] = df_media_rank['RANK_SMOTE'].std()
dfmediarank.loc[i, 'RANK_SMOTE_SVM'] = df_media_rank['RANK_SMOTE_SVM'].std()
dfmediarank.loc[i, 'RANK_BORDERLINE1'] = df_media_rank['RANK_BORDERLINE1'].std()
dfmediarank.loc[i, 'RANK_BORDERLINE2'] = df_media_rank['RANK_BORDERLINE2'].std()
dfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = df_media_rank['RANK_GEOMETRIC_SMOTE'].std()
dfmediarank.loc[i, 'RANK_DTO'] = df_media_rank['RANK_DTO'].std()
dfmediarank.loc[i, 'unit'] = df.loc[0, 'unit']
dfmediarank['RANK_ORIGINAL'] = pd.to_numeric(dfmediarank['RANK_ORIGINAL'], downcast="float").round(2)
dfmediarank['RANK_SMOTE'] = pd.to_numeric(dfmediarank['RANK_SMOTE'], downcast="float").round(2)
dfmediarank['RANK_SMOTE_SVM'] = pd.to_numeric(dfmediarank['RANK_SMOTE_SVM'], downcast="float").round(2)
dfmediarank['RANK_BORDERLINE1'] = pd.to_numeric(dfmediarank['RANK_BORDERLINE1'], downcast="float").round(2)
dfmediarank['RANK_BORDERLINE2'] = pd.to_numeric(dfmediarank['RANK_BORDERLINE2'], downcast="float").round(2)
dfmediarank['RANK_GEOMETRIC_SMOTE'] = pd.to_numeric(dfmediarank['RANK_GEOMETRIC_SMOTE'],
downcast="float").round(2)
dfmediarank['RANK_DTO'] = pd.to_numeric(dfmediarank['RANK_DTO'], downcast="float").round(2)
if smote == False:
dfmediarank.to_csv(output_dir + release + '_results_media_rank_' + geometry + m,
index=False)
else:
dfmediarank.to_csv(output_dir + release + '_smote_results_media_rank_' + geometry + m,
index=False)
def grafico_variacao_alpha(self, release):
M = ['_r2', '_mae', '_mse', '_max']
df_alpha_variations_rank = pd.DataFrame()
df_alpha_variations_rank['alphas'] = alphas
df_alpha_variations_rank.index = alphas
df_alpha_all = pd.DataFrame()
df_alpha_all['alphas'] = alphas
df_alpha_all.index = alphas
for m in M:
for o in order:
for a in alphas:
filename = output_dir + release + '_results_media_rank_' + o + '_' + str(
a) + m + '.csv'
print(filename)
df = pd.read_csv(filename)
mean = df.loc[8, 'RANK_DTO']
df_alpha_variations_rank.loc[a, 'AVERAGE_RANK'] = mean
if m == '_r2':
measure = 'R2'
if m == '_mae':
measure = 'MAE'
if m == '_mse':
measure = 'MSE'
if m == '_max':
measure = 'MAX'
df_alpha_all[o + '_' + measure] = df_alpha_variations_rank['AVERAGE_RANK'].copy()
fig, ax = plt.subplots()
ax.set_title('DTO AVERAGE RANK\n ' + 'GEOMETRY = ' + o + '\nMEASURE = ' + measure, fontsize=10)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
ax.plot(df_alpha_variations_rank['AVERAGE_RANK'], marker='d', label='Average Rank')
ax.legend(loc="upper right")
plt.xticks(range(11))
fig.savefig(graphics_dir + release + '_pic_' + o + '_' + measure + '.png', dpi=125)
plt.show()
plt.close()
# figure(num=None, figsize=(10, 10), dpi=800, facecolor='w', edgecolor='k')
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = R2', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_R2']
ft2 = df_alpha_all['min_solid_angle_R2']
ft3 = df_alpha_all['solid_angle_R2']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_r2.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_r2.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = MAE', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_MAE']
ft2 = df_alpha_all['min_solid_angle_MAE']
ft3 = df_alpha_all['solid_angle_MAE']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_mae.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_mae.csv', index=False)
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = MSE', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_MSE']
ft2 = df_alpha_all['min_solid_angle_MSE']
ft3 = df_alpha_all['solid_angle_MSE']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_mse.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_mse.csv', index=False)
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = MAX', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_MAX']
ft2 = df_alpha_all['min_solid_angle_MAX']
ft3 = df_alpha_all['solid_angle_MAX']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_max.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_max.csv', index=False)
def best_alpha(self, kind):
# Best alpha calculation
# GEO
df1 = pd.read_csv(output_dir + 'v1' + '_pic_all_geo.csv')
df2 = pd.read_csv(output_dir + 'v2' + '_pic_all_geo.csv')
df3 = pd.read_csv(output_dir + 'v3' + '_pic_all_geo.csv')
if kind == 'biclass':
col = ['area_GEO', 'volume_GEO', 'area_volume_ratio_GEO',
'edge_ratio_GEO', 'radius_ratio_GEO', 'aspect_ratio_GEO',
'max_solid_angle_GEO', 'min_solid_angle_GEO', 'solid_angle_GEO',
'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA', 'edge_ratio_IBA',
'radius_ratio_IBA', 'aspect_ratio_IBA', 'max_solid_angle_IBA',
'min_solid_angle_IBA', 'solid_angle_IBA', 'area_AUC', 'volume_AUC',
'area_volume_ratio_AUC', 'edge_ratio_AUC', 'radius_ratio_AUC',
'aspect_ratio_AUC', 'max_solid_angle_AUC', 'min_solid_angle_AUC',
'solid_angle_AUC']
else:
col = ['area_GEO', 'volume_GEO',
'area_volume_ratio_GEO', 'edge_ratio_GEO', 'radius_ratio_GEO',
'aspect_ratio_GEO', 'max_solid_angle_GEO', 'min_solid_angle_GEO',
'solid_angle_GEO', 'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA',
'edge_ratio_IBA', 'radius_ratio_IBA', 'aspect_ratio_IBA',
'max_solid_angle_IBA', 'min_solid_angle_IBA', 'solid_angle_IBA']
df_mean = pd.DataFrame()
df_mean['alphas'] = df1.alphas
for c in col:
for i in np.arange(0, df1.shape[0]):
df_mean.loc[i, c] = (df1.loc[i, c] + df2.loc[i, c] + df3.loc[i, c]) / 3.0
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = GEO', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_GEO']
ft2 = df_mean['volume_GEO']
ft3 = df_mean['area_volume_ratio_GEO']
ft4 = df_mean['edge_ratio_GEO']
ft5 = df_mean['radius_ratio_GEO']
ft6 = df_mean['aspect_ratio_GEO']
ft7 = df_mean['max_solid_angle_GEO']
ft8 = df_mean['min_solid_angle_GEO']
ft9 = df_mean['solid_angle_GEO']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_geo.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_geo.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = IBA', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_IBA']
ft2 = df_mean['volume_IBA']
ft3 = df_mean['area_volume_ratio_IBA']
ft4 = df_mean['edge_ratio_IBA']
ft5 = df_mean['radius_ratio_IBA']
ft6 = df_mean['aspect_ratio_IBA']
ft7 = df_mean['max_solid_angle_IBA']
ft8 = df_mean['min_solid_angle_IBA']
ft9 = df_mean['solid_angle_IBA']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_iba.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_iba.csv', index=False)
if kind == 'biclass':
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVERAGE RANK\n ' + '\nMEASURE = AUC', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_AUC']
ft2 = df_mean['volume_AUC']
ft3 = df_mean['area_volume_ratio_AUC']
ft4 = df_mean['edge_ratio_AUC']
ft5 = df_mean['radius_ratio_AUC']
ft6 = df_mean['aspect_ratio_AUC']
ft7 = df_mean['max_solid_angle_AUC']
ft8 = df_mean['min_solid_angle_AUC']
ft9 = df_mean['solid_angle_AUC']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_auc.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_auc.csv', index=False)
def run_global_rank(self, filename, kind, release):
df_best_dto = pd.read_csv(filename)
df_B1 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline1'].copy()
df_B2 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline2'].copy()
df_GEO = df_best_dto[df_best_dto['PREPROC'] == '_Geometric_SMOTE'].copy()
df_SMOTE = df_best_dto[df_best_dto['PREPROC'] == '_SMOTE'].copy()
df_SMOTEsvm = df_best_dto[df_best_dto['PREPROC'] == '_smoteSVM'].copy()
df_original = df_best_dto[df_best_dto['PREPROC'] == '_train'].copy()
o = 'solid_angle'
if kind == 'biclass':
a = 7.0
else:
a = 7.5
GEOMETRY = '_delaunay_' + o + '_' + str(a)
df_dto = df_best_dto[df_best_dto['PREPROC'] == GEOMETRY].copy()
df = pd.concat([df_B1, df_B2, df_GEO, df_SMOTE, df_SMOTEsvm, df_original, df_dto])
self.rank_by_algorithm(df, o, str(a), release, smote=True)
self.rank_dto_by(o + '_' + str(a), release, smote=True)
def overall_rank(self, ext, kind, alpha):
df1 = pd.read_csv(
output_dir + 'v1_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
df2 = pd.read_csv(
output_dir + 'v2_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
df3 = pd.read_csv(
output_dir + 'v3_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
col = ['RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1'
, 'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DTO']
df_mean = pd.DataFrame()
df_mean['ALGORITHM'] = df1.ALGORITHM
df_mean['unit'] = df1.unit
for c in col:
for i in np.arange(0, df1.shape[0]):
df_mean.loc[i, c] = (df1.loc[i, c] + df2.loc[i, c] + df3.loc[i, c]) / 3.0
df_mean['RANK_ORIGINAL'] = pd.to_numeric(df_mean['RANK_ORIGINAL'], downcast="float").round(2)
df_mean['RANK_SMOTE'] = pd.to_numeric(df_mean['RANK_SMOTE'], downcast="float").round(2)
df_mean['RANK_SMOTE_SVM'] = pd.to_numeric(df_mean['RANK_SMOTE_SVM'], downcast="float").round(2)
df_mean['RANK_BORDERLINE1'] = | pd.to_numeric(df_mean['RANK_BORDERLINE1'], downcast="float") | pandas.to_numeric |
# -*- coding: utf-8 -*-
from lxml import objectify
import pandas as pd
from pandas import DataFrame
from datetime import datetime
import sys
from logging import getLogger
import logging.config
def main(args):
log = getLogger()
logging.config.fileConfig("config/logging.conf")
log.debug('Parse started')
parsed = objectify.parse(open('書き出したデータ.xml'))
# parsed = objectify.parse(open('sample.xml'))
log.debug('Parse finished')
log.debug('Started building data array')
dict_array = create_dict_array(parsed)
log.debug('Finished building data array')
log.debug('Started building DataFrame')
df = DataFrame(dict_array)
log.debug('Finished building DataFrame')
log.debug('Started building index')
df.index = df['startDate']
# df.index = pd.to_datetime(df.index, utc=True).tz_convert('Asia/Tokyo')
df.index = pd.to_datetime(df.index).tz_localize('UTC').tz_convert('Asia/Tokyo')
log.debug('Finished building index')
print(df.head(100))
body_mass = df[(df['type'] == 'HKQuantityTypeIdentifierBodyMass') &
(~ df['sourceName'].str.contains('SmartBand 2'))].copy() # body weight
bfp = df[df['type'] == 'HKQuantityTypeIdentifierBodyFatPercentage'].copy() # body fat percentage
bmi = df[df['type'] == 'HKQuantityTypeIdentifierBodyMassIndex'].copy() # BMI
# cycling = df[
# (df['type'] == 'HKQuantityTypeIdentifierDistanceCycling') & (
# (df['sourceName'] == 'Apple Watch') | (df['sourceName'] == 'Apple Watch 4'))].copy() # Cycling
cycling = df[
(df['type'] == 'HKQuantityTypeIdentifierDistanceCycling') &
(df['sourceName'].str.contains('Apple Watch'))].copy() # Cycling
log.debug('Finished building per-metric DataFrames')
body_mass = body_mass.astype({'value': float})
bfp = bfp.astype({'value': float})
bmi = bmi.astype({'value': float})
cycling = cycling.astype({'value': float})
cycling_sum = cycling.groupby(pd.Grouper(freq='D')).sum()
body_mass_mean = body_mass.groupby(pd.Grouper(freq='D')).mean()
bmi_mean = bmi.groupby(pd.Grouper(freq='D')).mean()
bfp_mean = bfp.groupby( | pd.Grouper(freq='D') | pandas.Grouper |
from collections import defaultdict
from sklearn import preprocessing
import signal
import influxdb_client
from influxdb_client import InfluxDBClient
from datetime import datetime
from sklearn.preprocessing import KBinsDiscretizer
import argparse
import ntopng_constants as ntopng_c
import numpy as np
import pandas as pd
import pathlib
import logging
import pyfluxc.pyfluxc as flux
from pyts.approximation import SymbolicAggregateApproximation as SAX
import re
import requests
import sys
import time
import importlib
importlib.reload(flux)
# ----- ----- PREPROCESSING ----- ----- #
# ----- ----- ------------- ----- ----- #
MORNING = np.array([1, 0, 0, 0])
AFTERNOON = np.array([0, 1, 0, 0])
EVENING = np.array([0, 0, 1, 0])
NIGHT = np.array([0, 0, 0, 1])
hour2ts = [(MORNING, range(6, 12)),
(AFTERNOON, range(12, 17)),
(EVENING, range(17, 22)),
(NIGHT, list(range(22, 24))+list(range(0, 6)))]
hour2ts = { h: t for t, hrange in hour2ts for h in hrange }
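# The flattened lookup maps every hour of the day to its one-hot time slot, e.g.
# hour2ts[8] -> MORNING, hour2ts[13] -> AFTERNOON, hour2ts[23] -> NIGHT.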
class Preprocessor():
def __init__(self, deltas=True, discretize=True, flevel="NF_BLMISC"):
self.flevel = flevel
self.compute_deltas = deltas
self.compute_discrtz = discretize
self.discretizer = KBinsDiscretizer(n_bins=15, encode="ordinal")
@staticmethod
def date_as_feature(df):
time_indx = df.index.get_level_values("_time")
weekend_map = defaultdict(lambda: 0, { 5: 1, 6: 1 })
df["time:is_weekend"] = time_indx.dayofweek.map(weekend_map)
mapped_hours = time_indx.hour.map(hour2ts).values.tolist()
hours_df = pd.DataFrame(mapped_hours, columns=["time:morning", "time:afternoon", "time:evening", "time:night"], index=df.index)
df = pd.concat([df, hours_df], axis=1)
return df
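# Illustrative (hypothetical index entry): a _time of Saturday 2021-05-01 23:30 gets
# time:is_weekend = 1 (dayofweek 5) and falls in the NIGHT slot, i.e.
# time:morning=0, time:afternoon=0, time:evening=0, time:night=1.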
@staticmethod
def fillzero(df):
"""Replace zero traffic holes with rolling window mean
"""
missing_traffic = (df == 0).all(axis=1)
df[missing_traffic].replace(0, np.NaN)
r_mean = df.rolling(min_periods=1, window=3, center=True).sum().shift(-1) / 2
df.loc[missing_traffic] = r_mean[missing_traffic]
return df
def discretize(self, df, fit=False):
tc = [c for c in df.columns if (("ndpi_flows:num_flows" not in c) and ("time:" not in c))]
values = df[tc].values
if fit:
df[tc] = self.discretizer.fit_transform(values)
else:
df[tc] = self.discretizer.transform(values)
return df
def preprocessing(self, df):
smart_features = set(ntopng_c.FEATURE_LEVELS[self.flevel])
available_features = set(df.columns)
available_cols = available_features.intersection(smart_features)
if available_cols != smart_features:
missing_c = smart_features - available_cols
logging.warning(f"Missing columns: {missing_c}")
df = df[available_cols].copy(deep=True)
df[df<0] = 0
df = df.fillna(0)
df = Preprocessor.fillzero(df)
# DPI unit length normalization ..... #
ndpi_num_flows_c = [c for c in df.columns if "ndpi_flows:num_flows" in c]
ndpi = df[ndpi_num_flows_c]
ndpi_sum = ndpi.sum(axis=1)
df.loc[:, ndpi_num_flows_c] = ndpi.divide(ndpi_sum + 1e-3, axis=0)
# Non decreasing delta discretization ..... #
if self.compute_deltas:
# Filter selected non-stationary features
non_decreasing = [c for c in df.columns if c in ntopng_c.NON_DECREASING]
df[non_decreasing] = df[non_decreasing].groupby(level=["device_category", "host"], group_keys=False).apply(lambda group: group.diff())
df = df.fillna(0)
# Feature discretization ..... #
# Note: we avoid pandas qcut/cut because scikit-learn's {KBinsDiscretizer} decouples
# fit from transform. In the future {KBinsDiscretizer} will be fitted once a week or so
# on weekly data and reused many times while the model is running
# if self.compute_discrtz:
# df = self.discretize(df, fit)
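# Sketch of that decoupled workflow (weekly_df/traffic_cols are hypothetical names):
# disc = KBinsDiscretizer(n_bins=15, encode="ordinal")
# disc.fit(weekly_df[traffic_cols].values) # fit bin edges once on a week of data
# live_df[traffic_cols] = disc.transform(live_df[traffic_cols].values) # reuse at runtime
# pandas qcut/cut would instead recompute the bin edges on every call.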
# Date/hour as a feature ..... #
df = Preprocessor.date_as_feature(df)
return df
# ----- ----- HOST DATA GENERATOR ----- ----- #
# ----- ----- ------------------- ----- ----- #
class FluxDataGenerator():
def __init__(self, bucket, windowsize, fluxclient, start, ntopng_conf):
self.last_timestamp = start
self.samples = None
self.fluxclient = fluxclient
self.bucket = bucket
self.ntopng_conf = ntopng_conf
self.host_map = {}
self.windowsize = windowsize
wndsize_val, wndsize_unit = re.match(r'([0-9]+)([a-zA-Z]+)', self.windowsize).groups()
self.window_timedelta = np.timedelta64(wndsize_val, wndsize_unit)
def to_pandas(self):
if self.samples is None:
raise ValueError('No samples available')
return self.samples.copy(deep=True)
def pull(self, start=None, stop=None):
if not start:
start = self.last_timestamp
utcnow = pd.Timestamp.utcnow()
q = self.query(start, stop)
client = InfluxDBClient(url="http://localhost:8086", token="<PASSWORD>", org="my-org")
query_api = client.query_api()
try:
query_reply = query_api.query_data_frame(str(q))
except influxdb_client.rest.ApiException as e:
if e != b'{"error":"failed to initialize execute state: no database"}\n':
logging.warning(e)
self.last_timestamp = utcnow if stop is None else stop
return self.last_timestamp, None
except Exception as e:
logging.warning("Exception: probably database not ready")
return self.last_timestamp, None
new_samples = pd.concat(query_reply) if type(query_reply)==list else query_reply
if new_samples.empty:
self.last_timestamp = utcnow if stop is None else stop
return self.last_timestamp, None
new_samples = new_samples.drop(columns=["result", "table"])
# Transforming existing ndpi flows to measurements ..... #
host_ndpi_flows_measurements = new_samples["_measurement"]=="host:ndpi_flows"
host_ndpi_flows = new_samples.loc[host_ndpi_flows_measurements]
host_ndpi_flows_cat = host_ndpi_flows["protocol"].str.lower().map(ntopng_c.NDPI_VALUE2CAT)
new_samples.loc[host_ndpi_flows_measurements, "_field"] += ("__" + host_ndpi_flows_cat)
# Transforming existing ndpi bytes to measurements ..... #
host_ndpi_bytes_measurements = new_samples["_measurement"]=="host:ndpi"
host_ndpi_bytes = new_samples.loc[host_ndpi_bytes_measurements]
host_ndpi_bytes_cat = host_ndpi_bytes["protocol"].str.lower().map(ntopng_c.NDPI_VALUE2CAT)
new_samples.loc[host_ndpi_bytes_measurements, "_field"] += ("__" + host_ndpi_bytes_cat)
# Device category ..... #
new_samples['device_category'] = self.category_map(new_samples)
# Building dframe ..... #
new_samples['_key'] = new_samples['_measurement'].str.replace('host:', '') + ':' + new_samples['_field']
new_samples = new_samples.pivot_table(index=["device_category", "host", "_time"],
columns="_key", values="_value", aggfunc=np.sum)
new_samples.columns = new_samples.columns.rename(None)
# Drop cutted samples. E.g. range(start=13:46:58, stop:13:49:00) have almost for sure NaN in the first 2 seconds)
# Thus we drop NaN values from bytes_rcvd which should never be NaN
new_samples = new_samples.dropna(subset=["traffic:bytes_rcvd"])
# Adding missing columns ..... #
missing_columns = []
available_columns = set(new_samples.columns)
missing_columns += ntopng_c.NDPI_FLOWS_COMPLETE - available_columns
missing_columns += ntopng_c.NDPI_BYTES_RCVD_COMPLETE - available_columns
missing_columns += ntopng_c.NDPI_BYTES_SENT_COMPLETE - available_columns
new_samples = new_samples.reindex(columns=new_samples.columns.tolist() + missing_columns, fill_value=0)
# Updating ..... #
# Checking to have only valid columns
new_samples = new_samples[ntopng_c.FEATURES_COMPLETE]
# Removing duplicate timestamps
if self.samples is not None:
max_old_samples = self.samples.groupby(level=["device_category", "host"]).apply(lambda x: x.index.max())
min_new_samples = new_samples.groupby(level=["device_category", "host"]).apply(lambda x: x.index.min())
dup_samples = np.intersect1d(max_old_samples, min_new_samples)
new_samples = new_samples.drop(dup_samples)
# Merging and updating time
self.samples = pd.concat([self.samples, new_samples])
self.last_timestamp = new_samples.index.get_level_values("_time").max()
return self.last_timestamp, new_samples
def save(self, datapath:pathlib.Path = None):
if not datapath:
datapath = datetime.now().strftime("%m.%d.%Y_%H.%M.%S_data")
datapath = pathlib.Path(datapath)
# Storing a generic query
with open(datapath / 'query.txt', 'w+') as f:
f.write(self.query(12345))
self.samples.to_pickle(datapath / 'timeseries.pkl')
def load(self, dname:pathlib.Path):
with open(dname / 'query.txt') as f:
if f.read() != self.query(12345):
raise ValueError('Trying to load from different query')
self.samples = | pd.read_pickle(dname / 'timeseries.pkl') | pandas.read_pickle |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": | bdate_range("2012-01-01", periods=300) | pandas.bdate_range |
''' EVENT DETECTION (FIXATIONS & SACCADES)'''
import os
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from helper import *
# Categorize fixations and saccades based on their order:
for i,k in itertools.product(sub_id, img_id):
file='Sub_'+str(i)+'_Image_'+str(k)+'.csv'
dataset=pd.read_csv(os.path.join(TRIALS_PATH,file),low_memory=False)
category = dataset['Fix_or_Sac']
watch_next = category != category.shift()
rank_order = watch_next.cumsum().groupby(category).rank(method='dense')
dataset['Group'] = category +"_"+ rank_order.astype(int).astype(str)
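# Illustrative run: Fix_or_Sac ['F','F','S','F'] -> watch_next [True,False,True,True]
# -> cumsum [1,1,2,3] -> dense rank within each category [1,1,1,2]
# -> Group ['F_1','F_1','S_1','F_2'], so consecutive samples of one event share a label.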
dataset.to_csv(os.path.join(TRIALS_PATH,file), index=False)
#Create separate files for each participant's fixations:
for i,k in itertools.product(sub_id, img_id):
file='Sub_'+str(i)+'_Image_'+str(k)+'.csv'
trials=pd.read_csv(os.path.join(TRIALS_PATH,file),low_memory=False)
events=pd.DataFrame()
events['Fixation_Start']=trials.groupby('Group')['TIME'].min()
events['Event_ID']= trials.groupby('Group')['Group'].first()
only_fixations=trials.query("Group.str.startswith('F').values")
time_fix_max=only_fixations.groupby('Group')['TIME'].max()
time_fix_min=only_fixations.groupby('Group')['TIME'].min()
events['FPOG_DUR']=time_fix_max-time_fix_min
events['FPOG_X']=only_fixations.groupby('Group')['BPOGX'].mean()
events['FPOG_Y']=only_fixations.groupby('Group')['BPOGY'].mean()
#events['event'] = events['Event_ID'].str.rsplit("_").str[0]
events['Idx'] = events['Event_ID'].str.rsplit("_").str[-1].astype(int)
events.sort_values('Idx',inplace=True)
events.drop('Idx', axis = 1,inplace=True)
#final=events[(events['FPOG_DUR']> 0.050) | (events['SAC_DUR'] > 0.010)]
to_write=events[events['FPOG_DUR']> 0.050]
to_write.loc[to_write.index[0], 'Trial_Start'] = trials['TIME'].iloc[0]
to_write.loc[to_write.index[0], 'Clutter'] = trials['CLUTTER'].iloc[0]
to_write.to_csv(os.path.join(EVENTS_PATH,file), index=False)
#Add columns about saccades to the files created above:
for i,k in itertools.product(sub_id, img_id):
file='Sub_'+str(i)+'_Image_'+str(k)+'.csv'
events=pd.read_csv(os.path.join(EVENTS_PATH,file),low_memory=False)
trial_start=events.iloc[0,5]
first_fixation_start=events.iloc[0,0]
first_fixation_dur=events.iloc[0,2]
events.loc[events.index[0],'SAC_LATENCY']=first_fixation_dur \
if trial_start==first_fixation_start \
else (first_fixation_start-trial_start) + first_fixation_dur
x=events['FPOG_X'].diff()
y=events['FPOG_Y'].diff()
events['SAC_AMPLITUDE']=(x ** 2 + y ** 2) ** 0.5
fix_dur_wo_last=events.iloc[:-1,2].reset_index(drop=True)
fix_start_dif=events['Fixation_Start'].diff().dropna().reset_index(drop=True)
events['SAC_DUR']=fix_start_dif-fix_dur_wo_last
events.to_csv(os.path.join(EVENTS_PATH,file), index=False)
'''sac_dur_min=dataset['SAC_DUR'].min() *1000
if sac_dur_min<10:
print(f'subject: {str(i)}, Image: {str(k)}')'''
#Visualize scanpath for all participants based on I-VT fixations:
for h,j in itertools.product(sub_id,img_id):
file='Sub_'+str(h)+'_Image_'+str(j)+'.csv'
events=pd.read_csv(os.path.join(EVENTS_PATH,file),low_memory=False)
x=events['FPOG_X']
y=events['FPOG_Y']
fix_dur=events['FPOG_DUR']
fig, ax = plt.subplots(figsize=(20, 11))
ax.scatter(x,
y,
zorder=1
,marker='o',
s=fix_dur*10000,
color='lime',
alpha=0.5)
ax.plot(x,
y,
'-o',
linewidth=3,
color='blue')
img = plt.imread(IMG_PATH+"\S"+str(j)+".jpg")
plt.imshow(img,
zorder=0,
extent=[-960, 960, -540, 540],
aspect='auto')
for i in range(len(fix_dur)):
ax.annotate(str(i+1),
xy=(fix_dur.iloc[i],
fix_dur.iloc[i]),
xytext=(x.iloc[i],
y.iloc[i]),
fontsize=30,
color='black',
ha='center',
va='center')
plt.xlabel('X coordinates (in pixels)', size=20)
plt.ylabel('Y coordinates (in pixels)', size=20)
plt.title('Scanpath for Subject '+str(h)+' , Image '+str(j), size=30)
#draw a rectangle around the location of the star
target_coords= | pd.read_csv(BEHAVIORAL_FILE) | pandas.read_csv |
import pandas as pd
import numpy as np
from pathlib import Path
from scipy.spatial import distance
from math import factorial, atan2, degrees, acos, sqrt, pi
from lizardanalysis.utils import auxiliaryfunctions
#TODO: check why files only contain species names but no measurements!!
analyze_again = True
# utility functions
def calc_distance_between_points_two_vectors_2d(v1, v2):
'''calc_distance_between_points_two_vectors_2d [pairwise distance between two vectors of 2d points]
Arguments:
v1 {[np.ndarray]} -- [(N, 2) array with the X,Y coordinates of N points]
v2 {[np.ndarray]} -- [(N, 2) array with the X,Y coordinates of N points]
Raises:
ValueError -- [invalid argument data format]
ValueError -- [invalid shape for input arrays]
ValueError -- [input arrays of different length]
Returns:
[list] -- [pairwise euclidean distance between the points in v1 and v2]
testing:
>>> v1 = np.zeros((2, 5))
>>> v2 = np.zeros((2, 5))
>>> v2[1, :] = [0, 10, 25, 50, 100]
>>> d = calc_distance_between_points_two_vectors_2d(v1.T, v2.T)
'''
# Check dataformats
if not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray):
raise ValueError('Invalid argument data format')
if not v1.shape[1] == 2 or not v2.shape[1] == 2:
raise ValueError('Invalid shape for input arrays')
if not v1.shape[0] == v2.shape[0]:
raise ValueError('Error: input arrays should have the same length')
# Calculate distance
dist = [distance.euclidean(p1, p2) for p1, p2 in zip(v1, v2)]
return dist
def angle_between_points_2d_anticlockwise(p1, p2):
'''angle_between_points_2d_anticlockwise [Determines the angle of a straight line drawn between point one and two.
The number returned, which is a double in degrees, tells us how much we have to rotate
a horizontal line anti-clockwise for it to match the line between the two points.]
Arguments:
p1 {[np.ndarray, list]} -- np.array or list [ with the X and Y coordinates of the point]
p2 {[np.ndarray, list]} -- np.array or list [ with the X and Y coordinates of the point]
Returns:
[float] -- [anticlockwise angle, in degrees, of the line from p1 to p2 relative to the horizontal]
Testing: - to check: print(zero, ninety, oneeighty, twoseventy)
>>> zero = angle_between_points_2d_clockwise([0, 1], [0, 1])
>>> ninety = angle_between_points_2d_clockwise([1, 0], [0, 1])
>>> oneeighty = angle_between_points_2d_clockwise([0, -1], [0, 1])
>>> twoseventy = angle_between_points_2d_clockwise([-1, 0], [0, 1])
>>> ninety2 = angle_between_points_2d_clockwise([10, 0], [10, 1])
>>> print(ninety2)
'''
"""
Determines the angle of a straight line drawn between point one and two.
The number returned, which is a double in degrees, tells us how much we have to rotate
a horizontal line anti-clockwise for it to match the line between the two points.
"""
xDiff = p2[0] - p1[0]
yDiff = p2[1] - p1[1]
ang = degrees(atan2(yDiff, xDiff))
if ang < 0: ang += 360
# if not 0 <= ang <+ 360: raise ValueError('Ang was not computed correctly')
return ang
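# Quick sanity checks (illustrative): angle_between_points_2d_anticlockwise([0, 0], [1, 1]) == 45.0
# and angle_between_points_2d_anticlockwise([1, 0], [0, 0]) == 180.0.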
def calc_angle_between_vectors_of_points_2d(v1, v2):
'''calc_angle_between_vectors_of_points_2d [calculates the anticlockwise angle between each pair of points in two 2d arrays of points]
Arguments:
v1 {[np.ndarray]} -- [2d array with X,Y position at each timepoint]
v2 {[np.ndarray]} -- [2d array with X,Y position at each timepoint]
Returns:
[np.ndarray] -- [1d array with the anticlockwise angle between pairwise points in v1,v2]
Testing:
>>> v1 = np.zeros((2, 4))
>>> v1[1, :] = [1, 1, 1, 1, ]
>>> v2 = np.zeros((2, 4))
>>> v2[0, :] = [0, 1, 0, -1]
>>> v2[1, :] = [1, 0, -1, 0]
>>> a = calc_angle_between_vectors_of_points_2d(v2, v1)
'''
# Check data format
if v1 is None or v2 is None or not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray):
raise ValueError('Invalid format for input arguments')
if len(v1) != len(v2):
raise ValueError('Input arrays should have the same length, instead: ', len(v1), len(v2))
if not v1.shape[0] == 2 or not v2.shape[0] == 2:
raise ValueError('Invalid shape for input arrays: ', v1.shape, v2.shape)
# Calculate
n_points = v1.shape[1]
angs = np.zeros(n_points)
for i in range(v1.shape[1]):
p1, p2 = v1[:, i], v2[:, i]
angs[i] = angle_between_points_2d_anticlockwise(p1, p2)
return angs
# Process single bone
def analyzebone(bp1, bp2):
"""[Computes length and orientation of the bone at each frame]
Arguments:
bp1 {[pd.DataFrame]} -- [tracked x, y and likelihood columns of the first bodypart]
bp2 {[pd.DataFrame]} -- [tracked x, y and likelihood columns of the second bodypart]
"""
print("bone: ", bp1, bp2)
bp1_pos = np.vstack([bp1.x.values, bp1.y.values]).T
bp2_pos = np.vstack([bp2.x.values, bp2.y.values]).T
# get bone length and orientation
bone_length = calc_distance_between_points_two_vectors_2d(bp1_pos, bp2_pos)
bone_orientation = calc_angle_between_vectors_of_points_2d(bp1_pos.T, bp2_pos.T)
# keep the smallest of the two likelihoods
likelihoods = np.vstack([bp1.likelihood.values, bp2.likelihood.values]).T
likelihood = np.min(likelihoods, 1)
# Create dataframe and return
df = pd.DataFrame.from_dict(dict(
length=bone_length,
orientation=bone_orientation,
likelihood=likelihood,
))
# df.index.name=name
print(df)
return df
# MAIN FUNC
def calc_morphometrics(config, save_as_csv=True, **kwargs):
"""
Extracts length and orientation of each "bone" of the skeleton as defined in the config file.
Parameter
----------
config : string
Full path of the config.yaml file as a string.
save_as_csv: bool, optional
Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``
destfolder: string, optional
Specifies the destination folder for analysis data (default is the analysis results -> morphometrics folder).
"""
import errno
import os
import tqdm
import re
from glob import glob
current_path = os.getcwd()
destfolder = None
# Load config file, scorer and videos
config_file = Path(config).resolve()
cfg = auxiliaryfunctions.read_config(config_file)
project_dir = f"{cfg['task']}-{cfg['scorer']}-{cfg['species']}-{cfg['date']}"
# try to make folder for storing results
if destfolder is None:
destfolder = os.path.join(str(config_file).rsplit(os.path.sep, 1)[0], "analysis-results",
"lizard_morphometrics")
try:
os.makedirs(destfolder)
# print("folder for curve_fitting plots created")
except OSError as e:
if e.errno != errno.EEXIST:
raise
# create filelist of all files:
files = cfg['file_sets'].keys() # object type ('CommentedMapKeysView' object), does not support indexing
filelist = [] # store filepaths as list
for file in files:
filelist.append(file)
# get data and filename:
individuals = []
for i in range(1, len(filelist)+1):
print("file in progress: {} of TOTAL: {} --- Progress: {}%".format(i, len(filelist), np.round(i/(len(filelist))*100)))
filename = filelist[i-1].rsplit(os.sep, 1)[1]
filename = filename.rsplit(".", 1)[0]
temp = re.compile("([a-zA-Z]+)([0-9]+)")
res = temp.match(filename).groups()
individual = str(res[0]+res[1])
#print(individual)
if individual not in individuals:
individuals.append(individual)
file_path_2 = os.path.join(project_dir, "files", os.path.basename(filelist[i-1]))
file_path = os.path.join(current_path, file_path_2)
if analyze_again:
data = pd.read_csv(file_path, delimiter=",",
header=[0, 1, 2]) # reads in first csv file in filelist to extract all available labels
data_rows_count = data.shape[0] # number of rows already excluded the 3 headers
scorer = data.columns[1][0]
# Process skeleton
if cfg['skeleton'] is None:
print('no skeleton defined in config. Copy skeleton from DLC config file to lizardanalysis config file.')
break
bones = {}
for bp1, bp2 in cfg['skeleton']:
name = "{}_{}".format(bp1, bp2)
bones[name] = analyzebone(data[scorer][bp1], data[scorer][bp2])
skeleton = pd.concat(bones, axis=1)
# save
if save_as_csv:
skeleton.to_csv(os.path.join(destfolder, '{}_morph.csv'.format(filename)))
# calculate the means for each individual:
individual_filelists = {}
print('\nnumber of individuals in group: ', len(individuals))
filelist_morph = glob(os.path.join(destfolder, '*_morph.csv'))
#print("{" + "\n".join("{!r}: {!r},".format(k, v) for k, v in filelist_morph.items()) + "}")
for individual in individuals:
individual_filelists[individual] = [file for file in filelist_morph if individual in file]
mean_of_individuals = {}
for individual, list_of_runs in individual_filelists.items():
print("\nINDIVIDUAL: ", individual)
means_of_run = {}
i = 0
for run in list_of_runs:
print("progress: {}/{}".format(i, len(list_of_runs)))
data_morph = pd.read_csv(run, delimiter=",", header=[0, 1]) # first two rows as header
data_morph = data_morph.drop(data_morph.columns[[0]], axis=1)
data_morph_rows_count = data_morph.shape[0] # number of rows already excluded the 2 headers
#print("number of rows: ", data_morph_rows_count)
data_morph.rename(columns=lambda x: x.strip(), inplace=True) # remove whitespaces from column names
scorer = data_morph.columns[1][0]
if i == 0:
bones = get_bone_names(data_morph)
i += 1
for bone in bones:
list_for_mean_bone = []
for row in range(data_morph_rows_count-1):
list_for_mean_bone.append(data_morph.loc[row, (bone, 'length')])
means_of_run[bone] = np.mean(list_for_mean_bone)
mean_of_individuals[individual] = [(k, np.mean(v)) for k,v in means_of_run.items()]
#print("mean_of_individuals: ", mean_of_individuals)
# save means of individuals in summary file:
morph_csv_columns = ['individual']
individual = individuals[0]
for bone_tuple in mean_of_individuals[individual]:
morph_csv_columns.append(bone_tuple[0])
print("morph_columns: ", morph_csv_columns)
print("length of columns: ", len(morph_csv_columns))
    df_morph_means = pd.DataFrame(columns=morph_csv_columns)
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
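# Example (illustrative input): convert_to_iso8601("23/02/2020 10:30") -> "2020-02-23T10:30:00"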
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0)
commerces = commerces.drop_duplicates("commerce_name")
i = 0
for _, row in commerces.iterrows():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_company",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
    members = load_member_summaries()
members = members[cols_of_interest]
members = members.drop_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~pd.isnull(members["tenancies"])]
members["about_company"] = members["about_company"].map(remove_html_tags, na_action="ignore")
members = members.sort_values("member_name")
i = 0
for _, row in members.iterrows():
member_name = row["member_name"]
if pd.isnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not pd.isnull(row[k]) and k in {"sectors", "buys", "sells"}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"UK_sectors",
"UK_divisions",
"UK_groups",
"UK_classes",
"sic_codes",
"directors",
}
else cast_to_float(row[k]) if k in {"Cash_figure","NetWorth_figure","TotalCurrentAssets_figure","TotalCurrentLiabilities_figure"}
else row[k] if not pd.isnull(row[k])
else None)
for k in cols_of_interest
},
}
if not pd.isnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
if pd.isnull(director["director_name"]):
continue
if not pd.isnull(director["director_date_of_birth"]):
director["director_date_of_birth"] = insert_space(director["director_date_of_birth"], 3)
directors.append(director)
else:
directors = []
document["directors"] = directors
assert not pd.isnull(row["tenancies"])
tenancies = []
regions = []
for tenancy in row["tenancies"].split(separator):
tenancies.append(tenancy)
if tenancy == "Made in the Midlands":
regions.append("midlands")
else:
assert tenancy == "Made in Yorkshire", tenancy
regions.append("yorkshire")
document["tenancies"] = tenancies
document["regions"] = regions
for award in ("badge", "accreditation"):
award_name = f"{award}s"
if not pd.isnull(row[award_name]):
awards = []
for a in row[award_name].split(separator):
awards.append(a)
document[award_name] = awards
insert_document(db, collection, document)
i += 1
def add_SIC_hierarchy_to_members(db=None):
'''
USE SIC CODES TO MAP TO SECTOR USING FILE:
data/class_to_sector.json
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
get_sic_codes_query = f'''
FOR m IN Members
FILTER m.sic_codes != NULL
RETURN {{
_key: m._key,
sic_codes: m.sic_codes,
}}
'''
members = aql_query(db, get_sic_codes_query)
class_to_sector_map = read_json("data/class_to_sector.json")
for member in members:
sic_codes = member["sic_codes"]
sic_codes = [sic_code.split(" - ")[1]
for sic_code in sic_codes]
classes = set()
groups = set()
divisions = set()
sectors = set()
for sic_code in sic_codes:
if sic_code not in class_to_sector_map:
continue
classes.add(sic_code)
groups.add(class_to_sector_map[sic_code]["group"])
divisions.add(class_to_sector_map[sic_code]["division"])
sectors.add(class_to_sector_map[sic_code]["sector"])
document = {
"_key" : member["_key"],
"UK_classes": sorted(classes),
"UK_groups": sorted(groups),
"UK_divisions": sorted(divisions),
"UK_sectors": sorted(sectors),
}
insert_document(db, collection, document, verbose=True)
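# Illustrative note (assumption inferred from the loop above): class_to_sector.json is expected
# to map a SIC class description to its group/division/sector, e.g.
# {
#     "Manufacture of electric motors": {
#         "group": "...", "division": "...", "sector": "Manufacturing"
#     }
# }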
def populate_users(
data_dir="data_for_graph",
cols_of_interest=[
"id",
"full_name",
"email",
"company_name",
"company_position",
"company_role",
],
db=None):
'''
CREATE AND ADD USER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Users", db, )
user_filename = f"{data_dir}/all_users.csv"
users = pd.read_csv(user_filename, index_col=0)
users["company_role"] = users.apply(
infer_role,
axis=1
)
i = 0
for _, row in users.iterrows():
user_name = row["full_name"]
if pd.isnull(user_name):
continue
document = {
"_key" : str(i),
"name": user_name,
**{
k: (row[k] if not pd.isnull(row[k]) else None)
for k in cols_of_interest
}
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_works_at(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("UserWorksAt", db, className="Edges")
user_filename = f"{data_dir}/all_users.csv"
users = pd.read_csv(user_filename, index_col=0)
users["company_role"] = users.apply(
infer_role,
axis=1
)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.iterrows():
user_id = row["id"]
company_id = row["company_id"]
if user_id not in user_name_to_id:
continue
if company_id not in member_name_to_id:
continue
document = {
"_key" : str(i),
"name": "works_at",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[company_id],
"company_position": row["company_position"]
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_follows(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
user_follows_collection = connect_to_collection("UserFollows", db, className="Edges")
user_follows_members_collection = connect_to_collection("MemberMemberFollows", db, className="Edges")
user_follows_filename = os.path.join(data_dir, "all_user_follows.csv")
users = pd.read_csv(user_follows_filename, index_col=0)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.iterrows():
user_id = row["id"]
if user_id not in user_name_to_id:
continue
user_name = row["full_name"]
employer_id = row["employer_id"]
followed_member_id = row["followed_member_id"]
if followed_member_id not in member_name_to_id:
continue
# user -> member
document = {
"_key" : str(i),
"name": "follows",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[followed_member_id]
}
print ("inserting data", document)
insert_document(db, user_follows_collection, document)
# member -> member
if employer_id in member_name_to_id:
document = {
"_key" : str(i),
"name": "follows",
"_from": member_name_to_id[employer_id],
"_to": member_name_to_id[followed_member_id],
"followed_by": user_name,
}
print ("inserting data", document)
insert_document(db, user_follows_members_collection, document)
i += 1
def populate_member_sectors(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("InSector", db, className="Edges")
members = load_member_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
sector_name_to_id = name_to_id(db, "Sectors", "sector_name")
for _, row in members.iterrows():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
sectors = row["sectors"]
if pd.isnull(sectors):
continue
sectors = sectors.split(separator)
for sector in sectors:
document = {
"_key" : str(i),
"name": "in_sector",
"_from": member_name_to_id[member_id],
"_to": sector_name_to_id[sector],
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_member_commerces(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("PerformsCommerce", db, className="Edges")
members = load_member_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
commerce_name_to_id = name_to_id(db, "Commerces", "commerce")
for _, row in members.iterrows():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
for commerce_type in ("buys", "sells"):
commerce = row[commerce_type]
if not pd.isnull(commerce):
commerce = commerce.split(separator)
for c in commerce:
if c=="":
assert False
continue
document = {
"_key" : str(i),
"name": commerce_type,
"_from": member_name_to_id[member_id],
"_to": commerce_name_to_id[c],
"commerce_type": commerce_type
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_messages(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Messages", db, className="Edges")
message_filename = os.path.join(data_dir, "all_messages.csv")
messages = pd.read_csv(message_filename, index_col=0)
messages = messages.drop_duplicates()
i = 0
user_name_to_id = name_to_id(db, "Users", "id")
for _, row in messages.iterrows():
sender_id = row["sender_id"]
if sender_id not in user_name_to_id:
continue
subject = row["subject"]
message = row["message"]
message = remove_html_tags(message)
timestamp = str(row["created_at"])
# TODO characterise messages
# recipients = json.loads(row["all_recipients"])
# for recipient in recipients:
# receiver = recipient["name"]
receiver_id = row["recipient_id"]
# receiver_member = row["recipient_member_name"]
if receiver_id not in user_name_to_id:
continue
if sender_id == receiver_id:
continue
document = {
"_key": str(i),
"name": "messages",
"_from": user_name_to_id[sender_id],
"_to": user_name_to_id[receiver_id],
"subject": subject,
"message": message,
"sent_at": convert_to_iso8601(timestamp),
}
insert_document(db, collection, document)
i += 1
def populate_member_member_business(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("MemberMemberBusiness", db, className="Edges")
member_name_to_id = name_to_id(db, "Members", "member_name")
i = 0
# articles
for region in ("yorkshire", "midlands"):
filename = os.path.join("members", f"member_member_partnerships - {region}_matched.csv")
member_member_business = pd.read_csv(filename, index_col=None)
for _, row in member_member_business.iterrows():
member_1 = row["member_1_best_matching_member"]
member_2 = row["member_2_best_matching_member"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
article_title = row["article_title"]
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_article"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
# "_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "article",
"article_title": article_title,
"region": region
}
insert_document(db, collection, document)
i += 1
# survey connections
connections_filename="survey/final_processed_connections.csv"
survey_connections = pd.read_csv(connections_filename, index_col=0)
for _, row in survey_connections.iterrows():
member_1 = row["best_matching_member_name"]
member_2 = row["submitted_partner_best_matching_member_name"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_survey"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
"_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "survey",
}
insert_document(db, collection, document)
i += 1
def populate_events(
data_dir="data_for_graph",
cols_of_interest = [
"id",
"event_name",
"event_type",
"tenants",
"members",
"description",
"status",
"venue",
"starts_at",
"ends_at",
],
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Events", db,)
events_df_filename = os.path.join(data_dir, "all_events.csv")
events_df = pd.read_csv(events_df_filename, index_col=0)
# events_df = events_df.drop_duplicates(["event_name", "starts_at"])
i = 0
for _, row in events_df.iterrows():
event_name = row["event_name"]
document = {
"_key" : str(i),
"name": event_name,
**{
k: (convert_to_iso8601(row[k]) if not pd.isnull(row[k]) and k in ("starts_at", "ends_at", )
else row[k].split(separator) if not pd.isnull(row[k]) and k in ("tenants", "distinct_event_tags", "members")
                else row[k] if not pd.isnull(row[k])
                else None)
                for k in cols_of_interest
            },
        }
        insert_document(db, collection, document)
        i += 1
import pandas as pd
import os
import re
#add one column with the name of the df aka condition
print(snakemake.input["whippet_mapping_dc"])
def retrieve_mapping_stats(list_of_paths, key1, key2, key3):
mapping_summary={}
sample_name_list=[]
Mapped_Percent_list=[]
Multimap_Percent_list=[]
Novel_Junc_Percent_list=[]
for i in list_of_paths:
#read in as csv
print(i)
sample_name=os.path.basename(i)
print(sample_name)
        sample_name = re.sub(r"\.map$", "", sample_name)
print(sample_name)
sample_name_list.append(sample_name)
#f"{sample_name}"={}
with open(f"./{i}", 'r') as file:
text = file.read()
text=re.sub(r'\n', ' ', text)
text=re.sub(r'\t', ' ', text)
hit_key1=re.search(rf'{key1} (\S+)', text)
hit_key1=hit_key1.group(1)
hit_key2=re.search(rf'{key2} (\S+)', text)
hit_key2=hit_key2.group(1)
hit_key3=re.search(rf'{key3} (\S+)', text)
hit_key3=hit_key3.group(1)
Mapped_Percent_list.append(hit_key1)
Multimap_Percent_list.append(hit_key2)
Novel_Junc_Percent_list.append(hit_key3)
#mapping_summary[sample_name]={}
#mapping_summary[sample_name]={"Multimap_Percent":after_key2}
#f"{sample_name}"[f"key1"]=after_key1
#f"{sample_name}"[f"key2"]=after_key2
#f"{sample_name}"[f"key3"]=after_key3
#mapping_summary.append(f"{sample_name}") #this needs to be a dictionary
print(mapping_summary)
mapping_summary={"Run": sample_name_list, "Mapped_Percent":Mapped_Percent_list, "Multimap_Percent":Multimap_Percent_list, "Novel_Junc_Percent":Novel_Junc_Percent_list}
    mapping_summary = pd.DataFrame.from_records(mapping_summary)
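# Hedged usage sketch: the key strings below are assumptions about the wording in Whippet's
# .map summary files, and it assumes retrieve_mapping_stats is extended to return the
# assembled DataFrame; adjust both to the actual pipeline.
# mapping_df = retrieve_mapping_stats(
#     snakemake.input["whippet_mapping_dc"],
#     key1="Mapped_Percent", key2="Multimap_Percent", key3="Novel_Junc_Percent")
# mapping_df.to_csv(snakemake.output[0], index=False)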
from datetime import datetime
import time
import pandas as pd
import typer
import subprocess
import numpy as np
import random
import os.path
import noise_mechanism as nm
DEFAULT_DATA_DIRECTORY = "./tests/data"
DEFAULT_NOISE_DATA_DIRECTORY = "./data/mob-dp"
DEFAULT_FIPS = "36" # New York
DEFAULT_ITERATIONS = 1000
DEFAULT_MECHANISM = "laplace"
DEFAULT_EPSILON = 0.1
DEFAULT_DELTA = 1
app = typer.Typer()
def load(
data_dir: str, fips: str, start_date: datetime, end_date: datetime
) -> pd.DataFrame:
"""
Reads data from the data directory, assuming that the format is a bunch of subdirectories
in the form `activity_day=%Y-%m-%d` which will be converted to a Python date in the resulting
data frame.
"""
typer.echo(f"Reading data from {data_dir}")
df = pd.read_parquet(data_dir)
# Filter first, then convert to save some compute.
# This cases activity_day to string, but it *may* be faster to convert to a list of
# categories that match the dates.
if start_date:
str_start_date = start_date.strftime("%Y-%m-%d")
typer.echo(f"Filtering start date after {str_start_date}")
df.drop(df[df["activity_day"].astype(str) < str_start_date].index, inplace=True)
if end_date:
str_end_date = end_date.strftime("%Y-%m-%d")
typer.echo(f"Filtering start date strictly before {str_end_date}")
df.drop(df[df["activity_day"].astype(str) >= str_end_date].index, inplace=True)
typer.echo("Filtering by state")
df.drop(
df[(df["from_state_fips"] != fips) | (df["to_state_fips"] != fips)].index,
inplace=True,
)
df["activity_day"] = df["activity_day"].apply(
lambda str_day: datetime.strptime(str_day, "%Y-%m-%d").date()
)
return df
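# Expected on-disk layout for the parquet input (assumption based on the docstring above):
#   <data_dir>/activity_day=2020-01-01/part-00000.parquet
#   <data_dir>/activity_day=2020-01-02/part-00000.parquet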
def noisy_df(df: pd.DataFrame, iterations: int = DEFAULT_ITERATIONS, mechanism: str = DEFAULT_MECHANISM, epsilon: float = DEFAULT_EPSILON, delta: int = DEFAULT_DELTA):
dlen = len(df)
n = []
if mechanism == "laplace":
typer.echo(f"Applying DP noise on columns over {iterations} iterations")
with typer.progressbar(range(iterations)) as steps:
for _ in steps:
noisy_val = df['transitions'].apply(nm.laplaceMechanism, args=(epsilon,))
n.append(noisy_val)
m = np.average(n, axis=0)
    df['transitions'] = pd.Series(m)
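
# Minimal sketch (assumption) of what a Laplace mechanism such as nm.laplaceMechanism may do;
# the real noise_mechanism module may handle sensitivity, clamping or seeding differently.
def _laplace_mechanism_sketch(value: float, epsilon: float, sensitivity: float = 1.0) -> float:
    # Add zero-centred Laplace noise with scale = sensitivity / epsilon
    return value + np.random.laplace(loc=0.0, scale=sensitivity / epsilon)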
from flask import Flask, render_template, request, redirect, make_response, url_for
app_onc = Flask(__name__)
import astrodbkit
from astrodbkit import astrodb
from SEDkit import sed
from SEDkit import utilities as u
import os
import sys
import re
from io import StringIO
from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.models import ColumnDataSource, HoverTool, OpenURL, TapTool, Range1d
from bokeh.models.widgets import Panel, Tabs
from astropy import units as q
from astropy.coordinates import SkyCoord
import astropy.constants as ac
from scipy.ndimage.interpolation import zoom
import pandas as pd
import numpy as np
TABLE_CLASSES = 'display no-wrap hover table'
app_onc.vars = dict()
app_onc.vars['query'] = ''
app_onc.vars['search'] = ''
app_onc.vars['specid'] = ''
app_onc.vars['source_id'] = ''
db_file = os.environ['ONC_database']
db = astrodb.Database(db_file)
pd.set_option('max_colwidth', -1)
# Main page: a text box to take the SQL query
@app_onc.route('/')
@app_onc.route('/index', methods=['GET', 'POST'])
def onc_query():
defquery = 'SELECT * FROM sources'
if app_onc.vars['query']=='':
app_onc.vars['query'] = defquery
# Get list of the catalogs
source_count, = db.list("SELECT Count(*) FROM sources").fetchone()
catalogs = db.query("SELECT * FROM publications", fmt='table')
cat_names = ''.join(['<li><a href="https://ui.adsabs.harvard.edu/?#abs/{}/abstract">{}</a></li>'.format(cat['bibcode'],cat['description'].replace('VizieR Online Data Catalog: ','')) for cat in catalogs])
table_names = db.query("select * from sqlite_master where type='table' or type='view'")['name']
tables = '\n'.join(['<option value="{0}" {1}> {0}</option>'.format(t,'selected=selected' if t=='browse' else '') for t in table_names])
columns_html = []
columns_js = []
for tab in table_names:
cols = list(db.query("pragma table_info('{}')".format(tab))['name'])
col_html = ''.join(['<input type="checkbox" value="{0}" name="selections"> {0}<br>'.format(c) for c in cols])
columns_html.append('<div id="{}" class="columns" style="display:none">{}</div>'.format(tab,col_html))
col_js = ','.join(["{id:'"+c+"',label:'"+c+"',type:'string'}" for c in cols])
columns_js.append(col_js)
column_select = ''.join(columns_html)
column_script = ''.join(columns_js)
return render_template('index.html', cat_names=cat_names, source_count=source_count,
defsearch=app_onc.vars['search'], specid=app_onc.vars['specid'],
source_id=app_onc.vars['source_id'], version=astrodbkit.__version__,
                           tables=tables, column_select=column_select, column_script=column_script)
# Grab results of query and display them
@app_onc.route('/runquery', methods=['POST','GET'])
def onc_runquery():
# db = astrodb.Database(db_file)
app_onc.vars['query'] = request.form['query_to_run']
    htmltxt = app_onc.vars['query'].replace('<', '&lt;')
# Only SELECT commands are allowed
if not app_onc.vars['query'].lower().startswith('select'):
return render_template('error.html', headermessage='Error in Query',
errmess='<p>Only SELECT queries are allowed. You typed:</p><p>'+htmltxt+'</p>')
# Run the query
stdout = sys.stdout # Keep a handle on the real standard output
sys.stdout = mystdout = StringIO() # Choose a file-like object to write to
try:
t = db.query(app_onc.vars['query'], fmt='table', use_converters=False)
except ValueError:
t = db.query(app_onc.vars['query'], fmt='array', use_converters=False)
except:
return render_template('error.html', headermessage='Error in Query',
errmess='<p>Error in query:</p><p>'+htmltxt+'</p>')
sys.stdout = stdout
# Check for any errors from mystdout
if mystdout.getvalue().lower().startswith('could not execute'):
return render_template('error.html', headermessage='Error in Query',
                               errmess='<p>Error in query:</p><p>'+mystdout.getvalue().replace('<', '&lt;')+'</p>')
# Check how many results were found
if type(t)==type(None):
return render_template('error.html', headermessage='No Results Found',
errmess='<p>No entries found for query:</p><p>' + htmltxt +
                               '</p><p>'+mystdout.getvalue().replace('<', '&lt;')+'</p>')
    # Rename RA and Dec columns
for idx,name in enumerate(t.colnames):
if name.endswith('.ra'):
t[name].name = 'ra'
if name.endswith('.dec'):
t[name].name = 'dec'
if name.endswith('.id'):
t[name].name = 'id'
if name.endswith('.source_id'):
t[name].name = 'source_id'
# Convert to Pandas data frame
try:
data = t.to_pandas()
except AttributeError:
return render_template('error.html', headermessage='Error in Query',
errmess='<p>Error for query:</p><p>'+htmltxt+'</p>')
try:
sources = data[['ra','dec','source_id']].values.tolist()
sources = [[i[0], i[1], 'Source {}'.format(int(i[2])), int(i[2])] for i in sources]
except:
try:
sources = data[['ra','dec','id']].values.tolist()
sources = [[i[0], i[1], 'Source {}'.format(int(i[2])), int(i[2])] for i in sources]
except:
sources = ''
# Create checkbox first column
data = add_checkboxes(data)
# Toggle columns
    cols = 'Toggle Column: '+' - '.join(['<a class="toggle-vis">{}</a>'.format(name) for i,name in enumerate(t.colnames)])
# Data for export
export = [strip_html(str(i)) for i in list(data)[1:]]
export = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".format(export)
# Add links to columns
data = link_columns(data, db, ['id','source_id','spectrum','image'])
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if any([isinstance(i, (int, float)) for i in t[c]])]
axes = '\n'.join(['<option value="{}"> {}</option>'.format(repr(b)+","+repr(list(t[b])), b) for b in columns])
    table_html = data.to_html(classes='display', index=False).replace('&lt;','<').replace('&gt;','>')
print(table_html)
return render_template('results.html', table=table_html, query=app_onc.vars['query'], cols=cols,
sources=sources, axes=axes, export=export)
# Grab results of query and display them
@app_onc.route('/buildquery', methods=['POST', 'GET'])
def onc_buildquery():
# Build the query from all the input
entries = request.form
print(entries)
selections, builder_rules = [], []
for key in entries.keys():
for value in entries.getlist(key):
if key=='selections':
selections.append(value)
if key.startswith('builder_rule'):
builder_rules.append((key,value))
# Translate the builder rules into a SQL WHERE clause
where_clause = ''
for k,v in builder_rules:
pass
if where_clause:
where_clause = ' WHERE {}'.format(where_clause)
build_query = "SELECT {} FROM {}{}".format(','.join(selections), entries['table'], where_clause)
# db = astrodb.Database(db_file)
app_onc.vars['query'] = build_query
    htmltxt = app_onc.vars['query'].replace('<', '&lt;')
# Only SELECT commands are allowed
if not app_onc.vars['query'].lower().startswith('select'):
return render_template('error.html', headermessage='Error in Query',
errmess='<p>Only SELECT queries are allowed. You typed:</p><p>' + htmltxt + '</p>')
# Run the query
stdout = sys.stdout # Keep a handle on the real standard output
sys.stdout = mystdout = StringIO() # Choose a file-like object to write to
try:
t = db.query(app_onc.vars['query'], fmt='table', use_converters=False)
except ValueError:
t = db.query(app_onc.vars['query'], fmt='array', use_converters=False)
except:
return render_template('error.html', headermessage='Error in Query',
errmess='<p>Error in query:</p><p>' + htmltxt + '</p>')
sys.stdout = stdout
# Check for any errors from mystdout
if mystdout.getvalue().lower().startswith('could not execute'):
return render_template('error.html', headermessage='Error in Query',
                               errmess='<p>Error in query:</p><p>' + mystdout.getvalue().replace('<', '&lt;') + '</p>')
# Check how many results were found
if type(t) == type(None):
return render_template('error.html', headermessage='No Results Found',
errmess='<p>No entries found for query:</p><p>' + htmltxt +
                               '</p><p>' + mystdout.getvalue().replace('<', '&lt;') + '</p>')
    # Rename RA and Dec columns
for idx, name in enumerate(t.colnames):
if name.endswith('.ra'):
t[name].name = 'ra'
if name.endswith('.dec'):
t[name].name = 'dec'
if name.endswith('.id'):
t[name].name = 'id'
# Convert to Pandas data frame
try:
data = t.to_pandas()
except AttributeError:
return render_template('error.html', headermessage='Error in Query',
errmess='<p>Error for query:</p><p>' + htmltxt + '</p>')
# Create checkbox first column
data = add_checkboxes(data)
try:
script, div, warning_message = onc_skyplot(t)
except:
script = div = warning_message = ''
# Add links to columns
data = link_columns(data, db, ['id', 'source_id', 'spectrum', 'image'])
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if isinstance(t[c][0], (int, float))]
axes = '\n'.join(['<option value="{}"> {}</option>'.format(repr(b) + "," + repr(list(t[b])), b) for b in columns])
# Data for export
export = [strip_html(str(i)) for i in list(data)[1:]]
export = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".format(export)
# Generate HTML
    table_html = data.to_html(classes='display', index=False).replace('&lt;', '<').replace('&gt;', '>')
return render_template('results.html', table=table_html, query=app_onc.vars['query'],
script=script, plot=div, warning=warning_message, axes=axes, export=export)
# Grab results of query and display them
@app_onc.route('/plot', methods=['POST','GET'])
def onc_plot():
# Get the axes to plot
xaxis, xdata = eval(request.form['xaxis'])
yaxis, ydata = eval(request.form['yaxis'])
# Make the plot
tools = "resize,crosshair,pan,wheel_zoom,box_zoom,reset"
p = figure(tools=tools, x_axis_label=xaxis, y_axis_label=yaxis, plot_width=800)
p.circle(xdata, ydata)
title = '{} v. {}'.format(xaxis,yaxis)
script, div = components(p)
# Also make a table
table = pd.DataFrame(np.array([xdata,ydata]).T, columns=[xaxis,yaxis])
    table = table.to_html(classes='display', index=False).replace('&lt;','<').replace('&gt;','>')
return render_template('plot.html', title=title, script=script, plot=div, table=table)
# Grab selected inventory and plot SED
@app_onc.route('/sed', methods=['POST'])
@app_onc.route('/inventory/sed', methods=['POST'])
def onc_sed():
# Get the ids of all the data to use
entries = request.form
age = (float(entries['age_min'])*q.Myr, float(entries['age_max'])*q.Myr)
radius = (float(entries['radius'])*ac.R_sun,float(entries['radius_unc'])*ac.R_sun)
source_id = int(entries['sources'])
spt_id = int(entries.get('spectral_types', 0))
plx_id = int(entries.get('parallaxes', 0))
# Collect all spec_ids and phot_ids
phot_ids, spec_ids = [], []
for key in entries.keys():
for value in entries.getlist(key):
if key=='photometry':
phot_ids.append(int(value))
elif key=='spectra':
spec_ids.append(int(value))
# Make the astropy tables
sed_dict = {}
sed_dict['sources'] = source_id
if spt_id:
sed_dict['spectral_types'] = spt_id
if plx_id:
sed_dict['parallaxes'] = plx_id
if spec_ids:
sed_dict['spectra'] = spec_ids
if phot_ids:
sed_dict['photometry'] = phot_ids
# Include ONC distance as default if no parallax
dist, warning = '', ''
if 'parallaxes' not in sed_dict:
dist = (388*q.pc,20*q.pc)
warning = "No distance given for this source. Using \(388\pm 20 pc\) from Kounkel et al. (2016)"
# Make the SED
try:
SED = sed.MakeSED(source_id, db, from_dict=sed_dict, dist=dist, age=age, radius=radius, phot_aliases='')
p = SED.plot(output=True)
except IOError:
return render_template('error.html', headermessage='SED Error', errmess='<p>At least one spectrum or photometric point is required to construct an SED.</p>')
# Generate the HTML
script, div = components(p)
# Get params to print
fbol, mbol, teff, Lbol, radius = ['NaN']*5
try:
fbol = '\({:.3e} \pm {:.3e}\)'.format(SED.fbol.value,SED.fbol_unc.value)
except:
pass
try:
mbol = '\({} \pm {}\)'.format(SED.mbol,SED.mbol_unc)
except:
pass
try:
teff = '\({} \pm {}\)'.format(int(SED.Teff.value),SED.Teff_unc.value if np.isnan(SED.Teff_unc.value) else int(SED.Teff_unc.value)) if SED.distance else '-'
except:
pass
try:
Lbol = '\({:.3f} \pm {:.3f}\)'.format(SED.Lbol_sun,SED.Lbol_sun_unc) if SED.distance else '-'
except:
pass
try:
radius = '\({:.3f} \pm {:.3f}\)'.format(SED.radius.to(ac.R_sun).value,SED.radius_unc.to(ac.R_sun).value) if SED.radius else '-'
except:
pass
results = [[title,tbl2html(tab, roles='grid', classes='dataframe display no_pagination dataTable no-footer')] for tab,title in zip([SED.sources,SED.spectral_types,SED.parallaxes,SED.photometry,SED.spectra],['sources','spectral_types','parallaxes','photometry','spectra']) if len(tab)>0]
return render_template('sed.html', script=script, plot=div, spt=SED.SpT or '-', mbol=mbol, fbol=fbol,
teff=teff, Lbol=Lbol, radius=radius, title=SED.name, warning=warning, results=results)
def error_bars(xs, ys, zs):
"""
Generate errorbars for the photometry since Bokeh doesn't do it
"""
# Create the coordinates for the errorbars
err_xs, err_ys = [], []
for x, y, yerr in zip(xs, ys, zs):
if not np.isnan(yerr):
err_xs.append((x, x))
err_ys.append((y-yerr, y+yerr))
return (err_xs, err_ys)
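# Worked example (illustrative numbers): error_bars([1, 2], [10.0, 20.0], [0.5, np.nan])
# returns ([(1, 1)], [(9.5, 10.5)]) -- points whose uncertainty is NaN are skipped.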
def link_columns(data, db, columns):
view = 'View' #<img class="view" src="{{url_for("static", filename="images/view.png")}}" />
# Change id to a link
if 'id' in columns and 'id' in data and 'source_id' not in data:
linklist = []
for i, elem in enumerate(data['id']):
link = '<a href="inventory/{0}">{1}</a>'.format(data.iloc[i]['id'], elem)
linklist.append(link)
data['id'] = linklist
# Change source_id column to a link
if 'source_id' in columns and 'source_id' in data:
linklist = []
for i, elem in enumerate(data['source_id']):
link = '<a href="inventory/{}">{}</a>'.format(data.iloc[i]['source_id'], elem)
linklist.append(link)
data['source_id'] = linklist
# Change spectrum column to a link
if 'spectrum' in columns and 'spectrum' in data:
speclist = []
for index, row in data.iterrows():
spec = '<a href="../spectrum/{}">{}</a>'.format(row['id'],view)
speclist.append(spec)
data['spectrum'] = speclist
# Change image column to a link
if 'image' in columns and 'image' in data:
imglist = []
for index, row in data.iterrows():
img = '<a href="../image/{}">{}</a>'.format(row['id'],view)
imglist.append(img)
data['image'] = imglist
# Change vizier URL to a link
if 'record' in columns and 'record' in data:
reclist = []
for index, row in data.iterrows():
if row['record'] is None:
rec = None
else:
rec = '<a href="{}">{}</a>'.format(row['record'],view)
reclist.append(rec)
data['record'] = reclist
return data
@app_onc.route('/export', methods=['POST'])
def onc_export():
# Get all the checked rows
checked = request.form
# Get column names
print(checked.get('cols'))
results = [list(eval(checked.get('cols')))]
for k in sorted(checked):
if k.isdigit():
# Convert string to list and strip HTML
vals = eval(checked[k])
for i,v in enumerate(vals):
try:
vals[i] = str(v).split('>')[1].split('<')[0]
except:
pass
results.append(vals)
# Make an array to export
results = np.array(results, dtype=str)
filename = 'ONCdb_results.txt'
np.savetxt(filename, results, delimiter='|', fmt='%s')
with open(filename, 'r') as f:
file_as_string = f.read()
os.remove(filename) # Delete the file after it's read
response = make_response(str(file_as_string))
response.headers["Content-type"] = 'text; charset=utf-8'
response.headers["Content-Disposition"] = "attachment; filename={}".format(filename)
return response
def add_checkboxes(data, type='checkbox', id_only=False, table_name='', all_checked=False):
"""
Create checkbox first column in Pandas dataframe
"""
buttonlist = []
for index, row in data.iterrows():
val = strip_html(repr(list(row)))
if id_only:
val = val.split(',')[0].replace('[','')
tab = table_name or str(index)
        button = '<input type="{}" name="{}" value="{}"{}>'.format(type, tab, val, ' checked' if (index==0 and type=='radio') or (all_checked and type=='checkbox') else '')
buttonlist.append(button)
data['Select'] = buttonlist
cols = data.columns.tolist()
cols.pop(cols.index('Select'))
data = data[['Select']+cols]
return data
# Perform a search
@app_onc.route('/search', methods=['POST'])
def onc_search():
# db = astrodb.Database(db_file)
app_onc.vars['search'] = request.form['search_to_run']
search_table = request.form['table']
search_value = app_onc.vars['search']
search_radius = 1/60.
# Process search
search_value = search_value.replace(',', ' ').split()
if len(search_value) == 1:
search_value = search_value[0]
else:
try:
search_value = [float(s) for s in search_value]
search_radius = float(request.form['radius'])/60.
except:
return render_template('error.html', headermessage='Error in Search',
errmess='<p>Could not process search input:</p>' +
'<p>' + app_onc.vars['search'] + '</p>')
# Run the search
stdout = sys.stdout # Keep a handle on the real standard output
sys.stdout = mystdout = StringIO() # Choose a file-like object to write to
# Get table of results
t = db.search(search_value, search_table, radius=search_radius, fetch=True)
sys.stdout = stdout
try:
data = t.to_pandas()
except AttributeError:
return render_template('error.html', headermessage='Error in Search',
                               errmess=mystdout.getvalue().replace('<', '&lt;'))
try:
sources = data[['ra','dec','source_id']].values.tolist()
sources = [[i[0], i[1], 'Source {}'.format(int(i[2])), int(i[2])] for i in sources]
except:
try:
sources = data[['ra','dec','id']].values.tolist()
sources = [[i[0], i[1], 'Source {}'.format(int(i[2])), int(i[2])] for i in sources]
except:
sources = ''
if not data.empty:
# Create checkbox first column
data = add_checkboxes(data)
# Toggle columns
        cols = 'Toggle Column: '+' - '.join(['<a class="toggle-vis">{}</a>'.format(name) for i,name in enumerate(t.colnames)])
# Data for export
export = [strip_html(str(i)) for i in list(data)[1:]]
export = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".format(export)
# Add links to columns
data = link_columns(data, db, ['id', 'source_id', 'image','spectrum','record'])
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if isinstance(t[c][0], (int, float))]
axes = '\n'.join(['<option value="{}"> {}</option>'.format(repr(b)+","+repr(list(t[b])), b) for b in columns])
        return render_template('results.html', table=data.to_html(classes='display', index=False).replace('&lt;','<').replace('&gt;','>'), query=search_value,
sources=sources, cols=cols, axes=axes, export=export)
else:
return render_template('error.html', headermessage='Error in Search',
errmess='<p>This input returns no results:</p>' +
'<p>' + app_onc.vars['search'] + '</p>')
# Plot a spectrum
@app_onc.route('/spectrum', methods=['POST'])
@app_onc.route('/spectrum/<int:specid>')
def onc_spectrum(specid=None):
# db = astrodb.Database(db_file)
if specid is None:
app_onc.vars['specid'] = request.form['spectrum_to_plot']
path = ''
else:
app_onc.vars['specid'] = specid
path = '../'
# If not a number, error
if not str(app_onc.vars['specid']).isdigit():
return render_template('error.html', headermessage='Error in Input',
errmess='<p>Input was not a number.</p>')
# Grab the spectrum
stdout = sys.stdout # Keep a handle on the real standard output
sys.stdout = mystdout = StringIO() # Choose a file-like object to write to
query = 'SELECT * FROM spectra WHERE id={}'.format(app_onc.vars['specid'])
t = db.query(query, fmt='table')
sys.stdout = stdout
# Check for errors first
if mystdout.getvalue().lower().startswith('could not execute'):
return render_template('error.html', headermessage='Error in Query',
                               errmess='<p>Error in query:</p><p>'+mystdout.getvalue().replace('<', '&lt;')+'</p>')
# Check if found anything
if isinstance(t, type(None)):
return render_template('error.html', headermessage='No Result', errmess='<p>No spectrum found.</p>')
# Get data
wav = 'Wavelength ('+t[0]['wavelength_units']+')'
flux = 'Flux ('+t[0]['flux_units']+')'
spec = t[0]['spectrum']
filepath = spec.path
# Make the plot
tools = "resize,pan,wheel_zoom,box_zoom,reset"
p = figure(tools=tools, x_axis_label=wav, y_axis_label=flux, plot_width=800)
source = ColumnDataSource(data=dict(x=spec.data[0], y=spec.data[1]))
hover = HoverTool(tooltips=[( 'wavelength', '$x'),( 'flux', '$y')], mode='vline')
p.add_tools(hover)
p.line('x', 'y', source=source)
script, div = components(p)
t['spectrum'] = [sp.path for sp in t['spectrum']]
meta = t.to_pandas().to_html(classes='display', index=False)
return render_template('spectrum.html', script=script, plot=div, meta=meta, download=filepath)
# Display an image
@app_onc.route('/image', methods=['POST'])
@app_onc.route('/image/<int:imgid>')
def onc_image(imgid=None):
# db = astrodb.Database(db_file)
if imgid is None:
app_onc.vars['imgid'] = request.form['image_to_plot']
path = ''
else:
app_onc.vars['imgid'] = imgid
path = '../'
# If not a number, error
if not str(app_onc.vars['imgid']).isdigit():
return render_template('error.html', headermessage='Error in Input',
errmess='<p>Input was not a number.</p>')
# Grab the spectrum
stdout = sys.stdout # Keep a handle on the real standard output
sys.stdout = mystdout = StringIO() # Choose a file-like object to write to
query = 'SELECT * FROM images WHERE id={}'.format(app_onc.vars['imgid'])
t = db.query(query, fmt='table')
sys.stdout = stdout
# Check for errors first
if mystdout.getvalue().lower().startswith('could not execute'):
return render_template('error.html', headermessage='Error in Query',
                               errmess='<p>Error in query:</p><p>'+mystdout.getvalue().replace('<', '&lt;')+'</p>')
# Check if found anything
if isinstance(t, type(None)):
return render_template('error.html', headermessage='No Result', errmess='<p>No image found.</p>')
try:
img = t[0]['image'].data
# Down sample so the figure displays faster
img = zoom(img, 0.05, prefilter=False)
filepath = t[0]['image'].path
# Make the plot
tools = "resize,crosshair,pan,wheel_zoom,box_zoom,reset"
# create a new plot
p = figure(tools=tools, plot_width=800)
# Make the plot
p.image(image=[img], x=[0], y=[0], dw=[img.shape[0]], dh=[img.shape[1]])
p.x_range = Range1d(0, img.shape[0])
p.y_range = Range1d(0, img.shape[1])
script, div = components(p)
t['image'] = [sp.path for sp in t['image']]
meta = t.to_pandas().to_html(classes='display', index=False)
except IOError:
        script, div, meta, filepath = '', '', '', ''
return render_template('image.html', script=script, plot=div, meta=meta, download=filepath)
# Check inventory
@app_onc.route('/inventory', methods=['POST'])
@app_onc.route('/inventory/<int:source_id>')
def onc_inventory(source_id=None):
# db = astrodb.Database(db_file)
if source_id is None:
app_onc.vars['source_id'] = request.form['id_to_check']
path = ''
else:
app_onc.vars['source_id'] = source_id
path = '../'
# Grab inventory
stdout = sys.stdout
sys.stdout = mystdout = StringIO()
t = db.inventory(app_onc.vars['source_id'], fetch=True, fmt='table')
sys.stdout = stdout
t = {name:t[name][[col for col in t[name].colnames if col!='source_id']] for name in t.keys()}
# Check for errors (no results)
if mystdout.getvalue().lower().startswith('no source'):
return render_template('error.html', headermessage='No Results Found',
                               errmess='<p>'+mystdout.getvalue().replace('<', '&lt;')+'</p>')
# Empty because of invalid input
if len(t) == 0:
return render_template('error.html', headermessage='Error',
errmess="<p>You typed: "+app_onc.vars['source_id']+"</p>")
# Grab object information
allnames = t['sources']['names'][0]
altname = None
if allnames is not None:
altname = allnames.split(',')[0]
objname = t['sources']['designation'][0] or altname or 'Source {}'.format(app_onc.vars['source_id'])
ra = t['sources']['ra'][0]
dec = t['sources']['dec'][0]
c = SkyCoord(ra=ra*q.degree, dec=dec*q.degree)
coords = c.to_string('hmsdms', sep=':', precision=2)
# Grab distance
try:
distance = 1000./t['parallaxes']['parallax']
dist_string = ', '.join(['{0:.2f}'.format(i) for i in distance])
dist_string += ' pc'
except:
dist_string = 'N/A'
# Grab spectral type
try:
sptype_txt = []
for row in t['spectral_types'][['spectral_type','spectral_type_unc','suffix','gravity','luminosity_class']]:
spt = u.specType(list(row))
sptype_txt.append(spt.replace('None',''))
sptype_txt = ' / '.join(sptype_txt)
except:
sptype_txt = 'N/A'
# Grab comments
comments = t['sources']['comments'][0] or ''
# Get external queries
smbd = 'http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={}+%2B{}&CooFrame=ICRS&CooEpoch=2000&CooEqui=2000&CooDefinedFrames=none&Radius=10&Radius.unit=arcsec&submit=submit+query'.format(ra,dec)
vzr = 'http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=&-out.add=_r&-out.add=_RAJ%2C_DEJ&-sort=_r&-to=&-out.max=20&-meta.ucd=2&-meta.foot=1&-c.rs=20&-c={}+{}'.format(ra,dec)
# Add order to names for consistent printing
ordered_names = ['sources','spectral_types','parallaxes','photometry','spectra','images']
# Make the HTML
html_tables = []
for name in ordered_names:
if name in t:
# Convert to pandas
table = t[name].to_pandas()
# Add checkboxes for SED creation
type = 'radio' if name in ['sources','spectral_types','parallaxes'] else 'checkbox'
table = add_checkboxes(table, type=type, id_only=True, table_name=name)
# Add links to the columns
table = link_columns(table, db, ['source_id', 'image','spectrum', 'record'])
# Convert to HTML
            table = table.to_html(classes='display no_pagination no_wrap', index=False).replace('&lt;', '<').replace('&gt;', '>')
else:
table = '<p style="padding-top:25px;">No records in the <code>{}</code> table for this source.</p>'.format(name)
table = '<h2 style="position:relative; bottom:-25px">{}</h2>'.format(name)+table
html_tables.append(table)
if 'photometry' in t:
phots = [[p['ra'],p['dec'],p['band'],'{}, {}'.format(p['ra'],p['dec']), '{} ({})'.format(p['magnitude'],p['magnitude_unc'])] for p in t['photometry']]
else:
phots = []
delta_ra = delta_dec = 0.025
sources = db.query("SELECT id,ra,dec,names FROM sources WHERE (ra BETWEEN {1}-{0} AND {1}+{0}) AND (dec BETWEEN {3}-{2} AND {3}+{2}) AND (ra<>{1} AND dec<>{3})".format(delta_ra, ra, delta_dec, dec), fmt='array')
if sources is None:
sources = []
warning = ''
if any(['d{}'.format(i) in comments for i in range(20)]):
warning = "Warning: This source is confused with its neighbors and the data listed below may not be trustworthy."
print(html_tables)
return render_template('inventory.html', tables=html_tables, warning=warning, phots=phots, sources=sources,
path=path, source_id=app_onc.vars['source_id'], name=objname, coords=coords, allnames=allnames,
distance=dist_string, comments=comments, sptypes=sptype_txt, ra=ra, dec=dec, simbad=smbd, vizier=vzr)
# Check Schema
# @app_onc.route('/schema.html', methods=['GET', 'POST'])
@app_onc.route('/schema', methods=['GET', 'POST'])
def onc_schema():
# db = astrodb.Database(db_file)
# Get table names and their structure
try:
table_names = db.query("SELECT name FROM sqlite_sequence", unpack=True)[0]
except:
table_names = db.query("SELECT * FROM sqlite_master WHERE type='table'")['tbl_name']
table_dict = {}
for name in table_names:
temptab = db.query('PRAGMA table_info('+name+')', fmt='table')
table_dict[name] = temptab
table_html = [[db.query("select count(id) from {}".format(x))[0][0], table_dict[x].to_pandas().to_html(classes=TABLE_CLASSES, index=False)] for x in sorted(table_dict.keys())]
titles = ['na']+sorted(table_dict.keys())
return render_template('schema.html', tables=table_html, titles=titles)
@app_onc.route('/browse', methods=['GET', 'POST'])
def onc_browse():
"""Examine the full source list with clickable links to object summaries"""
table = request.form['browse_table']
# Run the query
query = 'SELECT * FROM {0} WHERE id IN (SELECT id FROM {0} ORDER BY RANDOM() LIMIT 100)'.format(table)
t = db.query(query, fmt='table')
try:
script, div, warning_message = onc_skyplot(t)
except IOError:
script = div = warning_message = ''
# Convert to Pandas data frame
data = t.to_pandas()
data.index = data['id']
try:
sources = data[['ra','dec','source_id']].values.tolist()
sources = [[i[0], i[1], 'Source {}'.format(int(i[2])), int(i[2])] for i in sources]
except:
try:
sources = data[['ra','dec','id']].values.tolist()
sources = [[i[0], i[1], 'Source {}'.format(int(i[2])), int(i[2])] for i in sources]
except:
sources = ''
# Change column to a link
data = link_columns(data, db, ['id','source_id','spectrum','image', 'record'])
# Create checkbox first column
data = add_checkboxes(data)
cols = [strip_html(str(i)) for i in data.columns.tolist()[1:]]
cols = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".format(cols)
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if isinstance(t[c][0], (int, float))]
axes = '\n'.join(['<option value="{}"> {}</option>'.format(repr(b)+","+repr(list(t[b])), b) for b in columns])
    return render_template('results.html', table=data.to_html(classes='display', index=False).replace('&lt;','<').replace('&gt;','>'), query=query,
sources=sources, cols=cols, axes=axes)
def strip_html(s):
return re.sub(r'<[^<]*?/?>','',s)
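# Example: strip_html('<a href="inventory/1">Source 1</a>') -> 'Source 1'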
def tbl2html(table, classes='', ids='', roles=''):
"""
Sloppily converts an astropy table to html (when mixin columns won't let you do table.)
"""
# Get header
columns = ''.join(['<th>{}</th>'.format(col) for col in table.colnames])
# Build table and header
out = "<table class='table {}' id='{}' role='{}'><thead>{}</thead><tbody>".format(classes,ids,roles,columns)
# Add rows
for row in np.array(table):
out += '<tr><td>'+'</td><td>'.join(list(map(str,row)))+'</td></tr>'
out += "</tbody></table>"
return out
def onc_skyplot(t):
"""
Create a sky plot of the database objects
"""
# Convert to Pandas data frame
data = t.to_pandas()
data.index = data['id']
script, div, warning_message = '', '', ''
if 'ra' in data and 'dec' in data:
# Remove objects without RA/Dec
num_missing = np.sum(pd.isnull(data.get('ra')))
if num_missing > 0:
warning_message = 'Note: {} objects had missing coordinate information and were removed.'.format(num_missing)
data = data[pd.notnull(data.get('ra'))]
else:
warning_message = ''
# Coerce to numeric
        data['ra'] = pd.to_numeric(data['ra'])
import collections.abc
from pathlib import Path
import pandas as pd
import xml.etree.ElementTree as ET
from io import BytesIO
from typing import List, Union, Dict, Iterator
from pandas import DataFrame
from .types import UploadException, UploadedFile
from .config import column_names
import logging
logger = logging.getLogger(__name__)
class _BufferedUploadedFile(collections.abc.Mapping):
def __init__(self, file, name, description):
self.name = name
self.description = description
self.file = Path(file)
if not self.file.is_file():
raise FileNotFoundError(f"{self.file} not found.")
def __getitem__(self, k):
if k == "name":
return self.name
elif k == "description":
return self.description
elif k == "fileText":
with open(self.file, 'rb') as file:
return file.read()
else:
raise AttributeError(f'{k} not found')
def __len__(self) -> int:
return 3
def __iter__(self) -> Iterator:
        return iter(("name", "description", "fileText"))
def read_from_text(raw_files: List[UploadedFile]) -> Dict[str, DataFrame]:
"""
Reads from a raw list of files passed from javascript. These files are of
the form e.g.
[
{name: 'filename.csv', fileText: <file contents>, description: <upload metadata>}
]
This function will try to catch most basic upload errors, and dispatch other errors
to either the csv or xml reader based on the file extension.
"""
if len(raw_files) == 0:
raise UploadException('No files uploaded!')
extensions = list(set([f["name"][-3:].lower() for f in raw_files]))
if len(extensions) != 1:
raise UploadException(f'Mix of CSV and XML files found ({extensions})! Please reupload.')
else:
if extensions[0] == 'csv':
return read_csvs_from_text(raw_files)
elif extensions[0] == 'xml':
return read_xml_from_text(raw_files[0]['fileText'])
else:
raise UploadException(f'Unknown file type {extensions[0]} found.')
def read_files(files: Union[str, Path]) -> List[UploadedFile]:
uploaded_files: List[_BufferedUploadedFile] = []
for filename in files:
uploaded_files.append(_BufferedUploadedFile(file=filename, name=filename, description="This year"))
return uploaded_files
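# Hedged usage sketch (file names are hypothetical examples):
#   uploaded = read_files(["this_year_episodes.csv", "this_year_header.csv"])
#   tables = read_from_text([dict(f) for f in uploaded])
#   # tables -> {"Episodes": DataFrame, "Header": DataFrame, ...}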
def read_csvs_from_text(raw_files: List[UploadedFile]) -> Dict[str, DataFrame]:
def _get_file_type(df) -> str:
for table_name, expected_columns in column_names.items():
if set(df.columns) == set(expected_columns):
logger.info(f'Loaded {table_name} from CSV. ({len(df)} rows)')
return table_name
else:
raise UploadException(f'Failed to match provided data ({list(df.columns)}) to known column names!')
files = {}
for file_data in raw_files:
csv_file = BytesIO(file_data["fileText"])
df = pd.read_csv(csv_file)
file_name = _get_file_type(df)
if 'This year' in file_data['description']:
name = file_name
elif 'Prev year' in file_data['description']:
name = file_name + '_last'
else:
raise UploadException(f'Unrecognized file description {file_data["description"]}')
files[name] = df
return files
def read_xml_from_text(xml_string) -> Dict[str, DataFrame]:
header_df = []
episodes_df = []
uasc_df = []
oc2_df = []
oc3_df = []
ad1_df = []
reviews_df = []
sbpfa_df = []
prev_perm_df = []
missing_df = []
def read_data(table):
# The CHILDID tag needs to be renamed to CHILD to match the CSV
# The PL tag needs to be renamed to PLACE to match the CSV
conversions = {
'CHILDID': 'CHILD',
'PL': 'PLACE',
}
return {
conversions.get(node.tag, node.tag): node.text
for node in table.iter() if len(node) == 0
}
def get_fields_for_table(all_data, table_name):
def read_value(k):
val = all_data.get(k, None)
try:
val = int(val)
except:
pass
return val
return pd.Series({k: read_value(k) for k in column_names[table_name]})
for child in ET.fromstring(xml_string):
all_data = read_data(child)
header_df.append(get_fields_for_table(all_data, 'Header'))
if all_data.get('UASC', None) is not None:
uasc_df.append(get_fields_for_table(all_data, 'UASC'))
if all_data.get('IN_TOUCH', None) is not None:
oc3_df.append(get_fields_for_table(all_data, 'OC3'))
if all_data.get('DATE_INT', None) is not None:
ad1_df.append(get_fields_for_table(all_data, 'AD1'))
for table in child:
if table.tag == 'EPISODE':
data = read_data(table)
episodes_df.append(get_fields_for_table({**all_data, **data}, 'Episodes'))
elif table.tag == 'HEADER':
for child_table in table:
if child_table.tag == 'AREVIEW':
data = read_data(child_table)
reviews_df.append(get_fields_for_table({**all_data, **data}, 'Reviews'))
elif child_table.tag == 'AMISSING':
data = read_data(child_table)
missing_df.append(get_fields_for_table({**all_data, **data}, 'Missing'))
elif child_table.tag == 'OC2':
data = read_data(child_table)
oc2_df.append(get_fields_for_table({**all_data, **data}, 'OC2'))
elif child_table.tag == 'PERMANENCE':
data = read_data(child_table)
prev_perm_df.append(get_fields_for_table({**all_data, **data}, 'PrevPerm'))
elif child_table.tag == 'AD_PLACED':
data = read_data(child_table)
sbpfa_df.append(get_fields_for_table({**all_data, **data}, 'PlacedAdoption'))
data = {
'Header': pd.DataFrame(header_df),
'Episodes': pd.DataFrame(episodes_df),
'UASC': pd.DataFrame(uasc_df),
'Reviews': pd.DataFrame(reviews_df),
'OC2': pd.DataFrame(oc2_df),
        'OC3': pd.DataFrame(oc3_df),
        'AD1': pd.DataFrame(ad1_df),
        'PrevPerm': pd.DataFrame(prev_perm_df),
        'PlacedAdoption': pd.DataFrame(sbpfa_df),
        'Missing': pd.DataFrame(missing_df),
    }
    return data
# Created by MeaningCloud Support Team
# Copyright 2020 MeaningCloud LLC
# Date: 23/02/2020
import sys
import os
import meaningcloud
import pandas as pd
# @param license_key - Your license key (found in the subscription section in https://www.meaningcloud.com/developer/)
license_key = '<<<your license key>>>'
# @param text_column - Name of the column where the texts will be
text_column = 'Review'
# auxiliary variables to follow progress of the process
index_count = 1
# Analyzes the text passed as a parameter
def analyzeText(text):
global index_count
print("Analyzing text " + str(index_count))
# this is where we are going to store our results
polarity = ''
entities = ''
concepts = ''
iab2 = ''
try:
# We are going to make a request to the Sentiment Analysis API
print("\tGetting sentiment analysis...")
sentiment_response = meaningcloud.SentimentResponse(meaningcloud.SentimentRequest(license_key, lang='en', txt=text, txtf='markup').sendReq())
if sentiment_response.isSuccessful():
polarity = sentiment_response.getGlobalScoreTag()
else:
            print('Request to sentiment was not successful: ' + sentiment_response.getStatusMsg())
# We are going to make a request to the Topics Extraction API
print("\tGetting entities and concepts...")
topics_req = meaningcloud.TopicsRequest(license_key, txt=text, lang='en', topicType='ec', otherparams={'txtf':'markup'})
topics_response = meaningcloud.TopicsResponse(topics_req.sendReq())
# If there are no errors in the request, we extract the entities and concepts
if topics_response.isSuccessful():
entities_list = topics_response.getEntities()
formatted_entities = []
if entities_list:
for entity in entities_list:
if int(topics_response.getTopicRelevance(entity)) >= 100: #we limit the entities to those with relevance higher than 100
formatted_entities.append(topics_response.getTopicForm(entity) + ' (' + topics_response.getTypeLastNode(topics_response.getOntoType(entity)) + ')')
entities = ', '.join(formatted_entities)
concepts_list = topics_response.getConcepts()
formatted_concepts = []
if concepts_list:
for concept in concepts_list:
                    if int(topics_response.getTopicRelevance(concept)) >= 100: #we limit the concepts to those with relevance higher than 100
formatted_concepts.append(topics_response.getTopicForm(concept))
concepts = ', '.join(list(dict.fromkeys(formatted_concepts)))
else:
            print('Request to topics was not successful: ' + topics_response.getStatusMsg())
# We are going to make a request to the Deep Categorization API
print("\tGetting IAB 2.0 classification...")
deepcat_response = meaningcloud.DeepCategorizationResponse(meaningcloud.DeepCategorizationRequest(license_key, model='IAB_2.0_en', txt=text, otherparams={'txtf':'markup'}).sendReq())
if deepcat_response.isSuccessful():
categories = deepcat_response.getCategories()
iab2 = (', '.join(deepcat_response.getCategoryCode(cat) for cat in categories[:1])) if categories else ''
else:
            print('Request to Deep Categorization was not successful: ' + deepcat_response.getStatusMsg())
except ValueError:
e = sys.exc_info()[0]
print("\nException: " + str(e))
index_count += 1
return | pd.Series([polarity, entities, concepts, iab2]) | pandas.Series |
#__________________________________________________________________________________________________________________________________________________________
"""Working Code, Do Not Change"""
#__________________________________________________________________________________________________________________________________________________________
"""
This script reads the raw data from the RaDeCC text (.txt) files and then performs the corrections and uncertainty propagations described by
Garcia-Solsona et al. (2008) (Marine Chemistry, 109, pp. 198-219) in order to quantify 223Ra and 224Ra. It also calculates the slope of
total counts per minute for later estimation of 226Ra via 222Rn ingrowth (Geibert et al. (2013), Limnol. Oceanogr. Methods, 11).
The input is a RaDeCC read file.
The output is a list of numbers, strings and lists including final CPMs and associated metadata.
"""
import os
import scipy.stats as sci
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from pathlib import Path
from get_digits import get_digits
from collections import Counter
def interval_calculator (list_, runtime): #This finds the count rate (cpm) within each interval. The range is length of list -1 as the last
    list1 = [] #value of each list is the summary value.
if len(list_) >2:
for i in range (len(list_)-1):
if i == 0:
list1.append(list_[i]/(runtime[i]-0))
else:
list1.append(list_[i]/(runtime[i]-runtime[i-1]))
list1.append((list_[-1]-np.sum(list_[:-1]))/(runtime[-1]-runtime[-2]))
else:
list1.append(-999)
# print((list_[-1]-np.sum(list_[:-1]))/(runtime[-1]-runtime[-2]))
return (list1)
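# Worked example (illustrative numbers only): counts of [5, 7, 30] at cumulative runtimes
# [10, 20, 30] minutes, where the final entry of each list is the file's summary line, give
#
#   >>> interval_calculator([5, 7, 30], [10, 20, 30])
#   [0.5, 0.7, 1.8]    # cpm per interval; last value = (30 - 5 - 7) / (30 - 20)
#
# Lists of two or fewer entries return the sentinel [-999].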
def cc_calculator (list1, list2, list3, cc_value): #calculates channel corrections
listx = []
for i in range (len(list1)):
listx.append((((list1[i] - list2[i] - list3[i])**2)*cc_value)/(1-((list1[i] - list2[i] - list3[i])*cc_value)))
return (listx)
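# Worked example (illustrative numbers only): for one interval with total = 12.0 cpm,
# 220-channel = 3.0 cpm, 219-channel = 0.5 cpm and a chance-coincidence constant of 0.01,
#
#   >>> cc_calculator([12.0], [3.0], [0.5], 0.01)
#   [0.789...]    # = ((12.0 - 3.0 - 0.5)**2 * 0.01) / (1 - (12.0 - 3.0 - 0.5) * 0.01)
#
# This is the per-interval correction applied to the 220 and 219 channels below.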
def add_on_summary_line(cnt_list):
cnt_list_with_summary_values = cnt_list[:-1]
cnt_list_with_summary_values.append(cnt_list[-1]-np.sum(cnt_list[:-1]))
#print(cnt_list[-1], cnt_list_with_summary_values[-1])
return(cnt_list_with_summary_values)
def slope_calculator (output_directory, detector_dict, arg_file, spike_sensitivity, equilibration_time_variable, DDMMYYY_DateFormat, thstd = 'thstd', acstd = 'acstd', blank = 'blank'):
runtimecopy = []
CPM219copy = []
cnt219copy = []
CPM220copy = []
cnt220copy = []
CPMTotcopy = []
cntTotcopy = []
runtime = []
CPM219 = []
cnt219 = []
CPM220 = []
cnt220 = []
CPMTot = []
cntTot = []
spike_list = []
error_list = []
file_name = arg_file
spike = 0
#print (file_name)
#Open txt file
    print (file_name)
    with open(file_name) as f:
#_________________________________________________________________________________________________________________________________________________
#RAW [RUNTIME, CPM219, cnt219, CPM220, cnt220, CPMTot, cntTot]____________________________________________________________________________________
for line in f:
line = line.replace('"','')
if line[0].isnumeric(): #Find lines containing data rather than titles
                x = line.split() #Split data within each line
runtimecopy.append(float(x[0].replace(",", "."))) #Append data to appropriate variable lists
CPM219copy.append(float(x[1].replace(",", ".")))
cnt219copy.append(int(x[2].replace(",", ".")))
CPM220copy.append(float(x[3].replace(",", ".")))
cnt220copy.append(int(x[4].replace(",", ".")))
CPMTotcopy.append(float(x[5].replace(",", ".")))
cntTotcopy.append(float(x[6].replace(",", ".")))
#_______________________________________________________________________________________________________________________________________________
#START DATETIME_________________________________________________________________________________________________________________________________
# Find and extract the start date and time of the read from the radecc read file
if line[0:5] == 'Start':
date_time = pd.to_datetime(line [10:], dayfirst = DDMMYYY_DateFormat)
if len(runtimecopy)>1:
interval_length_mins = int(runtimecopy[1])
else:
interval_length_mins = -999
    #Find and remove spikes in counts (a spike is a count more than spike_sensitivity counts higher than the previous count period; 100 by default)
spike_dict = {}
spike_list_219 = []
spike_list_220 = []
spike_list_tot = []
for i in range (len(cntTotcopy)):
if i != len(runtimecopy) - 1:
if (cnt219copy[i]-cnt219copy[i-1])>spike_sensitivity or (cnt220copy[i]-cnt220copy[i-1])>spike_sensitivity or (cntTotcopy[i]-cntTotcopy[i-1])>spike_sensitivity:
# print ('Spike detected and removed in file:',arg_file,'\ncnt219 cnts',cnt219copy[i],'\ncnt220 cnts',cnt220copy[i],'\ntot cnts :', cntTotcopy[i])
spike_dict.update({runtimecopy[i]:cntTotcopy[i]})
error_list.append('S1')
spike_list_219.append(cnt219copy[i])
spike_list_220.append(cnt220copy[i])
spike_list_tot.append(cntTotcopy[i])
else:
runtime.append(runtimecopy[i])
CPM219.append(CPM219copy[i])
cnt219.append(cnt219copy[i])
CPM220.append(CPM220copy[i])
cnt220.append(cnt220copy[i])
CPMTot.append(CPMTotcopy[i])
cntTot.append(cntTotcopy[i])
if i == len(runtimecopy) - 1:
runtime.append(runtimecopy[i])
CPM219.append(CPM219copy[i])
cnt219.append(cnt219copy[i])
CPM220.append(CPM220copy[i])
cnt220.append(cnt220copy[i])
CPMTot.append(CPMTotcopy[i])
cntTot.append(cntTotcopy[i])
interval_length = round(runtime[1]-runtime[0])
# print(interval_length)
equilibration_number_of_intervals = int(equilibration_time_variable/interval_length)
#________________________________________________________________________________________________________________________________________________
#Data_quality_checks_(DIEGO-FELIU et al. 2020)____________________________________________________________________________________________________________
CR220219 = np.array(CPM220)/np.array(CPM219)
if acstd not in str(file_name) and thstd not in str(file_name):
for i in range(len(CPMTot)):
if CPMTot[i] < 200:
if CPMTot[i] > 100:
error_list.append('Err224_Tot100')
if CR220219[i] > 10:
error_list.append('Err223_CR10')
# print ('CR10')
if CR220219[i] < 10 and CR220219[i]>4:
if CPM220[i] > 5:
error_list.append('Err223_CPM220_5')
# print ('Err223_CPM220_5')
if CR220219[i] < 8 and CR220219[i] > 2:
if CPM219[i] > 1:
error_list.append('Err224_CPM219_1')
# print ('Err224_CPM219_1')
if CR220219[i] < 2:
error_list.append('Err224_CR2')
else:
error_list.append('Err223_Tot200')
error_list = dict(Counter(error_list))
for key in error_list.keys():
if key != 'S1':
error_list[key] = str(int(error_list[key]/len(CPMTot)*100))+'%'
# error_list = list(set(error_list))
#________________________________________________________________________________________________________________________________________________
#END DATETIME____________________________________________________________________________________________________________________________________
#calculate the end date and time of the read by adding the read-time to the start datetime
end_date_time = date_time + datetime.timedelta(minutes = runtime[-1])
#________________________________________________________________________________________________________________________________________________
#CPM220,CPM219,CPMTot for each 10 min time interval______________________________________________________________________________________________
#Calculate the counts per minute (CPM) for each time period (interval) for each channel, using interval_calculator
CPM219_interval = interval_calculator(cnt219, runtime)
CPM220_interval = interval_calculator(cnt220, runtime)
CPMTot_interval = interval_calculator(cntTot, runtime)
# #________________________________________________________________________________________________________________________________________________
# #CHANNEL CORRECTIONS ON EACH INTERVAL____________________________________________________________________________________________________________
y_220_cc = cc_calculator(CPMTot_interval, CPM220_interval, CPM219_interval, 0.01) #220 channel correction (value = 0.01)
CMP220_corr = []
for i in range (len(CPM220_interval)): #Find corrected 220 cpm
CMP220_corr.append(CPM220_interval[i] -y_220_cc[i])
#print (y_220_cc[i], CMP220_corr[i])
y_219_cc = cc_calculator(CPMTot_interval, CMP220_corr, CPM219_interval, 0.0000935) #219 channel correction (value = 0.0000935)
CPM219_corr = []
for i in range (len(CPM219_interval)):
CPM219_corr.append(CPM219_interval[i]-y_219_cc[i]) #Find corrected 219 cpm
CPM220_final = []
for i in range (len(CPM219_corr)):
CPM220_final.append(CMP220_corr[i] - ((((1.65*CPM219_corr[i])**2)*0.01)/(1-((1.65*CPM219_corr[i])*0.01)))) #Find Final 220 cpm
CPM219_final = []
for i in range(len(CPM219_corr)):
CPM219_final.append(CPM219_corr[i] - (CMP220_corr[i] * 0.0255)) #Find final 219CPM (corrected 219 cpm - (CMP220_corr[i] * 0.0255))
#print (np.average(y_219_cc))
CPMTot_corr = []
for i in range(len (CPMTot_interval)):
CPMTot_corr.append(CPMTot_interval[i] - 2*CPM220_final[i] - 2*CPM219_final[i]) #Find corrected Total CPM
#print (arg_file)
#________________________________________________________________________________________________________________________________________________
#CALCULATE LINEAR REGRESSION OF CPMTot_corr______________________________________________________________________________________________________
if thstd in str(file_name) or acstd in str(file_name) or blank in str(file_name):
# if len(runtime[:]) == len(CPMTot_interval[:]):
# slope = sci.linregress(runtime[:], CPMTot_interval[:])
# error_list['Standard_read'] = True
# # slope_220 = sci.linregress(runtime[:], CPM220_interval[:])
# # if slope_220[0]<-0.0001:
# # print ('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',slope_220[0])
# else:
# print ('\n***ERROR***\nThe read file ',arg_file,'does not contain enough data points to perform a linear regression: 222Rn ingrowth slope set to 999\n')
error_list['Standard_read'] = True
slope = [-999,-999,-999,-999,-999]
else:
if len(runtime[equilibration_number_of_intervals:-1]) == len(CPMTot_interval[equilibration_number_of_intervals:-1]) and float(runtime[-1]) > 590:
slope = sci.linregress(runtime[equilibration_number_of_intervals:-1], CPMTot_interval[equilibration_number_of_intervals:-1])
# slope_220 = sci.linregress(runtime[:], CPM220_interval[:])
# if slope_220[0]<-0.0001:
# print ('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',slope_220[0])
else:
# print ('\n***ERROR***\nThe read file ',arg_file,'does not contain enough data points to perform a linear regression: 222Rn ingrowth slope set to 999\n')
error_list['Err226_short_read'] = True
slope = [-999,-999,-999,-999,-999]
#________________________________________________________________________________________________________________________________________________
#Propagation of Uncertainties___________________________________________________________________________________________
cntTot = add_on_summary_line(cntTot)
cntTot[-1] = cntTot[-1] - np.sum(spike_list_tot)
cnt219 = add_on_summary_line(cnt219)
cnt219[-1] = cnt219[-1] - np.sum(spike_list_219)
cnt220 = add_on_summary_line(cnt220)
cnt220[-1] = cnt220[-1] - np.sum(spike_list_220)
# runtime = add_on_summary_line(runtime)
if np.sum(cnt219) == 0:
err_219 = 0
if np.sum(cnt219) != 0:
err_219 = np.sqrt(np.sum(cnt219))/np.sum(cnt219) #[-1] index is the final list value which is the summary line in the txt file
if np.sum(cnt220) == 0:
err_220 = 0
if np.sum(cnt220) != 0:
err_220 = np.sqrt(np.sum(cnt220))/np.sum(cnt220)
    err_Tot = np.sqrt(np.sum(cntTot))/np.sum(cntTot)
cpm_219 = np.sum(cnt219)/(runtime[-1]-(interval_length_mins*len(spike_dict.keys())))
cnt219_abserr = err_219*cpm_219
cpm_220 = np.sum(cnt220)/(runtime[-1]-(interval_length_mins*len(spike_dict.keys())))
cnt220_abserr = err_220*cpm_220
cpm_Tot = np.sum(cntTot)/(runtime[-1]-(interval_length_mins*len(spike_dict.keys())))
cntTot_abserr = err_Tot*cpm_Tot
y_220 = (cpm_Tot-cpm_220-cpm_219)
y_220_err = (np.sqrt((cntTot_abserr)**2+(cnt220_abserr)**2+(cnt219_abserr)**2))
y220cc = ((y_220**2)*0.01)/(1-(y_220*0.01))
y220cc_err = y_220_err*(((2*0.01*y_220)-(0.01*y_220)**2)/(1-0.01*y_220)**2)
corr220 = cpm_220 - y220cc
corr220_err = np.sqrt(cnt220_abserr**2 +y220cc_err**2)
y_219 = (cpm_Tot-corr220-cpm_219)
y_219_err = (np.sqrt((cntTot_abserr)**2+(corr220_err)**2+(cnt219_abserr)**2))
y219cc = ((y_219**2)*0.000093)/(1-(y_219*0.000093))
y219cc_err = y_219_err*(((2*0.000093*y_219)-(0.000093*y_219)**2)/(1-0.000093*y_219)**2)
corr219 = cpm_219 - y219cc
corr219_err = np.sqrt(cnt219_abserr**2 +y219cc_err**2)
final219 = corr219 - (corr220*0.0255)
final219_err = np.sqrt(corr219_err**2 + (0.0255*corr220_err)**2)
final220 = corr220 - ((1.6*corr219)**2 * 0.01)/(1 + ((1.6*corr219)*0.01))
final220_err = np.sqrt(corr220_err**2 + ((((2*1.6)**2 *0.01*corr219-(1.6**3 * 0.01**2 * corr219**2))/(1-(1.6*0.01*corr219))**2)*corr219_err)**2)
# #________________________________________________________________________________________________________________________________________________
# #cnttotnet____(= cntTot - 2*cnt219 - 2*cnt220)___________________________________________________________________________________________________
# cnttotnet = cntTot[-1] - 2*cnt219[-1] - 2*cnt220[-1]
#________________________________________________________________________________________________________________________________________________
    #errslope_abs__________________________________________________________________________________________________
errslope_rel = slope[4]
errslope_abs = slope[4]/slope[0]
# print (slope[0], slope[4]/slope[0], errslope_rel)
#________________________________________________________________________________________________________________________________________________
#Detector Name (detname), Cartridge type, Read Number_________________________________________________________________________________________________
try:
tempname=arg_file.parts
tempname2 = tempname[-1].split('-')
cart_type = tempname2[-3][-1].lower()
except:
cart_type = None
#find detector name
detname_checklist = []
for key in detector_dict.keys():
if key.lower() in tempname[-1].lower():
detname_checklist.append(key)
# print(detname_checklist)
if len(detname_checklist)>1:
detname = 'Err_MultipleDetectorNames'
    elif len(detname_checklist)==1:
detname = detname_checklist[0]
else:
detname = 'Err_DetectorNotFound'
# tempname3=tempname2[-1].split('.')
# detname=tempname3[0].lower()
#find read number
read_number = get_digits(tempname2[0])
base_dir = output_directory/'Read_Plots'
#print (base_dir)
sample_dir = Path(os.path.join(*arg_file.parts[-4:-1]))
if sample_dir.parts[-1].split('_')[-1] == 'folder':
sample_dir = Path('Standards_and_Blanks')/sample_dir.parts[-1]
read_name = arg_file.parts[-1].split('.')[0]
#print (sample_dir)
if (base_dir/(str(sample_dir)+'_plots')).exists() == False:
os.makedirs(base_dir/(str(sample_dir)+'_plots'))
if (base_dir/sample_dir/read_name).exists() == False:
if len(runtime)>3:
#fig = plt.figure()
# CPMTot_interval[-1] = CPMTot_interval[-1]-np.sum(CPMTot_interval[:-1])
ax = plt.subplot(111)
CPMTot_lineplot = ax.plot(runtime[:-1], CPMTot_interval[:-1], '-', label = 'Total CPM line')
CPMTot_scatter = ax.scatter(runtime[:-1], CPMTot_interval[:-1], label = 'Total CPM')
# if len(runtime[equilibration_number_of_intervals:]) == len(CPMTotcopy[equilibration_number_of_intervals:]) and len(CPMTot_corr[equilibration_number_of_intervals:])>3:
# ax.plot(runtime[0:equilibration_number_of_intervals], CPMTotcopy[0:equilibration_number_of_intervals], '-')
# ax.scatter(runtime[0:equilibration_number_of_intervals], CPMTotcopy[0:equilibration_number_of_intervals], label = 'Total CPM (Equilibration Time)')
CPM220_line_plot = ax.plot(runtime[:-1], CPM220_interval[:-1], '-', label = '220 CPM line')
CPM220_scatter = ax.scatter(runtime[:-1], CPM220_interval[:-1], label = '220 CPM')
CPM219_line_plot = ax.plot(runtime[:-1], CPM219_interval[:-1], '-', label = '219 CPM line')
CPM219_scatter = ax.scatter(runtime[:-1], CPM219_interval[:-1], label = '219 CPM')
ax.legend((CPMTot_scatter,CPM220_scatter, CPM219_scatter),('Total CPM', '220 CPM', '219 CPM'),
loc='upper center', bbox_to_anchor=(0.5, -0.15),
shadow=True, ncol=2)
plt.title(read_name)
plt.xlabel('Read Time (mins)')
plt.ylabel('Counts per minute (cpm)')
plt.savefig(base_dir/(str(sample_dir)+'_plots')/read_name, dpi = 250, bbox_inches = 'tight')
# plt.show()
plt.clf()
if len(spike_dict.keys())>0:
ax1 = plt.subplot(111)
total_counts_line = ax1.plot(runtimecopy[:-1], cntTotcopy[:-1], '-', label = 'Total Counts line')
total_counts_scatter = ax1.scatter(runtimecopy[:-1], cntTotcopy[:-1], label = 'Total Counts')
spike_scatter = ax1.scatter(spike_dict.keys(), spike_dict.values(), label = 'Removed Spike')
ax1.legend((total_counts_scatter, spike_scatter),('Total Counts line', 'Removed Spike'),loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2)
plt.title('Spike_Plot_'+read_name)
plt.xlabel('Read Time (mins)')
plt.ylabel('Counts')
plt.savefig(base_dir/(str(sample_dir)+'_plots')/('Spike_Plot_'+str(read_name)), dpi = 250, bbox_inches = 'tight')
plt.clf()
if cpm_Tot > 10:
error_list['Err226_cpm<10'] = True
if runtime[-1] > 10.0:
return (date_time, end_date_time, slope[0], slope[4], sum(cnt219), cnt219_abserr, sum(cnt220), cnt220_abserr, cpm_219, err_219, cpm_220,
err_220, cpm_Tot, err_Tot, y219cc, y219cc_err, y220cc, y220cc_err, corr219, corr219_err, corr220, corr220_err, final219, final220,
runtime[-1], final219_err, final220_err, cntTot_abserr, errslope_abs, detname, cart_type, read_number, spike_dict, error_list)
else:
# return (pd.to_datetime('01/01/1900 00:00:00'), pd.to_datetime('01/01/1900 00:00:00'), -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, -999, 'no_read', 'no_read', -999, -999, ['no read'])
return (pd.to_datetime('01/01/1900 00:00:00'), | pd.to_datetime('01/01/1900 00:00:00') | pandas.to_datetime |
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr', 'getDatasetMean']
import numpy as np
import pandas as pd
import os
import math
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix, csr_matrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
DATASET_MEAN = {'VGG_ILSVRC_19_2014':[103.939, 116.779, 123.68]}
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
def get_pretty_str(key, value):
return '\t"' + key + '": ' + str(value) + ',\n'
def save_tensor_csv(tensor, file_path, shouldTranspose):
    w = tensor.reshape(tensor.shape[0], -1)
if shouldTranspose:
w = w.T
np.savetxt(file_path, w, delimiter=',')
with open(file_path + '.mtd', 'w') as file:
file.write('{\n\t"data_type": "matrix",\n\t"value_type": "double",\n')
file.write(get_pretty_str('rows', w.shape[0]))
file.write(get_pretty_str('cols', w.shape[1]))
file.write(get_pretty_str('nnz', np.count_nonzero(w)))
file.write('\t"format": "csv",\n\t"description": {\n\t\t"author": "SystemML"\n\t}\n}\n')
def convert_caffemodel(sc, deploy_file, caffemodel_file, output_dir, format="binary", is_caffe_installed=False):
"""
Saves the weights and bias in the caffemodel file to output_dir in the specified format.
    This method does not require caffe to be installed.
Parameters
----------
sc: SparkContext
SparkContext
deploy_file: string
Path to the input network file
caffemodel_file: string
Path to the input caffemodel file
output_dir: string
Path to the output directory
format: string
Format of the weights and bias (can be binary, csv or text)
is_caffe_installed: bool
True if caffe is installed
"""
if is_caffe_installed:
if format != 'csv':
raise ValueError('The format ' + str(format) + ' is not supported when caffe is installed. Hint: Please specify format=csv')
import caffe
net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)
for layerName in net.params.keys():
num_parameters = len(net.params[layerName])
if num_parameters == 0:
continue
elif num_parameters == 2:
# Weights and Biases
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
save_tensor_csv(net.params[layerName][1].data, os.path.join(output_dir, layerName + '_bias.mtx'), shouldTranspose)
elif num_parameters == 1:
# Only Weight
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
else:
raise ValueError('Unsupported number of parameters:' + str(num_parameters))
else:
createJavaObject(sc, 'dummy')
utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()
utilObj.saveCaffeModelFile(sc._jsc, deploy_file, caffemodel_file, output_dir, format)
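# Illustrative usage sketch (the file and directory names below are placeholders, not shipped examples):
#
#   sc = SparkContext.getOrCreate()
#   convert_caffemodel(sc, 'VGG_ILSVRC_19_layers_deploy.prototxt', 'VGG_ILSVRC_19_layers.caffemodel',
#                      'vgg_weights', format='binary', is_caffe_installed=False)
#
# so that each layer's weights and biases are written into the output directory in the requested format.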
def convert_lmdb_to_jpeg(lmdb_img_file, output_dir):
"""
Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.
To install cv2 package, do `pip install opencv-python`.
Parameters
----------
lmdb_img_file: string
Path to the input lmdb file
output_dir: string
Output directory for images (local filesystem)
"""
import lmdb, caffe, cv2
    lmdb_cursor = lmdb.open(lmdb_img_file, readonly=True).begin().cursor()
datum = caffe.proto.caffe_pb2.Datum()
i = 1
for _, value in lmdb_cursor:
datum.ParseFromString(value)
data = caffe.io.datum_to_array(datum)
output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')
image = np.transpose(data, (1,2,0)) # CxHxW to HxWxC in cv2
cv2.imwrite(output_file_path, image)
i = i + 1
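# Illustrative usage sketch (paths are placeholders):
#
#   convert_lmdb_to_jpeg('/data/ilsvrc12_train_lmdb', '/tmp/ilsvrc12_jpeg')
#
# which writes file_1.jpg, file_2.jpg, ... into the output directory, one image per lmdb record.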
def convertToLabeledDF(sparkSession, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = | pd.concat([pd1, pd2], axis=1) | pandas.concat |
'''
permit.py
---------
This file collects raw building permit data, and summarizes the information
for each census tract. It is called by make_zone_facts.py
The resulting dataset from this file looks like:
zone_type | zone | construction_permits | total_permits
----------|---------|----------------------|--------------
tract | 000100 | 231 | 575
tract | 000201 | 2 | 6
tract | 000202 | 145 | 363
tract | 000300 | 102 | 351
tract | 000400 | 77 | 204
'''
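# Illustrative usage sketch (assumes the project's `utils` helpers and the raw permit CSVs
# are available as configured elsewhere in the repository):
#
#   permit_df = get_permit_data()
#   permit_df.head()   # zone_type | zone | construction_permits | total_permits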
from . import utils
import pandas as pd
def get_permit_for_year(path):
df = pd.read_csv(path)
df.columns = df.columns.str.lower()
df['construction_permits'] = df['permit_type_name'].apply(
lambda x: 1 if x == 'CONSTRUCTION' else 0)
df['total_permits'] = 1
# filter out permits from more than a year ago.
df = utils.filter_date(df, 'issue_date')
# Get census tract
df = utils.get_census_tract_for_data(df, 'longitude', 'latitude')
df = df[['tract', 'ward', 'neighborhoodcluster', 'construction_permits', 'total_permits']]
df = df.rename(columns={'neighborhoodcluster': 'neighborhood_cluster'})
df['neighborhood_cluster'] = utils.just_digits(df['neighborhood_cluster'])
return df
def get_permit_data():
paths = utils.get_paths_for_data('permits', years=utils.get_years())
df = pd.concat([get_permit_for_year(path) for path in paths])
data = []
for geo in ['tract', 'neighborhood_cluster', 'ward']:
temp = df.groupby(geo)[['construction_permits', 'total_permits']].sum()
temp['zone_type'] = geo
temp['zone'] = temp.index
data.append(temp)
return | pd.concat(data) | pandas.concat |
import numpy as np
import pandas as pd
attribute_dict = {}
with open("attribute_names.txt") as a_file:
    for line in a_file:
        key, value = line.strip('\n').split(":")
        key = key.strip()
        attribute_dict[key] = value
df_german_data = | pd.read_csv('german_data.csv', index_col=0) | pandas.read_csv |
from packaging.version import Version
from scprep.plot.histogram import _symlog_bins
from scprep.plot.jitter import _JitterParams
from scprep.plot.scatter import _ScatterParams
from tools import data
from tools import utils
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scprep
import sys
import unittest
def try_remove(filename):
try:
os.remove(filename)
except FileNotFoundError:
pass
def test_default_matplotlibrc():
for key in [
"axes.labelsize",
"axes.titlesize",
"figure.titlesize",
"legend.fontsize",
"legend.title_fontsize",
"xtick.labelsize",
"ytick.labelsize",
]:
assert scprep.plot.utils._is_default_matplotlibrc() is True
default = plt.rcParams[key]
plt.rcParams[key] = "xx-large"
assert scprep.plot.utils._is_default_matplotlibrc() is False
plt.rcParams[key] = default
assert scprep.plot.utils._is_default_matplotlibrc() is True
def test_parse_fontsize():
for key in [
"axes.labelsize",
"axes.titlesize",
"figure.titlesize",
"legend.fontsize",
"legend.title_fontsize",
"xtick.labelsize",
"ytick.labelsize",
]:
assert scprep.plot.utils.parse_fontsize("x-large", "large") == "x-large"
assert scprep.plot.utils.parse_fontsize(None, "large") == "large"
default = plt.rcParams[key]
plt.rcParams[key] = "xx-large"
assert scprep.plot.utils.parse_fontsize("x-large", "large") == "x-large"
assert scprep.plot.utils.parse_fontsize(None, "large") is None
plt.rcParams[key] = default
assert scprep.plot.utils.parse_fontsize("x-large", "large") == "x-large"
assert scprep.plot.utils.parse_fontsize(None, "large") == "large"
def test_generate_colorbar_str():
cb = scprep.plot.tools.generate_colorbar(cmap="viridis")
assert cb.cmap.name == "viridis"
def test_generate_colorbar_colormap():
cb = scprep.plot.tools.generate_colorbar(cmap=plt.cm.viridis)
assert cb.cmap.name == "viridis"
def test_generate_colorbar_list():
cb = scprep.plot.tools.generate_colorbar(cmap=["red", "blue"])
assert cb.cmap.name == "scprep_custom_cmap"
def test_generate_colorbar_dict():
if Version(matplotlib.__version__) >= Version("3.2"):
errtype = ValueError
msg = "is not a valid value for name; supported values are"
else:
errtype = TypeError
msg = "unhashable type: 'dict'"
utils.assert_raises_message(
errtype,
msg,
scprep.plot.tools.generate_colorbar,
cmap={"+": "r", "-": "b"},
)
def test_tab30():
cmap = scprep.plot.colors.tab30()
np.testing.assert_array_equal(
cmap.colors[:15],
np.array(matplotlib.cm.tab20c.colors)[
[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18]
],
)
np.testing.assert_array_equal(
cmap.colors[15:],
np.array(matplotlib.cm.tab20b.colors)[
[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18]
],
)
def test_tab40():
cmap = scprep.plot.colors.tab40()
np.testing.assert_array_equal(cmap.colors[:20], matplotlib.cm.tab20c.colors)
np.testing.assert_array_equal(cmap.colors[20:], matplotlib.cm.tab20b.colors)
def test_tab10_continuous():
cmap = scprep.plot.colors.tab10_continuous(n_colors=10, n_step=2, reverse=True)
np.testing.assert_allclose(
cmap.colors,
np.hstack([matplotlib.cm.tab20.colors, np.ones((20, 1))]),
atol=0.06,
)
def test_tab10_continuous_no_reverse():
cmap = scprep.plot.colors.tab10_continuous(n_colors=10, n_step=2, reverse=False)
colors = np.array(cmap.colors)
for i in range(len(colors) // 2):
tmp = np.array(colors[2 * i])
colors[2 * i] = colors[2 * i + 1]
colors[2 * i + 1] = tmp
np.testing.assert_allclose(
colors, np.hstack([matplotlib.cm.tab20.colors, np.ones((20, 1))]), atol=0.06
)
def test_tab10_continuous_invalid_n_colors():
utils.assert_raises_message(
ValueError,
"Expected 0 < n_colors <= 10. Got 0",
scprep.plot.colors.tab10_continuous,
n_colors=0,
)
utils.assert_raises_message(
ValueError,
"Expected 0 < n_colors <= 10. Got 11",
scprep.plot.colors.tab10_continuous,
n_colors=11,
)
utils.assert_raises_message(
ValueError,
"Expected n_step >= 2. Got 1",
scprep.plot.colors.tab10_continuous,
n_step=1,
)
def test_tab_exact():
assert scprep.plot.colors.tab(1) is plt.cm.tab10
np.testing.assert_array_equal(
scprep.plot.colors.tab(10).colors, plt.cm.tab10.colors
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(20).colors, plt.cm.tab20.colors
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(30).colors, scprep.plot.colors.tab30().colors
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(40).colors, scprep.plot.colors.tab40().colors
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(50).colors,
scprep.plot.colors.tab10_continuous(n_colors=10, n_step=5).colors,
)
def test_tab_first10():
np.testing.assert_array_equal(
scprep.plot.colors.tab(19).colors[:10], plt.cm.tab10.colors
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(29).colors[:10], scprep.plot.colors.tab30().colors[::3]
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(39).colors[:10], scprep.plot.colors.tab40().colors[::4]
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(49).colors[:10],
scprep.plot.colors.tab10_continuous(n_colors=10, n_step=5).colors[::5],
)
def test_tab_first20():
np.testing.assert_array_equal(
scprep.plot.colors.tab(29).colors[10:20],
scprep.plot.colors.tab30().colors[1::3],
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(39).colors[10:20],
scprep.plot.colors.tab40().colors[1::4],
)
def test_tab_first30():
np.testing.assert_array_equal(
scprep.plot.colors.tab(39).colors[20:30],
scprep.plot.colors.tab40().colors[2::4],
)
def test_tab_overhang():
np.testing.assert_array_equal(
scprep.plot.colors.tab(9).colors, plt.cm.tab10.colors[:9]
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(19).colors[10:], plt.cm.tab20.colors[1:-1:2]
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(29).colors[20:],
scprep.plot.colors.tab30().colors[2:-1:3],
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(39).colors[30:],
scprep.plot.colors.tab40().colors[3:-1:4],
)
np.testing.assert_array_equal(
scprep.plot.colors.tab(49).colors[40:],
scprep.plot.colors.tab10_continuous(n_colors=10, n_step=5).colors[4:-1:5],
)
def test_tab_invalid():
utils.assert_raises_message(
ValueError, "Expected n >= 1. Got 0", scprep.plot.colors.tab, n=0
)
def test_is_color_array_none():
assert not scprep.plot.utils._is_color_array(None)
def test_symlog_bins():
# all negative
assert np.all(_symlog_bins(-10, -1, 1, 10) < 0)
# all positive
assert np.all(_symlog_bins(1, 10, 1, 10) > 0)
# ends at zero
assert np.all(_symlog_bins(-10, 0, 1, 10) <= 0)
assert _symlog_bins(-10, 0, 1, 10)[-1] == 0
# starts at zero
assert np.all(_symlog_bins(0, 10, 1, 10) >= 0)
assert _symlog_bins(0, 10, 1, 10)[0] == 0
# identically zero
assert np.all(_symlog_bins(0, 0, 0.1, 10) == [-1, -0.1, 0.1, 1])
class TestScatterParams(unittest.TestCase):
@classmethod
def setUpClass(self):
self.X = np.random.normal(0, 1, [500, 4])
self.x = self.X[:, 0]
self.y = self.X[:, 1]
self.z = self.X[:, 2]
self.c = self.X[:, 3]
self.array_c = np.vstack([self.c, self.c, self.c, self.c]).T
self.array_c = self.array_c - np.min(self.array_c)
self.array_c = self.array_c / np.max(self.array_c)
def test_size(self):
params = _ScatterParams(x=self.x, y=self.y)
assert params.size == len(self.x)
def test_plot_idx_shuffle(self):
params = _ScatterParams(
x=self.x, y=self.y, z=self.z, c=self.c, s=np.abs(self.x)
)
assert not np.all(params.plot_idx == np.arange(params.size))
np.testing.assert_equal(params.x, self.x[params.plot_idx])
np.testing.assert_equal(params.y, self.y[params.plot_idx])
np.testing.assert_equal(params.z, self.z[params.plot_idx])
np.testing.assert_equal(params.c, self.c[params.plot_idx])
np.testing.assert_equal(params.s, np.abs(self.x)[params.plot_idx])
def test_plot_idx_no_shuffle(self):
params = _ScatterParams(
x=self.x, y=self.y, z=self.z, c=self.c, s=np.abs(self.x), shuffle=False
)
np.testing.assert_equal(params.plot_idx, np.arange(params.size))
np.testing.assert_equal(params.x, self.x)
np.testing.assert_equal(params.y, self.y)
np.testing.assert_equal(params.z, self.z)
np.testing.assert_equal(params.c, self.c)
np.testing.assert_equal(params.s, np.abs(self.x))
def test_plot_idx_mask(self):
params = _ScatterParams(
x=self.x, y=self.y, z=self.z, c=self.c, mask=self.x > 0, shuffle=False
)
np.testing.assert_equal(params.plot_idx, np.arange(params.size)[self.x > 0])
np.testing.assert_equal(params.x, self.x[self.x > 0])
np.testing.assert_equal(params.y, self.y[self.x > 0])
np.testing.assert_equal(params.z, self.z[self.x > 0])
np.testing.assert_equal(params.c, self.c[self.x > 0])
def test_plot_idx_mask_shuffle(self):
params = _ScatterParams(x=self.x, y=self.y, mask=self.x > 0, shuffle=True)
np.testing.assert_equal(
np.sort(params.plot_idx), np.arange(params.size)[self.x > 0]
)
assert np.all(params.x > 0)
def test_data_int(self):
params = _ScatterParams(x=1, y=2)
np.testing.assert_equal(params._data, [np.array([1]), np.array([2])])
assert params.subplot_kw == {}
def test_data_2d(self):
params = _ScatterParams(x=self.x, y=self.y)
np.testing.assert_equal(params._data, [self.x, self.y])
np.testing.assert_equal(
params.data, [self.x[params.plot_idx], self.y[params.plot_idx]]
)
assert params.subplot_kw == {}
def test_data_3d(self):
params = _ScatterParams(x=self.x, y=self.y, z=self.z)
np.testing.assert_equal(params._data, [self.x, self.y, self.z])
np.testing.assert_equal(
params.data,
[self.x[params.plot_idx], self.y[params.plot_idx], self.z[params.plot_idx]],
)
assert params.subplot_kw == {"projection": "3d"}
def test_s_default(self):
params = _ScatterParams(x=self.x, y=self.y)
assert params.s == 200 / np.sqrt(params.size)
def test_s_given(self):
params = _ScatterParams(x=self.x, y=self.y, s=3)
assert params.s == 3
def test_c_none(self):
params = _ScatterParams(x=self.x, y=self.y)
assert params.constant_c()
assert not params.array_c()
assert params.discrete is None
assert params.legend is False
assert params.vmin is None
assert params.vmax is None
assert params.cmap is None
assert params.cmap_scale is None
assert params.extend is None
def test_constant_c(self):
params = _ScatterParams(x=self.x, y=self.y, c="blue")
assert params.constant_c()
assert not params.array_c()
assert params.discrete is None
assert params.legend is False
assert params.vmin is None
assert params.vmax is None
assert params.cmap is None
assert params.cmap_scale is None
assert params.extend is None
assert params.labels is None
def test_array_c(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.array_c)
assert params.array_c()
assert not params.constant_c()
np.testing.assert_array_equal(params.x, params._x[params.plot_idx])
np.testing.assert_array_equal(params.y, params._y[params.plot_idx])
np.testing.assert_array_equal(params.c, params._c[params.plot_idx])
assert params.discrete is None
assert params.legend is False
assert params.vmin is None
assert params.vmax is None
assert params.cmap is None
assert params.cmap_scale is None
assert params.extend is None
assert params.labels is None
def test_continuous(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c)
assert not params.array_c()
assert not params.constant_c()
np.testing.assert_array_equal(params.x, params._x[params.plot_idx])
np.testing.assert_array_equal(params.y, params._y[params.plot_idx])
np.testing.assert_array_equal(params.c, params._c[params.plot_idx])
assert params.discrete is False
assert params.legend is True
assert params.cmap_scale == "linear"
assert params.cmap is plt.cm.inferno
params = _ScatterParams(
x=self.x, y=self.y, discrete=False, c=np.round(self.c % 1, 1)
)
assert not params.array_c()
assert not params.constant_c()
np.testing.assert_array_equal(params.x, params._x[params.plot_idx])
np.testing.assert_array_equal(params.y, params._y[params.plot_idx])
np.testing.assert_array_equal(params.c, params._c[params.plot_idx])
assert params.discrete is False
assert params.legend is True
assert params.labels is None
assert params.cmap_scale == "linear"
assert params.cmap is plt.cm.inferno
def test_discrete_tab10(self):
params = _ScatterParams(x=self.x, y=self.y, c=np.where(self.c > 0, "+", "-"))
assert not params.array_c()
assert not params.constant_c()
np.testing.assert_array_equal(params.x, params._x[params.plot_idx])
np.testing.assert_array_equal(params.y, params._y[params.plot_idx])
np.testing.assert_array_equal(params.c, params.c_discrete[params.plot_idx])
assert params.discrete is True
assert params.legend is True
assert params.vmin is None
assert params.vmax is None
assert params.cmap_scale is None
np.testing.assert_equal(params.cmap.colors, plt.cm.tab10.colors[:2])
def test_discrete_tab20(self):
params = _ScatterParams(x=self.x, y=self.y, c=10 * np.round(self.c % 1, 1))
assert not params.array_c()
assert not params.constant_c()
assert params.discrete is True
assert params.legend is True
assert params.vmin is None
assert params.vmax is None
assert params.cmap_scale is None
assert params.extend is None
assert isinstance(params.cmap, matplotlib.colors.ListedColormap)
np.testing.assert_equal(params.cmap.colors[:10], plt.cm.tab10.colors)
np.testing.assert_equal(
params.cmap.colors[10:],
plt.cm.tab20.colors[1 : 1 + (len(params.cmap.colors) - 10) * 2 : 2],
)
def test_continuous_less_than_20(self):
params = _ScatterParams(x=self.x, y=self.y, c=np.round(self.c % 1, 1))
assert not params.array_c()
assert not params.constant_c()
assert params.discrete is False
assert params.legend is True
assert params.vmin == 0
assert params.vmax == 1
assert params.cmap_scale == "linear"
assert params.extend == "neither"
assert params.cmap is matplotlib.cm.inferno
def test_continuous_tab20_str(self):
params = _ScatterParams(
x=self.x, y=self.y, discrete=False, cmap="tab20", c=np.round(self.c % 1, 1)
)
assert params.cmap is plt.cm.tab20
def test_continuous_tab20_obj(self):
params = _ScatterParams(
x=self.x,
y=self.y,
discrete=False,
cmap=plt.get_cmap("tab20"),
c=np.round(self.c % 1, 1),
)
assert params.cmap is plt.cm.tab20
def test_discrete_dark2(self):
params = _ScatterParams(
x=self.x,
y=self.y,
discrete=True,
cmap="Dark2",
c=np.where(self.c > 0, "+", "-"),
)
assert not params.array_c()
assert not params.constant_c()
assert params.discrete is True
assert params.legend is True
assert params.vmin is None
assert params.vmax is None
assert params.cmap_scale is None
assert params.extend is None
assert isinstance(params.cmap, matplotlib.colors.ListedColormap)
np.testing.assert_equal(params.cmap.colors, plt.cm.Dark2.colors[:2])
def test_c_discrete(self):
c = np.where(self.c > 0, "a", "b")
params = _ScatterParams(x=self.x, y=self.y, c=c)
np.testing.assert_equal(params.c_discrete, np.where(c == "a", 0, 1))
np.testing.assert_equal(params.labels, ["a", "b"])
def test_legend(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, legend=False)
assert params.legend is False
params = _ScatterParams(x=self.x, y=self.y, c=self.c, colorbar=False)
assert params.legend is False
def test_vmin_given(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, vmin=0)
assert params.vmin == 0
def test_vmin_default(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c)
assert params.vmin == np.min(self.c)
def test_vmax_given(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, vmax=0)
assert params.vmax == 0
def test_vmax_default(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c)
assert params.vmax == np.max(self.c)
def test_list_cmap(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, cmap=["red", "black"])
assert params.list_cmap()
np.testing.assert_equal(params.cmap([0, 255]), [[1, 0, 0, 1], [0, 0, 0, 1]])
def test_dict_cmap_fwd(self):
params = _ScatterParams(
x=self.x,
y=self.y,
c=np.where(self.c > 0, "+", "-"),
cmap={"+": "k", "-": "r"},
)
assert not params.list_cmap()
if sys.version_info[1] > 5:
np.testing.assert_equal(params.cmap.colors, [[0, 0, 0, 1], [1, 0, 0, 1]])
assert np.all(params._labels == np.array(["+", "-"]))
else:
try:
np.testing.assert_equal(
params.cmap.colors, [[0, 0, 0, 1], [1, 0, 0, 1]]
)
assert np.all(params._labels == np.array(["+", "-"]))
except AssertionError:
np.testing.assert_equal(
params.cmap.colors, [[1, 0, 0, 1], [0, 0, 0, 1]]
)
assert np.all(params._labels == np.array(["-", "+"]))
def test_dict_cmap_rev(self):
params = _ScatterParams(
x=self.x,
y=self.y,
c=np.where(self.c > 0, "+", "-"),
cmap={"-": "k", "+": "r"},
)
if sys.version_info[1] > 5:
np.testing.assert_equal(params.cmap.colors, [[0, 0, 0, 1], [1, 0, 0, 1]])
assert np.all(params._labels == np.array(["-", "+"]))
else:
try:
np.testing.assert_equal(
params.cmap.colors, [[0, 0, 0, 1], [1, 0, 0, 1]]
)
assert np.all(params._labels == np.array(["-", "+"]))
except AssertionError:
np.testing.assert_equal(
params.cmap.colors, [[1, 0, 0, 1], [0, 0, 0, 1]]
)
assert np.all(params._labels == np.array(["+", "-"]))
def test_dict_cmap_constant(self):
params = _ScatterParams(
x=self.x,
y=self.y,
c=np.full_like(self.c, "+", dtype=str),
cmap={"-": "k", "+": "r"},
)
np.testing.assert_equal(params.cmap.colors, [[1, 0, 0, 1]])
assert np.all(params._labels == np.array(["+"]))
def test_cmap_given(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, cmap="viridis")
assert params.cmap is matplotlib.cm.viridis
assert not params.list_cmap()
def test_cmap_scale_symlog(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, cmap_scale="symlog")
assert params.cmap_scale == "symlog"
assert isinstance(params.norm, matplotlib.colors.SymLogNorm), params.norm
def test_cmap_scale_log(self):
params = _ScatterParams(
x=self.x, y=self.y, c=np.abs(self.c) + 1, cmap_scale="log"
)
assert params.cmap_scale == "log"
assert isinstance(params.norm, matplotlib.colors.LogNorm), params.norm
def test_cmap_scale_sqrt(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, cmap_scale="sqrt")
assert params.cmap_scale == "sqrt"
assert isinstance(params.norm, matplotlib.colors.PowerNorm), params.norm
assert params.norm.gamma == 0.5
def test_extend(self):
params = _ScatterParams(x=self.x, y=self.y, c=self.c, vmin=np.mean(self.c))
assert params.extend == "min"
params = _ScatterParams(x=self.x, y=self.y, c=self.c, vmax=np.mean(self.c))
assert params.extend == "max"
params = _ScatterParams(
x=self.x,
y=self.y,
c=self.c,
vmin=(np.min(self.c) + np.mean(self.c)) / 2,
vmax=(np.max(self.c) + np.mean(self.c)) / 2,
)
assert params.extend == "both"
params = _ScatterParams(x=self.x, y=self.y, c=self.c)
assert params.extend == "neither"
def test_check_vmin_vmax(self):
utils.assert_warns_message(
UserWarning,
"Cannot set `vmin` or `vmax` with constant `c=None`. "
"Setting `vmin = vmax = None`.",
_ScatterParams,
x=self.x,
y=self.y,
vmin=0,
)
utils.assert_warns_message(
UserWarning,
"Cannot set `vmin` or `vmax` with discrete data. " "Setting to `None`.",
_ScatterParams,
x=self.x,
y=self.y,
c=np.where(self.c > 0, "+", "-"),
vmin=0,
)
def test_check_legend(self):
utils.assert_raises_message(
ValueError,
"Received conflicting values for synonyms "
"`legend=True` and `colorbar=False`",
_ScatterParams,
x=self.x,
y=self.y,
legend=True,
colorbar=False,
)
utils.assert_warns_message(
UserWarning,
"`c` is a color array and cannot be used to create a "
"legend. To interpret these values as labels instead, "
"provide a `cmap` dictionary with label-color pairs.",
_ScatterParams,
x=self.x,
y=self.y,
c=self.array_c,
legend=True,
)
utils.assert_warns_message(
UserWarning,
"Cannot create a legend with constant `c=None`",
_ScatterParams,
x=self.x,
y=self.y,
c=None,
legend=True,
)
def test_check_size(self):
utils.assert_raises_message(
ValueError,
"Expected all axes of data to have the same length" ". Got [500, 100]",
_ScatterParams,
x=self.x,
y=self.y[:100],
)
utils.assert_raises_message(
ValueError,
"Expected all axes of data to have the same length" ". Got [500, 500, 100]",
_ScatterParams,
x=self.x,
y=self.y,
z=self.z[:100],
)
def test_check_c(self):
utils.assert_raises_message(
ValueError,
"Expected c of length 500 or 1. Got 100",
_ScatterParams,
x=self.x,
y=self.y,
c=self.c[:100],
)
def test_check_discrete(self):
utils.assert_raises_message(
ValueError,
"Cannot treat non-numeric data as continuous.",
_ScatterParams,
x=self.x,
y=self.y,
c=np.where(self.c > 0, "+", "-"),
discrete=False,
)
def test_check_cmap(self):
utils.assert_raises_message(
ValueError,
"Expected list-like `c` with dictionary cmap." " Got <class 'str'>",
_ScatterParams,
x=self.x,
y=self.y,
c="black",
cmap={"+": "k", "-": "r"},
)
utils.assert_raises_message(
ValueError,
"Cannot use dictionary cmap with " "continuous data.",
_ScatterParams,
x=self.x,
y=self.y,
c=self.c,
discrete=False,
cmap={"+": "k", "-": "r"},
)
utils.assert_raises_message(
ValueError,
"Dictionary cmap requires a color "
"for every unique entry in `c`. "
"Missing colors for [+]",
_ScatterParams,
x=self.x,
y=self.y,
c=np.where(self.c > 0, "+", "-"),
cmap={"-": "r"},
)
utils.assert_raises_message(
ValueError,
"Expected list-like `c` with list cmap. " "Got <class 'str'>",
_ScatterParams,
x=self.x,
y=self.y,
c="black",
cmap=["k", "r"],
)
def test_check_cmap_scale(self):
utils.assert_warns_message(
UserWarning,
"Cannot use non-linear `cmap_scale` with " "`c` as a color array.",
_ScatterParams,
x=self.x,
y=self.y,
c=self.array_c,
cmap_scale="log",
)
utils.assert_warns_message(
UserWarning,
"Cannot use non-linear `cmap_scale` with constant " "`c=black`.",
_ScatterParams,
x=self.x,
y=self.y,
c="black",
cmap_scale="log",
)
utils.assert_warns_message(
UserWarning,
"Cannot use non-linear `cmap_scale` with discrete data.",
_ScatterParams,
x=self.x,
y=self.y,
cmap_scale="log",
c=np.where(self.c > 0, "+", "-"),
)
def test_series_labels(self):
params = _ScatterParams(x=pd.Series(self.x, name="x"), y=self.y, c=self.c)
assert params.xlabel == "x"
assert params.ylabel is None
assert params.zlabel is None
params = _ScatterParams(x=self.x, y=pd.Series(self.y, name="y"), c=self.c)
assert params.xlabel is None
assert params.ylabel == "y"
assert params.zlabel is None
params = _ScatterParams(
x=self.x, y=self.y, z=pd.Series(self.y, name="z"), c=self.c
)
assert params.xlabel is None
assert params.ylabel is None
assert params.zlabel == "z"
# xlabel overrides series
params = _ScatterParams(
x=pd.Series(self.x, name="x"), y=self.y, c=self.c, xlabel="y"
)
assert params.xlabel == "y"
assert params.ylabel is None
assert params.zlabel is None
# label_prefix overrides series
params = _ScatterParams(
x=pd.Series(self.x, name="x"), y=self.y, c=self.c, label_prefix="y"
)
assert params.xlabel == "y1"
assert params.ylabel == "y2"
assert params.zlabel is None
# xlabel overrides label_prefix
params = _ScatterParams(
x=pd.Series(self.x, name="x"),
y=self.y,
z=self.y,
c=self.c,
label_prefix="y",
xlabel="test",
)
assert params.xlabel == "test"
assert params.ylabel == "y2"
assert params.zlabel == "y3"
def test_jitter_x(self):
params = _JitterParams(x=np.where(self.x > 0, "+", "-"), y=self.y)
np.testing.assert_array_equal(params.x_labels, ["+", "-"])
np.testing.assert_array_equal(
params.x_coords, np.where(self.x > 0, 0, 1)[params.plot_idx]
)
class Test10X(unittest.TestCase):
@classmethod
def setUpClass(self):
self.X = data.load_10X(sparse=False)
self.X_filt = scprep.filter.filter_empty_cells(self.X)
self.X_pca, self.S = scprep.reduce.pca(
scprep.utils.toarray(self.X), n_components=10, return_singular_values=True
)
@classmethod
def tearDownClass(self):
try_remove("test.png")
try_remove("test.gif")
try_remove("test.mp4")
try_remove("test_jitter.png")
try_remove("test_histogram.png")
try_remove("test_library_size.png")
try_remove("test_variable_genes.png")
try_remove("test_gene_expression.png")
try_remove("test_scree.png")
def tearDown(self):
plt.close("all")
def test_histogram(self):
scprep.plot.plot_library_size(self.X_filt, cutoff=1000, log=True)
scprep.plot.plot_library_size(
self.X_filt, cutoff=1000, log=True, xlabel="x label", ylabel="y label"
)
def test_histogram_list_of_lists(self):
scprep.plot.plot_library_size(scprep.utils.toarray(self.X_filt).tolist())
def test_histogram_array(self):
scprep.plot.plot_library_size(scprep.utils.toarray(self.X_filt))
def test_histogram_multiple(self):
scprep.plot.histogram(
[scprep.select.select_rows(self.X, idx=0), [1, 2, 2, 2, 3]],
color=["r", "b"],
label=["one", "two"],
)
def test_histogram_multiple_cutoff(self):
scprep.plot.plot_library_size(self.X_filt, cutoff=[500, 1000], log=True)
def test_histogram_multiple_percentile(self):
scprep.plot.plot_library_size(self.X_filt, percentile=[10, 90], log=True)
def test_histogram_log_negative_min(self):
scprep.plot.histogram([-1, 1, 1, 1], log="x")
scprep.plot.histogram([-1, 1, 1, 1], log=True)
scprep.plot.histogram([-1, -0.1, -0.1, 1], log="x")
scprep.plot.histogram([-1, -0.1, -0.1, 1], log=True)
def test_histogram_log_negative_max(self):
scprep.plot.histogram([-1, -1, -1, -1], log="x")
scprep.plot.histogram([-1, -1, -1, -1], log=True)
scprep.plot.histogram([-1, -1, -1, -2], log="x")
scprep.plot.histogram([-1, -1, -1, -2], log=True)
def test_histogram_log_zero_min(self):
scprep.plot.histogram([0, 1, 1, 1], log="x")
scprep.plot.histogram([0, 1, 1, 1], log=True)
scprep.plot.histogram([0, 0, -0.1, 1], log="x")
scprep.plot.histogram([0, 0, -0.1, 1], log=True)
def test_histogram_log_zero_max(self):
scprep.plot.histogram([-1, -1, 0, -1], log="x")
scprep.plot.histogram([-1, -1, 0, -1], log=True)
scprep.plot.histogram([-1, -1, 0, -2], log="x")
scprep.plot.histogram([-1, -1, 0, -2], log=True)
def test_plot_library_size_multiple(self):
scprep.plot.plot_library_size(
[
self.X_filt,
scprep.select.select_rows(
self.X_filt, idx=np.arange(self.X_filt.shape[0] // 2)
),
],
color=["r", "b"],
filename="test_library_size.png",
)
assert os.path.exists("test_library_size.png")
def test_plot_gene_set_expression_multiple(self):
scprep.plot.plot_gene_set_expression(
[
self.X,
scprep.select.select_rows(self.X, idx=np.arange(self.X.shape[0] // 2)),
],
starts_with="D",
color=["r", "b"],
)
def test_gene_set_expression_list_of_lists(self):
scprep.plot.plot_gene_set_expression(
scprep.utils.toarray(self.X).tolist(), genes=[0, 1]
)
def test_gene_set_expression_array(self):
scprep.plot.plot_gene_set_expression(scprep.utils.toarray(self.X), genes=[0, 1])
def test_plot_gene_set_expression_single_gene(self):
scprep.plot.plot_gene_set_expression(
self.X, color=["red"], genes="Arl8b", filename="test_gene_expression.png"
)
assert os.path.exists("test_gene_expression.png")
def test_plot_variable_genes(self):
scprep.plot.plot_gene_variability(self.X, filename="test_variable_genes.png")
assert os.path.exists("test_variable_genes.png")
def test_variable_genes_list_of_lists(self):
scprep.plot.plot_gene_variability(scprep.utils.toarray(self.X).tolist())
def test_histogram_single_gene_dataframe(self):
scprep.plot.histogram(
scprep.select.select_cols(self.X, idx=["Arl8b"]), color=["red"]
)
def test_histogram_single_gene_series(self):
scprep.plot.histogram(
scprep.select.select_cols(self.X, idx="Arl8b"), color=["red"]
)
def test_histogram_custom_axis(self):
fig, ax = plt.subplots()
scprep.plot.plot_gene_set_expression(
self.X,
genes=scprep.select.get_gene_set(self.X, starts_with="D"),
percentile=90,
log="y",
ax=ax,
title="histogram",
filename="test_histogram.png",
)
assert os.path.exists("test_histogram.png")
assert ax.get_title() == "histogram"
def test_histogram_invalid_axis(self):
utils.assert_raises_message(
TypeError,
"Expected ax as a matplotlib.axes.Axes. Got ",
scprep.plot.plot_library_size,
self.X,
ax="invalid",
)
def test_scree(self):
ax = scprep.plot.scree_plot(self.S)
assert all([t == int(t) for t in ax.get_xticks()]), ax.get_xticks()
ax = scprep.plot.scree_plot(
self.S,
cumulative=True,
xlabel="x label",
ylabel="y label",
filename="test_scree.png",
)
assert all([t == int(t) for t in ax.get_xticks()]), ax.get_xticks()
assert os.path.isfile("test_scree.png")
def test_scree_custom_axis(self):
fig, ax = plt.subplots()
scprep.plot.scree_plot(self.S, ax=ax)
assert all([t == int(t) for t in ax.get_xticks()]), ax.get_xticks()
def test_scree_invalid_axis(self):
utils.assert_raises_message(
TypeError,
"Expected ax as a matplotlib.axes.Axes. Got ",
scprep.plot.scree_plot,
self.S,
ax="invalid",
)
def test_scatter_continuous(self):
scprep.plot.scatter2d(
self.X_pca, c=self.X_pca[:, 0], legend_title="test", title="title test"
)
def test_scatter2d_one_point(self):
scprep.plot.scatter2d(self.X_pca[0], c=["red"])
def test_scatter3d_one_point(self):
scprep.plot.scatter3d(self.X_pca[0], c=["red"])
def test_scatter_discrete(self):
ax = scprep.plot.scatter2d(
self.X_pca,
c=np.random.choice(["hello", "world"], self.X_pca.shape[0], replace=True),
legend_title="test",
legend_loc="center left",
legend_anchor=(1.02, 0.5),
)
assert ax.get_legend().get_title().get_text() == "test"
def test_scatter_discrete_str_int(self):
ax = scprep.plot.scatter2d(
self.X_pca,
c=np.random.choice(["1", "2", "3"], self.X_pca.shape[0], replace=True),
legend_title="test",
legend_loc="center left",
legend_anchor=(1.02, 0.5),
)
assert ax.get_legend().get_title().get_text() == "test"
def test_jitter_discrete(self):
ax = scprep.plot.jitter(
np.where(self.X_pca[:, 0] > 0, "+", "-"),
self.X_pca[:, 1],
c=np.random.choice(["hello", "world"], self.X_pca.shape[0], replace=True),
legend_title="test",
title="jitter",
filename="test_jitter.png",
)
assert os.path.exists("test_jitter.png")
assert ax.get_legend().get_title().get_text() == "test"
assert ax.get_title() == "jitter"
assert ax.get_xlim() == (-0.5, 1.5)
assert [t.get_text() for t in ax.get_xticklabels()] == ["+", "-"]
def test_jitter_continuous(self):
ax = scprep.plot.jitter(
np.where(self.X_pca[:, 0] > 0, "+", "-"),
self.X_pca[:, 1],
c=self.X_pca[:, 1],
title="jitter",
legend_title="test",
)
assert ax.get_figure().get_axes()[1].get_ylabel() == "test"
assert ax.get_title() == "jitter"
assert ax.get_xlim() == (-0.5, 1.5)
assert [t.get_text() for t in ax.get_xticklabels()] == ["+", "-"]
def test_jitter_axis_labels(self):
ax = scprep.plot.jitter(
np.where(self.X_pca[:, 0] > 0, "+", "-"), self.X_pca[:, 1], xlabel="test"
)
assert ax.get_xlabel() == "test"
assert ax.get_ylabel() == ""
ax = scprep.plot.jitter(
pd.Series(np.where(self.X_pca[:, 0] > 0, "+", "-"), name="x"),
pd.Series(self.X_pca[:, 1], name="y"),
ylabel="override",
)
assert ax.get_xlabel() == "x"
assert ax.get_ylabel() == "override"
def test_scatter_dict(self):
scprep.plot.scatter2d(
self.X_pca,
c=np.random.choice(["hello", "world"], self.X_pca.shape[0], replace=True),
cmap={"hello": "red", "world": "green"},
)
def test_scatter_dict_c_none(self):
utils.assert_raises_message(
ValueError,
"Expected list-like `c` with dictionary cmap. Got <class 'NoneType'>",
scprep.plot.scatter2d,
self.X_pca,
c=None,
cmap={"hello": "red", "world": "green"},
)
def test_scatter_dict_continuous(self):
utils.assert_raises_message(
ValueError,
"Cannot use dictionary cmap with continuous data",
scprep.plot.scatter2d,
self.X_pca,
c=self.X_pca[:, 0],
discrete=False,
cmap={"hello": "red", "world": "green"},
)
def test_scatter_dict_missing(self):
utils.assert_raises_message(
ValueError,
"Dictionary cmap requires a color for every unique entry in `c`. "
"Missing colors for [world]",
scprep.plot.scatter2d,
self.X_pca,
c=np.random.choice(["hello", "world"], self.X_pca.shape[0], replace=True),
cmap={"hello": "red"},
)
def test_scatter_list_discrete(self):
scprep.plot.scatter2d(
self.X_pca,
c=np.random.choice(["hello", "world"], self.X_pca.shape[0], replace=True),
cmap=["red", "green"],
)
def test_scatter_list_discrete_missing(self):
scprep.plot.scatter2d(
self.X_pca,
c=np.random.choice(
["hello", "great", "world"], self.X_pca.shape[0], replace=True
),
cmap=["red", "green"],
)
def test_scatter_list_continuous(self):
scprep.plot.scatter2d(self.X_pca, c=self.X_pca[:, 0], cmap=["red", "green"])
def test_scatter_list_single(self):
scprep.plot.scatter2d(self.X_pca, c=self.X_pca[:, 0], cmap=["red"])
def test_scatter_list_c_none(self):
utils.assert_raises_message(
ValueError,
"Expected list-like `c` with list cmap. Got <class 'NoneType'>",
scprep.plot.scatter2d,
self.X_pca,
c=None,
cmap=["red", "green"],
)
def test_scatter_discrete_greater_than_10(self):
scprep.plot.scatter2d(self.X_pca, c=np.arange(self.X_pca.shape[0]) % 11)
def test_scatter_solid(self):
scprep.plot.scatter3d(self.X_pca, c="green")
def test_scatter_none(self):
scprep.plot.scatter2d(self.X_pca, c=None)
def test_scatter_no_ticks(self):
ax = scprep.plot.scatter3d(self.X_pca, zticks=False)
assert len(ax.get_zticks()) == 0
def test_scatter_no_ticklabels(self):
ax = scprep.plot.scatter3d(self.X_pca, zticklabels=False)
assert np.all([lab.get_text() == "" for lab in ax.get_zticklabels()])
def test_scatter_custom_ticks(self):
ax = scprep.plot.scatter2d(self.X_pca, xticks=[0, 1, 2])
assert np.all(ax.get_xticks() == np.array([0, 1, 2]))
ax = scprep.plot.scatter3d(self.X_pca, zticks=False)
assert np.all(ax.get_zticks() == np.array([]))
def test_scatter_custom_ticklabels(self):
ax = scprep.plot.scatter2d(
self.X_pca, xticks=[0, 1, 2], xticklabels=["a", "b", "c"]
)
assert np.all(ax.get_xticks() == np.array([0, 1, 2]))
xticklabels = np.array([lab.get_text() for lab in ax.get_xticklabels()])
assert np.all(xticklabels == np.array(["a", "b", "c"]))
def test_scatter_axis_labels(self):
ax = scprep.plot.scatter2d(self.X_pca.tolist(), label_prefix="test")
assert ax.get_xlabel() == "test1"
assert ax.get_ylabel() == "test2"
ax = scprep.plot.scatter3d(self.X_pca.tolist(), label_prefix="test")
assert ax.get_xlabel() == "test1"
assert ax.get_ylabel() == "test2"
assert ax.get_zlabel() == "test3"
ax = scprep.plot.scatter2d(self.X_pca, label_prefix="test", xlabel="override")
assert ax.get_xlabel() == "override"
assert ax.get_ylabel() == "test2"
ax = scprep.plot.scatter(
x=self.X_pca[:, 0],
y=pd.Series(self.X_pca[:, 1], name="y"),
            z=pd.Series(self.X_pca[:, 2], name="z"),
        )
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
        # GH 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
        # scalar raises Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
        tm.assert_isinstance(arr, Index)
import os
import sys
# -----------------------------------------------------------------------------
from datetime import datetime
import dateutil.parser
this_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.dirname(os.path.dirname(this_folder))
sys.path.append(root_folder + '/python')
sys.path.append(this_folder)
# -----------------------------------------------------------------------------
import ccxt # noqa: E402
# -----------------------------------------------------------------------------
exchange = ccxt.kraken()
symbol = 'ETH/USD'
# each ohlcv candle is a list of [ timestamp, open, high, low, close, volume ]
index = 4 # use close price from each ohlcv candle
# length = 80
# height = 15
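# --- illustration only (not part of the original example) -------------------
# A minimal sketch of how the candles described above could be fetched with
# ccxt and reduced to close prices. The timeframe and limit defaults are
# placeholder assumptions; supported values are listed in exchange.timeframes.
def fetch_closes(exchange, symbol, timeframe='1h', limit=500):
    # fetch_ohlcv returns rows of [timestamp, open, high, low, close, volume]
    ohlcv = exchange.fetch_ohlcv(symbol, timeframe=timeframe, limit=limit)
    return [candle[index] for candle in ohlcv]  # index == 4 selects the close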
from backtesting import Backtest, Strategy
from backtesting.lib import crossover
from backtesting.test import SMA, GOOG
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
# ----------------------------------------------------------------------------#
# Controllers.
# ----------------------------------------------------------------------------#
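# For reference, a minimal sketch of the conventional indicator-driven SMA
# crossover strategy from backtesting.py's documentation; the SmaCross class
# below instead carries a fixed list of pre-recorded signals. The window
# lengths n1/n2 are arbitrary placeholders.
class SmaCrossReference(Strategy):
    n1 = 10
    n2 = 20
    def init(self):
        close = self.data.Close
        self.sma1 = self.I(SMA, close, self.n1)
        self.sma2 = self.I(SMA, close, self.n2)
    def next(self):
        if crossover(self.sma1, self.sma2):
            self.buy()
        elif crossover(self.sma2, self.sma1):
            self.sell()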
class SmaCross(Strategy):
def __init__(self, broker, data, params):
super().__init__(broker, data, params)
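        # Pre-recorded trade history for ETHUSD on Kraken: each entry holds the
        # action, exchange, bar interval (minutes), fill price, realised profit
        # in percent and timestamp; presumably exported alerts replayed by the
        # strategy instead of live indicator signals.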
self.signals = [
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 1.34, "profit": -10.44776119402986,
"ticker": "ETHUSD", "time": "2015-09-02T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 1.2, "profit": 49.166666666666664,
"ticker": "ETHUSD", "time": "2015-09-11T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 0.61, "profit": 45.9016393442623,
"ticker": "ETHUSD", "time": "2015-10-26T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 0.89, "profit": -7.865168539325837,
"ticker": "ETHUSD", "time": "2015-11-07T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 0.96, "profit": -8.33333333333333,
"ticker": "ETHUSD", "time": "2015-11-09T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 0.88, "profit": -9.090909090909086,
"ticker": "ETHUSD", "time": "2015-11-12T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 0.96, "profit": -7.291666666666662,
"ticker": "ETHUSD", "time": "2015-11-19T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 0.89, "profit": -2.247191011235957,
"ticker": "ETHUSD", "time": "2015-11-26T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 0.91, "profit": -3.2967032967032996,
"ticker": "ETHUSD", "time": "2015-12-13T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 0.88, "profit": -5.681818181818187,
"ticker": "ETHUSD", "time": "2015-12-24T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 0.93, "profit": 365.59139784946234,
"ticker": "ETHUSD", "time": "2016-01-01T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 4.33, "profit": -15.704387990762116,
"ticker": "ETHUSD", "time": "2016-02-19T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 5.01, "profit": 107.18562874251501,
"ticker": "ETHUSD", "time": "2016-02-23T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 10.38, "profit": -12.8131021194605,
"ticker": "ETHUSD", "time": "2016-03-19T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.71, "profit": -6.575576430401378,
"ticker": "ETHUSD", "time": "2016-03-24T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 10.94, "profit": -5.575868372943338,
"ticker": "ETHUSD", "time": "2016-03-26T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.55, "profit": -3.549783549783551,
"ticker": "ETHUSD", "time": "2016-03-30T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 11.14, "profit": 18.850987432675055,
"ticker": "ETHUSD", "time": "2016-04-05T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 9.04, "profit": -5.97345132743362,
"ticker": "ETHUSD", "time": "2016-04-19T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 8.5, "profit": -0.8235294117647093,
"ticker": "ETHUSD", "time": "2016-04-22T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 8.57, "profit": 44.34072345390897,
"ticker": "ETHUSD", "time": "2016-05-02T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 12.37, "profit": -8.569118835893295,
"ticker": "ETHUSD", "time": "2016-05-27T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 13.43, "profit": -3.2762472077438534,
"ticker": "ETHUSD", "time": "2016-06-01T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 12.99, "profit": -8.468052347959967,
"ticker": "ETHUSD", "time": "2016-06-19T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 14.09, "profit": -5.2519517388218615,
"ticker": "ETHUSD", "time": "2016-06-27T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 13.35, "profit": 14.007490636704114,
"ticker": "ETHUSD", "time": "2016-06-29T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.48, "profit": 10.975609756097558,
"ticker": "ETHUSD", "time": "2016-07-15T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 12.74, "profit": 8.39874411302983,
"ticker": "ETHUSD", "time": "2016-07-30T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.67, "profit": -3.856041131105392,
"ticker": "ETHUSD", "time": "2016-08-10T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 11.22, "profit": -0.17825311942958622,
"ticker": "ETHUSD", "time": "2016-08-16T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.24, "profit": -1.7793594306049918,
"ticker": "ETHUSD", "time": "2016-08-26T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 11.04, "profit": -2.445652173913056,
"ticker": "ETHUSD", "time": "2016-08-30T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.31, "profit": 1.8567639257294346,
"ticker": "ETHUSD", "time": "2016-09-01T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 11.52, "profit": -2.256944444444443,
"ticker": "ETHUSD", "time": "2016-09-09T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 11.78, "profit": 10.016977928692713,
"ticker": "ETHUSD", "time": "2016-09-11T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 12.96, "profit": 3.8580246913580245,
"ticker": "ETHUSD", "time": "2016-10-07T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 12.46, "profit": -3.4510433386838,
"ticker": "ETHUSD", "time": "2016-10-19T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 12.03, "profit": 30.75644222776392,
"ticker": "ETHUSD", "time": "2016-10-25T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 8.33, "profit": -5.40216086434574,
"ticker": "ETHUSD", "time": "2016-12-10T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 7.88, "profit": -1.52284263959391,
"ticker": "ETHUSD", "time": "2016-12-17T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 8, "profit": 21.625000000000007,
"ticker": "ETHUSD", "time": "2016-12-30T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 9.73, "profit": -4.4193216855087325,
"ticker": "ETHUSD", "time": "2017-01-14T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 10.16, "profit": 3.2480314960629926,
"ticker": "ETHUSD", "time": "2017-01-18T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 10.49, "profit": -1.620591039084842,
"ticker": "ETHUSD", "time": "2017-01-30T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 10.66, "profit": 320.8255159474672,
"ticker": "ETHUSD", "time": "2017-02-01T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 44.86, "profit": -6.107891217119933,
"ticker": "ETHUSD", "time": "2017-04-04T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 47.6, "profit": 587.8991596638655,
"ticker": "ETHUSD", "time": "2017-04-14T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 327.44, "profit": 33.13889567554361,
"ticker": "ETHUSD", "time": "2017-06-22T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 218.93, "profit": -6.846937377243872,
"ticker": "ETHUSD", "time": "2017-07-21T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 203.94, "profit": -6.972639011473962,
"ticker": "ETHUSD", "time": "2017-07-27T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 218.16, "profit": 48.76237623762377,
"ticker": "ETHUSD", "time": "2017-08-02T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 324.54, "profit": 12.192641893141058,
"ticker": "ETHUSD", "time": "2017-09-05T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 284.97, "profit": -5.779555742709768,
"ticker": "ETHUSD", "time": "2017-09-21T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 268.5, "profit": -4.517690875232773,
"ticker": "ETHUSD", "time": "2017-09-23T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 280.63, "profit": 4.397249046787596,
"ticker": "ETHUSD", "time": "2017-09-24T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 292.97, "profit": -2.3927364576577776,
"ticker": "ETHUSD", "time": "2017-10-06T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 299.98, "profit": 3.250216681112074,
"ticker": "ETHUSD", "time": "2017-10-07T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 309.73, "profit": 1.4851644981112655,
"ticker": "ETHUSD", "time": "2017-10-20T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 305.13, "profit": -5.040474551830366,
"ticker": "ETHUSD", "time": "2017-10-31T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 289.75, "profit": -3.8239861949956806,
"ticker": "ETHUSD", "time": "2017-11-03T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 300.83, "profit": 41.02316923179205,
"ticker": "ETHUSD", "time": "2017-11-09T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 424.24, "profit": -5.407316613237789,
"ticker": "ETHUSD", "time": "2017-12-08T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 447.18, "profit": 47.464108412719696,
"ticker": "ETHUSD", "time": "2017-12-11T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 659.43, "profit": -12.372806818009499,
"ticker": "ETHUSD", "time": "2017-12-25T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 741.02, "profit": 35.992280910096895,
"ticker": "ETHUSD", "time": "2017-12-27T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 1007.73, "profit": -10.48495132624811,
"ticker": "ETHUSD", "time": "2018-01-18T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 1113.39, "profit": -10.807533748282285,
"ticker": "ETHUSD", "time": "2018-01-28T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 993.06, "profit": 12.041568485287891,
"ticker": "ETHUSD", "time": "2018-02-02T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 873.48, "profit": -4.2233365389018624,
"ticker": "ETHUSD", "time": "2018-02-15T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 836.59, "profit": 51.200707634564125,
"ticker": "ETHUSD", "time": "2018-02-23T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 408.25, "profit": 71.69381506429885,
"ticker": "ETHUSD", "time": "2018-04-11T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 700.94, "profit": 13.140925043512997,
"ticker": "ETHUSD", "time": "2018-05-12T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 608.83, "profit": -7.21876385854837,
"ticker": "ETHUSD", "time": "2018-06-04T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 564.88, "profit": 5.762285795213135,
"ticker": "ETHUSD", "time": "2018-06-11T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 532.33, "profit": -9.648150583284806,
"ticker": "ETHUSD", "time": "2018-06-21T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 480.97, "profit": 3.0105827806308123,
"ticker": "ETHUSD", "time": "2018-06-23T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 466.49, "profit": -4.844691204527432,
"ticker": "ETHUSD", "time": "2018-07-04T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 443.89, "profit": -5.564441640947079,
"ticker": "ETHUSD", "time": "2018-07-11T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 468.59, "profit": -2.2983845152478675,
"ticker": "ETHUSD", "time": "2018-07-17T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 457.82, "profit": -3.0448647940238516,
"ticker": "ETHUSD", "time": "2018-07-24T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 471.76, "profit": -2.327454637951505,
"ticker": "ETHUSD", "time": "2018-07-25T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 460.78, "profit": 36.85055774990233,
"ticker": "ETHUSD", "time": "2018-07-31T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 290.98, "profit": -4.642930785621002,
"ticker": "ETHUSD", "time": "2018-08-30T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 277.47, "profit": 21.425739719609332,
"ticker": "ETHUSD", "time": "2018-09-05T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 218.02, "profit": -5.2839189065223415,
"ticker": "ETHUSD", "time": "2018-09-17T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 206.5, "profit": -3.2687651331719128,
"ticker": "ETHUSD", "time": "2018-09-20T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 213.25, "profit": 1.1629542790152354,
"ticker": "ETHUSD", "time": "2018-09-21T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 215.73, "profit": -6.712093820979938,
"ticker": "ETHUSD", "time": "2018-09-27T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 230.21, "profit": -3.9268493983754054,
"ticker": "ETHUSD", "time": "2018-09-30T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 221.17, "profit": -2.233575982276089,
"ticker": "ETHUSD", "time": "2018-10-04T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 226.11, "profit": -4.281102118437931,
"ticker": "ETHUSD", "time": "2018-10-09T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 216.43, "profit": 6.154414822344403,
"ticker": "ETHUSD", "time": "2018-10-11T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 203.11, "profit": 1.654276008074435,
"ticker": "ETHUSD", "time": "2018-11-05T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 206.47, "profit": 52.5403206276941,
"ticker": "ETHUSD", "time": "2018-11-14T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 97.99, "profit": 33.69731605265843,
"ticker": "ETHUSD", "time": "2018-12-19T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 131.01, "profit": 13.31959392412792,
"ticker": "ETHUSD", "time": "2019-01-11T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 113.56, "profit": 19.41704825642833,
"ticker": "ETHUSD", "time": "2019-02-09T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 135.61, "profit": -0.29496349826707263,
"ticker": "ETHUSD", "time": "2019-02-28T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 136.01, "profit": -2.705683405631923,
"ticker": "ETHUSD", "time": "2019-03-08T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 132.33, "profit": -4.60968790145847,
"ticker": "ETHUSD", "time": "2019-03-12T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 138.43, "profit": -1.9215487972260323,
"ticker": "ETHUSD", "time": "2019-03-17T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 135.77, "profit": -1.1269057965677256,
"ticker": "ETHUSD", "time": "2019-03-25T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 137.3, "profit": 19.235251274581206,
"ticker": "ETHUSD", "time": "2019-03-29T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 163.71, "profit": -4.746197544438325,
"ticker": "ETHUSD", "time": "2019-04-14T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 171.48, "profit": -3.364823886167473,
"ticker": "ETHUSD", "time": "2019-04-19T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 165.71, "profit": 1.5750407338120893,
"ticker": "ETHUSD", "time": "2019-04-25T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 163.1, "profit": 52.02942979767015,
"ticker": "ETHUSD", "time": "2019-05-04T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 247.96, "profit": -3.4360380706565614,
"ticker": "ETHUSD", "time": "2019-06-05T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 256.48, "profit": 14.683406113537103,
"ticker": "ETHUSD", "time": "2019-06-14T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 294.14, "profit": -4.004895627932288,
"ticker": "ETHUSD", "time": "2019-07-02T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 305.92, "profit": -8.358394351464431,
"ticker": "ETHUSD", "time": "2019-07-09T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 280.35, "profit": 22.38273586588194,
"ticker": "ETHUSD", "time": "2019-07-12T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 217.6, "profit": -3.074448529411764,
"ticker": "ETHUSD", "time": "2019-08-03T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 210.91, "profit": 15.17708975392347,
"ticker": "ETHUSD", "time": "2019-08-10T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 178.9, "profit": 0.19564002235885652,
"ticker": "ETHUSD", "time": "2019-09-09T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 179.25, "profit": -0.1059972105997198,
"ticker": "ETHUSD", "time": "2019-09-25T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 179.44, "profit": -1.0309852875612986,
"ticker": "ETHUSD", "time": "2019-10-09T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 177.59, "profit": -1.0867729038797267,
"ticker": "ETHUSD", "time": "2019-10-17T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 179.52, "profit": 2.785204991087344,
"ticker": "ETHUSD", "time": "2019-10-27T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 184.52, "profit": 28.490136570561468,
"ticker": "ETHUSD", "time": "2019-11-15T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 131.95, "profit": 21.629405077680957,
"ticker": "ETHUSD", "time": "2019-12-30T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 160.49, "profit": -4.2619477849087195,
"ticker": "ETHUSD", "time": "2020-01-26T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 167.33, "profit": 49.81174923803262,
"ticker": "ETHUSD", "time": "2020-01-28T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 250.68, "profit": 3.949258018190524,
"ticker": "ETHUSD", "time": "2020-02-26T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 240.78, "profit": -11.238474956391729,
"ticker": "ETHUSD", "time": "2020-03-07T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 213.72, "profit": 35.97230020587684,
"ticker": "ETHUSD", "time": "2020-03-09T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 136.84, "profit": -5.999707687810588,
"ticker": "ETHUSD", "time": "2020-03-25T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 128.63, "profit": -5.053253517841872,
"ticker": "ETHUSD", "time": "2020-03-30T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 135.13, "profit": 15.866202915710796,
"ticker": "ETHUSD", "time": "2020-04-02T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 156.57, "profit": -5.492750846266842,
"ticker": "ETHUSD", "time": "2020-04-16T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 165.17, "profit": 18.556638614760555,
"ticker": "ETHUSD", "time": "2020-04-17T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 195.82, "profit": -3.390869165560216,
"ticker": "ETHUSD", "time": "2020-05-11T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 202.46, "profit": 0.42477526424972106,
"ticker": "ETHUSD", "time": "2020-05-18T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 203.32, "profit": -3.3395632500491934,
"ticker": "ETHUSD", "time": "2020-05-26T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 210.11, "profit": 12.65527580791013,
"ticker": "ETHUSD", "time": "2020-05-29T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 236.7, "profit": -1.1280101394169901,
"ticker": "ETHUSD", "time": "2020-06-13T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 239.37, "profit": -3.776580189664533,
"ticker": "ETHUSD", "time": "2020-06-23T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 230.33, "profit": -1.5542916684756583,
"ticker": "ETHUSD", "time": "2020-06-27T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 233.91, "profit": 0.3975888162113662,
"ticker": "ETHUSD", "time": "2020-07-07T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 234.84, "profit": -2.840231647078857,
"ticker": "ETHUSD", "time": "2020-07-17T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 241.51, "profit": 67.42992008612481,
"ticker": "ETHUSD", "time": "2020-07-22T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 404.36, "profit": -1.0559897121376944,
"ticker": "ETHUSD", "time": "2020-08-22T02:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 408.63, "profit": -6.277072167975914,
"ticker": "ETHUSD", "time": "2020-08-31T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 382.98, "profit": 2.9818789492923954,
"ticker": "ETHUSD", "time": "2020-09-05T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 371.56, "profit": -6.171277855528046,
"ticker": "ETHUSD", "time": "2020-09-14T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 348.63, "profit": -2.303301494421028,
"ticker": "ETHUSD", "time": "2020-09-22T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 356.66, "profit": -2.716873212583412,
"ticker": "ETHUSD", "time": "2020-09-30T02:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 346.97, "profit": -4.668991555465887,
"ticker": "ETHUSD", "time": "2020-10-03T14:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 363.17, "profit": 6.925131481124536,
"ticker": "ETHUSD", "time": "2020-10-10T14:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 388.32, "profit": -3.8164400494437563,
"ticker": "ETHUSD", "time": "2020-10-30T01:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 403.14, "profit": 32.527161780026795,
"ticker": "ETHUSD", "time": "2020-11-06T01:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 534.27, "profit": -5.852845939319068,
"ticker": "ETHUSD", "time": "2020-11-29T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 565.54, "profit": -0.16444460161968208,
"ticker": "ETHUSD", "time": "2020-11-30T13:00:00"},
{"action": "sell", "exchange": "KRAKEN", "interval": "720", "price": 564.61, "profit": -3.4501691433024595,
"ticker": "ETHUSD", "time": "2020-12-09T13:00:00"},
{"action": "buy", "exchange": "KRAKEN", "interval": "720", "price": 584.09, "profit": 0, "ticker": "ETHUSD",
"time": "2020-12-15T13:00:00"}]
def init(self):
price = self.data.Close
def next(self):
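        # Replay the pre-computed signal list: place a buy/sell order whenever the
        # current bar's timestamp matches a recorded signal entry.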
for s in self.signals:
if s['time'] == self.data.index and s['action'] == "buy":
self.buy()
if s['time'] == self.data.index and s['action'] == "sell":
self.sell()
def print_chart(exchange, symbol, timeframe):
# get a list of ohlcv candles
ohlcv = exchange.fetch_ohlcv(symbol, timeframe, since=10000000001)
print("\n" + exchange.name + ' ' + symbol + ' ' + timeframe + ' chart:')
df = pd.DataFrame(np.row_stack(ohlcv)).rename(
columns={0: "Date", 1: "Open", 2: "High", 3: "Low", 4: "Close", 5: "Volume"})
df['Date'] = df['Date'].apply(lambda x: datetime.fromtimestamp(float(x) / 1000.0).strftime('%Y-%m-%d %H:%M:%S.%f'))
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
# read Excel
df = | pd.read_excel('xacts.xlsx', sheetname='All Transactions') | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 11:06:22 2021
@author: madeline
"""
'''
This script converts VCF files that have been annotated by snpEFF into GVF files, including the functional annotation.
Note that the strain is obtained by parsing the file name, expected to contain the substring "/strainnamehere_ids".
Required user input is either a single VCF file or a directory containing VCF files.
Eg:
python vcf2gvf.py --vcfdir ./22_07_2021/
To also output tsvs of the unmatched mutation names:
python vcf2gvf.py --vcfdir ./22_07_2021/ --names
'''
import argparse
import pandas as pd
import re
import glob
import os
import numpy as np
from cyvcf2 import VCF, Writer
def parse_args():
parser = argparse.ArgumentParser(
description='Converts snpEFF-annotated VCF files to GVF files with functional annotation')
#make --file or --directory options mutually exclusive
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--vcfdir', type=str, default=None,
help='Path to folder containing snpEFF-annotated VCF files')
group.add_argument('--vcffile', type=str, default=None,
help='Path to a snpEFF-annotated VCF file')
#filepath can be absolute (~/Desktop/test/22_07_2021/) or relative (./22_07_2021/)
parser.add_argument('--pokay', type=str, default='functional_annotation_V.0.2.tsv',
help='Anoosha\'s parsed pokay .tsv file')
parser.add_argument('--clades', type=str, default='clade_defining_mutations.tsv',
help='.tsv of clade-defining mutations')
parser.add_argument('--outdir', type=str, default='./gvf_files/',
help='Output directory for finished GVF files: folder will be created if it doesn\'t already exist')
parser.add_argument("--names", help="Save unmatched mutation names to .tsvs for troubleshooting naming formats", action="store_true")
return parser.parse_args()
gvf_columns = ['#seqid','#source','#type','#start','#end','#score','#strand','#phase','#attributes']
vcf_colnames = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'unknown']
def vcftogvf(var_data, strain):
df = pd.read_csv(var_data, sep='\t', names=vcf_colnames)
df = df[~df['#CHROM'].str.contains("#")] #remove pragmas
df = df.reset_index(drop=True) #restart index from 0
new_df = pd.DataFrame(index=range(0,len(df)),columns=gvf_columns)
#parse EFF column
eff_info = df['INFO'].str.findall('\((.*?)\)') #series: extract everything between parentheses as elements of a list
eff_info = eff_info.apply(pd.Series)[0] #take first element of list
eff_info = eff_info.str.split(pat='|').apply(pd.Series) #split at pipe, form dataframe
#hgvs names
hgvs = eff_info[3].str.rsplit(pat='c.').apply(pd.Series)
hgvs_protein = hgvs[0].str[:-1]
    hgvs_protein = hgvs_protein.replace(r'^\s+$', np.nan, regex=True)
hgvs_nucleotide = 'c.' + hgvs[1]
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'Name=' + hgvs_protein + ';'
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'nt_name=' + hgvs_nucleotide + ';'
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'gene=' + eff_info[5] + ';' #gene names
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'mutation_type=' + eff_info[1] + ';' #mutation type
#columns copied straight from Zohaib's file
for column in ['REF','ALT']:
key = column.lower()
if key=='ref':
key = 'Reference_seq'
elif key=='alt':
key = 'Variant_seq'
new_df['#attributes'] = new_df['#attributes'].astype(str) + key + '=' + df[column].astype(str) + ';'
#add ao, dp, ro
info = df['INFO'].str.split(pat=';').apply(pd.Series) #split at ;, form dataframe
new_df['#attributes'] = new_df['#attributes'] + info[5].str.lower() + ';' #ao
new_df['#attributes'] = new_df['#attributes'] + info[7].str.lower() + ';' #dp
new_df['#attributes'] = new_df['#attributes'] + info[28].str.lower() + ';' #ro
#add strain name
new_df['#attributes'] = new_df['#attributes'] + 'viral_lineage=' + strain + ';'
#add WHO strain name
alt_strain_names = {'B.1.1.7': 'Alpha', 'B.1.351': 'Beta', 'P.1': 'Gamma', 'B.1.617.2': 'Delta', 'B.1.427': 'Epsilon', 'B.1.429': 'Epsilon', 'P.2': 'Zeta', 'B.1.525': 'Eta', 'P.3': 'Theta', 'B.1.526': 'Iota', 'B.1.617.1': 'Kappa'}
new_df['#attributes'] = new_df['#attributes'] + 'who_label=' + alt_strain_names.get(strain) + ';'
#add VOC/VOI designation
if strain in {'Alpha', 'Beta', 'Gamma', 'Delta'}:
new_df['#attributes'] = new_df['#attributes'] + 'status=VOC;'
else:
new_df['#attributes'] = new_df['#attributes'] + 'status=VOI;'
#remove starting NaN; leave trailing ';'
new_df['#attributes'] = new_df['#attributes'].str[3:]
#fill in other GVF columns
new_df['#seqid'] = df['#CHROM']
new_df['#source'] = '.'
new_df['#type'] = info[40].str.split(pat='=').apply(pd.Series)[1]
new_df['#start'] = df['POS']
new_df['#end'] = (df['POS'].astype(int) + df['ALT'].str.len() - 1).astype(str) #this needs fixing
new_df['#score'] = '.'
new_df['#strand'] = '+'
new_df['#phase'] = '.'
new_df = new_df[gvf_columns] #only keep the columns needed for a gvf file
return new_df
#takes 3 arguments: an output file of vcftogvf.py, Anoosha's annotation file from Pokay, and the clade defining mutations tsv.
def add_functions(gvf, annotation_file, clade_file, strain):
#load files into Pandas dataframes
df = pd.read_csv(annotation_file, sep='\t', header=0) #load functional annotations spreadsheet
clades = pd.read_csv(clade_file, sep='\t', header=0, usecols=['strain', 'mutation']) #load clade-defining mutations file
clades = clades.loc[clades.strain == strain] #only look at the relevant part of that file
attributes = gvf["#attributes"].str.split(pat=';').apply(pd.Series)
hgvs_protein = attributes[0].str.split(pat='=').apply(pd.Series)[1]
hgvs_nucleotide = attributes[1].str.split(pat='=').apply(pd.Series)[1]
gvf["mutation"] = hgvs_protein.str[2:] #drop the prefix
#merge annotated vcf and functional annotation files by 'mutation' column in the gvf
for column in df.columns:
df[column] = df[column].str.lstrip()
merged_df = pd.merge(df, gvf, on=['mutation'], how='right') #add functional annotations
merged_df = | pd.merge(clades, merged_df, on=['mutation'], how='right') | pandas.merge |
import os
import pandas as pd
from utilies import warn_by_qq, data_load, hyper_tuner
def main():
item_dict = {'PH': 0, 'DO': 1, 'CODMN': 2, 'BOD': 3, 'AN': 4, 'TP': 5, 'CODCR': 6}
item_name_list = ['PH', 'DO', 'CODMN', 'BOD', 'AN', 'TP', 'CODCR']
data_path = "ziya.csv"
log_path = "alog"
save_name = "a_tune.csv"
main_path = "a_tune"
# if not os.path.exists(main_path):
# os.mkdir(main_path)
train_data, valid_data, test_data = data_load(data_path)
for time_steps in range(1, 13):
for item, item_index in item_dict.items():
item_name = item_name_list[item_index]
results = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""Compares informal collaboration by cohort of researchers and publication
year of papers.
"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from _205_compute_centralities import p_to_stars
OUTPUT_FOLDER = "./990_output/Figures/"
PERSON = {"groups": [0, 1970, 1980, 1990, 2000, 2020],
"labels": ["<1970", "1970-1979", "1980-1989", "1990-1999", ">=2000"],
"var": "first_pub_year", "name": "person"}
PAPER = {"groups": [1997, 2002, 2007, 2020],
"labels": ["1997-2001", "2002-2006", "2007-2011"],
"var": "year", "name": "paper"}
DEP_VARS = {'num_com_n': {"title": "Average number of commenters per author",
"ylabel": "No. of commenters per author"},
'num_con_n': {"title": "Average number of conferences per author",
"ylabel": "No. of conferences per author"},
'num_sem_n': {"title": "Average number of seminars per author",
"ylabel": "No. of seminars per author"},
'num_auth': {"title": "Average number of co-authors",
"ylabel": "No. of co-authors"}}
def add_plot(data, mat, var, ax, scale, ylabel=None, title=None, lw=1.5,
arrowheight=0.05):
"""Add plot with bars and t-test indicators to a specific ax."""
# Start plot showing means and error bars
sns.barplot(y=var, x='cohort', data=data, ax=ax)
# Aesthetics
ax.set(ylabel=ylabel, xlabel="")
ax.set_title(title, pad=15)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Parameters
distance = ax.patches[1].get_bbox().x1 - ax.patches[0].get_bbox().x0
arrow = {"arrowstyle": f'-[, widthB={distance*scale[0]}, lengthB={scale[1]}',
"lw": lw, "color": 'black'}
# Draw bracket spanning neighboring bars with significance stars
for idx in range(mat.shape[0]-1):
stars = mat.iloc[idx][idx+1]
if stars:
x0 = ax.patches[idx].get_bbox().x0
x1 = ax.patches[idx+1].get_bbox().x1
y = max([ax.lines[idx].get_ydata()[-1],
ax.lines[idx+1].get_ydata()[-1]])
height = y*(1 + arrowheight)
ax.annotate(stars, xytext=((x0+x1)/2, height), arrowprops=arrow,
xy=((x0+x1)/2, height), va='bottom', ha='center')
def compare_means(group_1_years, group_2_years, year_column, var, df):
"""Compares means in y for two cohorts defined by year ranges via
Welch's t-test.
"""
from scipy.stats import ttest_ind
group1 = df[(df[year_column].isin(group_1_years))]
group2 = df[(df[year_column].isin(group_2_years))]
ttest = ttest_ind(group1[var], group2[var], equal_var=False)
return ttest
def compute_tstats(data, ranges, var, by_var, verbose=True):
"""Compute lower triangular matrix of significance levels of
pairwise t-tests.
"""
if verbose:
print(f"...{var}:")
mat = pd.DataFrame(columns=ranges, index=ranges)
for idx1, r1 in enumerate(ranges):
for idx2 in range(idx1+1, len(ranges)):
r2 = ranges[idx2]
ttest = compare_means(r1, r2, by_var, var, data)
stars = p_to_stars(ttest[1])
if verbose and stars:
print(f"...{r1} vs. {r2}: {ttest[1]:.2}")
mat.iloc[idx1][idx2] = mat.iloc[idx2][idx1] = stars
return mat
def read_paper_file():
"""Read file with paper-specific information."""
PAPER_FILE = "./580_paper_sample/master.csv"
columns = ["year", "with", "num_auth", "num_coms", "num_con", "num_sem"]
df = | pd.read_csv(PAPER_FILE, usecols=columns, encoding="utf8") | pandas.read_csv |
from __future__ import print_function
# from builtins import str
# from builtins import object
import pandas as pd
from openpyxl import load_workbook
import numpy as np
import os
from .data_utils import make_dir
class XlsxRecorder(object):
"""
xlsx recorder for results
    including two recorders: one for the current experiment, which records details of results changed by iteration;
    the other records the summary of different experiments, which is saved at summary_path
1. detailed results: saved in fig_save_path/results.xlsx
    ** Sheet1: #total_filename x #metrics, along each row direction, are the records by iteration
** batch_0: #batch_filename x #metric_by_label , along each column direction, are the records by iteration
** batch_1: same as batch_0
** ......
2. task results: saved in ../data/summary.xlsx
** Sheet1: task_name * #metrics recorded by iteration
"""
def __init__(self, expr_name, saving_path='', folder_name=''):
self.expr_name = expr_name
if not len(saving_path):
self.saving_path = '../data/'+expr_name #saving_path
else:
self.saving_path = saving_path
self.saving_path = os.path.abspath(self.saving_path)
self.folder_name = folder_name
if len(folder_name):
self.saving_path = os.path.join(self.saving_path, folder_name)
"""path of saving excel, default is the same as the path of saving figures"""
self.writer_path = None
self.xlsx_writer = None
self.summary_path = '../data/summary.xlsx'
"""the path for summary, which can record results from different experiments"""
self.measures = ['iou', 'precision', 'recall', 'dice']
"""measures to record"""
self.batch_count = {}
self.row_space = 50
self.column_space = 10
self.start_row = 0
self.summary = None
self.avg_buffer = {}
self.iter_info_buffer = []
self.name_list_buffer = []
self.init_summary()
print("the update space in detailed files is {}".format(self.row_space))
    def init_summary(self):
        """ init two recorders, initialization would create a new recorder for this experiment, recording all details
at the same time it would load the data from summary recorder, then it would append the new experiment summary to summary recorder
"""
if not os.path.exists(self.saving_path ):
os.makedirs(self.saving_path )
self.writer_path = os.path.join(self.saving_path, 'results.xlsx')
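        # Create an empty results workbook with xlsxwriter first, then reopen it with
        # openpyxl below so that result sheets can be appended to it incrementally.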
writer = pd.ExcelWriter(self.writer_path, engine='xlsxwriter')
df = pd.DataFrame([])
df.to_excel(writer)
worksheet = writer.sheets['Sheet1']
worksheet.set_column(1, 1000, 30)
writer.save()
writer.close()
self.writer_book = load_workbook(self.writer_path)
self.xlsx_writer = | pd.ExcelWriter(self.writer_path, engine='openpyxl') | pandas.ExcelWriter |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series( | pd.Categorical([1, 2, 3]) | pandas.Categorical |
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import torch
from pytorch_forecasting.metrics import QuantileLoss
from pytorch_forecasting import TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer
from config import load_config
from load_data import LoadData
spec = load_config("config.yaml")
MODEL_PATH_NBEATS = spec["general"]["model_path_nbeats"]
SCALER_PATH_NBEATS = spec["general"]["scaler_path_nbeats"]
DATA_PATH = spec["general"]["data_path"]
FOLDER_LIST = spec["general"]["folder_list"]
MODEL_PATH = spec["model"]["model_path"]
BATCH_SIZE = spec["model"]["batch_size"]
MAX_EPOCHS = spec["model"]["max_epochs"]
GPUS = spec["model"]["gpus"]
LEARNING_RATE = spec["model"]["learning_rate"]
HIDDEN_SIZE = spec["model"]["hidden_size"]
DROPOUT = spec["model"]["dropout"]
HIDDEN_CONTINUOUS_SIZE = spec["model"]["hidden_continuous_size"]
GRADIENT_CLIP_VAL = spec["model"]["gradient_clip_val"]
ATTENTION_HEAD_SIZE = spec["model"]["attention_head_size"]
LSTM_LAYERS = spec["model"]["lstm_layers"]
lags = spec["model"]["lags"]
sma = spec["model"]["sma"]
time_varying_known_reals = spec["model"]["time_varying_known_reals"]
if lags:
lags_columns = [f"lag_{lag}" for lag in range(lags, 0, -1)]
time_varying_known_reals = time_varying_known_reals + lags_columns
if sma:
sma_columns = [f"sma_{sma}" for sma in sma]
time_varying_known_reals = time_varying_known_reals + sma_columns
time_varying_known_categoricals = spec["model"]["time_varying_known_categoricals"]
max_prediction_length = spec["model"]["max_prediction_length"]
max_encoder_length = spec["model"]["max_encoder_length"]
sample = spec["model"]["sample"]
cutoff = spec["model"]["cutoff"]
# _________________________________________________________________________________________________________________
# Load Data for PyTorch Models:
train_data, test_data = LoadData(
data_path=DATA_PATH,
folder_list=FOLDER_LIST,
cutoff=cutoff,
sample="60min",
date_features=True,
sma=sma,
lags=lags,
time_idx=True,
).load_data(
min_obs=700,
reduce_memory=["cat", "float", "int"]
)
# _________________________________________________________________________________________________________________
# Load Temporal Fusion Transformer Model:
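# The dataset definition below mirrors the parameters used at training time so that
# the saved TFT weights can be loaded onto a matching architecture.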
training = TimeSeriesDataSet(
train_data,
time_idx="time_idx",
target="value",
group_ids=["id"],
min_encoder_length=max_encoder_length // 2,
max_encoder_length=max_encoder_length,
min_prediction_length=1,
max_prediction_length=max_prediction_length,
static_categoricals=["id"],
time_varying_known_reals=time_varying_known_reals,
time_varying_known_categoricals=time_varying_known_categoricals,
time_varying_unknown_categoricals=[],
time_varying_unknown_reals=["value"],
target_normalizer=GroupNormalizer(
groups=["id"], transformation="softplus"
),
add_relative_time_idx=True,
add_target_scales=True,
add_encoder_length=True,
)
model = TemporalFusionTransformer.from_dataset(
training,
learning_rate=LEARNING_RATE,
hidden_size=HIDDEN_SIZE,
lstm_layers=LSTM_LAYERS,
attention_head_size=ATTENTION_HEAD_SIZE,
dropout=DROPOUT,
hidden_continuous_size=HIDDEN_CONTINUOUS_SIZE,
output_size=7,
loss=QuantileLoss(),
log_interval=10,
reduce_on_plateau_patience=4,
)
model.load_state_dict(torch.load("/Volumes/GoogleDrive/My Drive/Colab_Notebooks/model/tft.pt"))
errors = pd.DataFrame()
for data_name in train_data.id.unique().tolist():
# _________________________________________________________________________________________________________________
# Load Exponential Smoothing Model:
with open(f"model/exponential_smoothing/{data_name}.pickle", "rb") as f:
model_es = pickle.load(f)
# _________________________________________________________________________________________________________________
# Exponential Smoothing Data:
test_data_es = test_data[
test_data["id"] == data_name
][["date", "value"]].set_index("date", drop=True)["value"]
test_data_es.index.freq = pd.infer_freq(test_data_es.index)
# _________________________________________________________________________________________________________________
# Temporal Fusion Transformer Data:
test_data_tft = test_data[
(test_data["id"] == data_name)
].reset_index(drop=True)
# _________________________________________________________________________________________________________________
# Forecast:
errors_data_name = pd.DataFrame()
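    # Rolling-origin evaluation: slide the encoder window forward one step at a time,
    # update the ES state on that window, and forecast max_prediction_length steps
    # ahead with each model.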
for start in range(0, 107, 1):
try:
# Update ES with new Data.
model_es.update(
test_data_es[start:(start + max_encoder_length)],
update_params=False,
)
# Make forecast for es & tft:
y_hat_es = model_es.predict(list(range(1, max_prediction_length + 1)))
y_hat_tft = pd.Series(
index=y_hat_es.index,
data=model.predict(
test_data_tft[start:(start + max_encoder_length)],
mode="prediction",
return_x=True)[0][0].tolist()
)
y_hat_nbeats = pd.read_csv(
f"/Volumes/GoogleDrive/My Drive/Colab_Notebooks/data_nbeats/{data_name}_{start}.csv"
)
y_hat_nbeats = y_hat_nbeats.set_index(pd.to_datetime(y_hat_nbeats.time))
y_hat_nbeats = y_hat_nbeats.drop(columns="time")
y_obs = test_data_es[start + max_encoder_length: start + max_encoder_length + max_prediction_length]
# Plot forecasts and observed values
ax = test_data_es[start: start + max_encoder_length + max_prediction_length].plot(
figsize=(10, 6),
marker="o",
color="black",
label="observed"
)
y_hat_es.plot(ax=ax, style="--", marker="o", color="red",
label="exponential_smoothing")
y_hat_tft.plot(ax=ax, style="--", marker="o", color="blue",
label="temporal_fusion_transformer")
# y_hat_nbeats.plot(ax=ax, style="--", marker="o", color="green",
# label="N=BEATS")
df_errors = pd.concat([y_obs, y_hat_tft, y_hat_es, y_hat_nbeats], axis=1).reset_index(drop=True)
df_errors.columns = ["observed", "tft", "ets", "nbeats"]
df_errors["step"] = [step for step in range(1, max_prediction_length + 1, 1)]
errors = pd.concat([errors, df_errors], axis=0)
errors_data_name = | pd.concat([errors_data_name, df_errors], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries and set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
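    # Load the SNV, indel and CNV count tables and merge them into a single
    # per-sample feature matrix keyed on the 'sample' column.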
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
df_cnv = df_cnv[cnv_categories]
df_cnv["sample"] = df_cnv["sample"].astype(str)
df_sigs = pd.merge(df_snv, df_indel, on="sample", how='left').fillna(0)
df_sigs = pd.merge(df_sigs, df_cnv, on="sample", how='left').reset_index(drop=True)
return df_sigs
#%% ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
#file from paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
figdir = os.path.join(rootdir, "figures", "sup_fig1")
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False).query('(cancer != "BC")').reset_index(drop=True)
df = | pd.merge(sample_labels, sigs, how='left', on='sample') | pandas.merge |
from load_dataset import load_dataset
from load_dataset import split_data
from load_dataset import accuracy_metric
import numpy as np
import pandas as pd
import pandasql as ps
if __name__ == "__main__":
Y_full = pd.read_csv('emittance_labels.csv')
X_full = | pd.read_csv('unit_cell_data_16.csv') | pandas.read_csv |
from sklearn.cluster import MeanShift, estimate_bandwidth
import pandas as pd
import glob
from pathlib import Path
from spatiotemporal.util import sampling
def load_data_nrel(path, resampling=None):
    ## some resampling options: 'H' - hourly, '15min' - 15 minutes, 'M' - monthly
## more options at:
## http://benalexkeen.com/resampling-time-series-data-with-pandas/
allFiles = glob.iglob(path + "/**/*.txt", recursive=True)
frame = pd.DataFrame()
list_ = []
for file_ in allFiles:
#print("Reading: ",file_)
df = pd.read_csv(file_,index_col="datetime",parse_dates=['datetime'], header=0, sep=",")
if frame.columns is None :
frame.columns = df.columns
list_.append(df)
frame = pd.concat(list_)
if resampling is not None:
frame = frame.resample(resampling).mean()
frame = frame.fillna(method='ffill')
frame.columns = ['DHHL_3', 'DHHL_4', 'DHHL_5', 'DHHL_10', 'DHHL_11', 'DHHL_9', 'DHHL_2', 'DHHL_1', 'DHHL_1_Tilt',
'AP_6', 'AP_6_Tilt', 'AP_1', 'AP_3', 'AP_5', 'AP_4', 'AP_7', 'DHHL_6', 'DHHL_7', 'DHHL_8']
return frame
def create_spatio_temporal_data_oahu(oahu_df):
lat = [21.31236,21.31303,21.31357,21.31183,21.31042,21.31268,21.31451,21.31533,21.30812,21.31276,21.31281,21.30983,21.31141,21.31478,21.31179,21.31418,21.31034]
lon = [-158.08463,-158.08505,-158.08424,-158.08554,-158.0853,-158.08688,-158.08534,-158.087,-158.07935,-158.08389,-158.08163,-158.08249,-158.07947,-158.07785,-158.08678,-158.08685,-158.08675]
additional_info = pd.DataFrame({'station': oahu_df.columns, 'latitude': lat, 'longitude': lon })
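    # Reshape the wide per-station table into long format: one
    # (latitude, longitude, irradiance) row per station and timestamp.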
ll = []
for ind, row in oahu_df.iterrows():
for col in oahu_df.columns:
lat = additional_info[(additional_info.station == col)].latitude.values[0]
lon = additional_info[(additional_info.station == col)].longitude.values[0]
irradiance = row[col]
ll.append([lat, lon, irradiance])
return pd.DataFrame(columns=['latitude','longitude','irradiance'], data=ll)
def load_oahu_dataset(start_date = "2010-04-01", end_date = "2011-10-31"):
"""
Dataset used in
"Impact of network layout and time resolution on spatio-temporal solar forecasting" - <NAME>, <NAME>. - Solar Energy 2018
:param start_date: time series start date in dd-mm-yyyy
:param end_date: time series end date in dd-mm-yyyy
:return: dataset in dataframe
"""
# read raw dataset
df = pd.read_csv('https://query.data.world/s/76ohtd4zd6a6fhiwwe742y23fiplgk')
# drop unused columns
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
df.drop(['Time', 'Ioh', 'DH1T', 'AP6T', 'AP3', 'AP2.dif', 'AP2.dir'], axis=1, inplace=True)
# create corrected index
ind = pd.date_range(start='2010-03-18 00:00:00', end='2011-11-01 00:00:00', closed='left', freq='10s')
ts = pd.DataFrame(index=ind)
df['Time'] = ts.between_time("05:00:00", "20:00:00").index
df.set_index('Time', inplace=True)
# filter range of interest
df = df[start_date:end_date]
return df
# create clear sky index dataframe
def get_clear_sky_index(cs,irr):
csi = []
for c,i in zip(cs,irr):
if c:
csi.append(i/c)
else:
csi.append(0)
return csi
def load_oahu_dataset_clear_sky(start_date = "2010-04-01", end_date = "2011-10-31"):
raw_df = load_oahu_dataset(start_date, end_date)
cs_index_df = | pd.DataFrame(index=raw_df.index) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/21 0021
# @Author : justin.郑 <EMAIL>
# @File : index_baidu.py
# @Desc : fetch Baidu Index data
import json
import urllib.parse
import pandas as pd
import requests
def decrypt(t: str, e: str) -> str:
"""
    Decryption function
:param t:
:type t:
:param e:
:type e:
:return:
:rtype:
"""
n, i, a, result = list(t), list(e), {}, []
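    # t acts as a substitution-cipher key: each character in its first half maps to the
    # character at the same position in the second half, and e is decoded by applying
    # that mapping character by character. For a hypothetical key, decrypt("abcd", "ab")
    # builds {'a': 'c', 'b': 'd'} and returns "cd".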
ln = int(len(n) / 2)
start, end = n[ln:], n[:ln]
a = dict(zip(end, start))
return "".join([a[j] for j in e])
def get_ptbk(uniqid: str, cookie: str) -> str:
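    # Fetch the per-request decryption key ("ptbk") for the given uniqid; a valid
    # logged-in cookie is required.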
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cookie": cookie,
"Host": "index.baidu.com",
"Referer": "http://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
with session.get(
url=f"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}"
) as response:
ptbk = response.json()["data"]
return ptbk
def baidu_interest_index(word, cookie):
"""
    Baidu Index: audience interest distribution (crowd profile)
    :param word: keyword
    :param cookie:
    :return:
        desc        interest category
        tgi         TGI index
        word_rate   distribution ratio for the keyword
        all_rate    distribution ratio across the whole web
        period      period range
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/interest?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['interest']
age_df = pd.DataFrame(age_list)
all_list = data['result'][1]['interest']
all_df = pd.DataFrame(all_list)
all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
res_df = pd.merge(age_df, all_df, on='desc')
res_df['period'] = period
res_df.drop(["typeId"], axis=1, inplace=True)
res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
return res_df
except:
return None
def baidu_gender_index(word, cookie):
"""
    Baidu Index: audience gender distribution (crowd profile)
    :param word: keyword
    :param cookie:
    :return:
        desc        gender
        tgi         TGI index
        word_rate   distribution ratio for the keyword
        all_rate    distribution ratio across the whole web
        period      period range
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/baseAttributes?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['gender']
age_df = pd.DataFrame(age_list)
all_list = data['result'][1]['gender']
all_df = pd.DataFrame(all_list)
all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
res_df = pd.merge(age_df, all_df, on='desc')
res_df['period'] = period
res_df.drop(["typeId"], axis=1, inplace=True)
res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
return res_df
except:
return None
def baidu_age_index(word, cookie):
"""
    Baidu Index: audience age distribution (crowd profile)
    :param word: keyword
    :param cookie:
    :return:
        desc        age range
        tgi         TGI index
        word_rate   distribution ratio for the keyword
        all_rate    distribution ratio across the whole web
        period      period range
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/baseAttributes?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['age']
age_df = pd.DataFrame(age_list)
all_list = data['result'][1]['age']
all_df = pd.DataFrame(all_list)
all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
res_df = pd.merge(age_df, all_df, on='desc')
res_df['period'] = period
res_df.drop(["typeId"], axis=1, inplace=True)
res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
return res_df
except:
return None
def baidu_atlas_index(word, cookie, date=None):
"""
    Baidu Index: demand graph (related search terms)
    :param word: keyword
    :param cookie:
    :param date: period
    :return:
        period      period range
        word        related word
        pv          search popularity
        ratio       change rate of search volume
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
if date == None:
date = ""
url = "http://index.baidu.com/api/WordGraph/multi?wordlist[]=%s&datelist=%s" % (word, date)
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
wordlist = data['wordlist'][0]['wordGraph']
res_list = []
for word in wordlist:
tmp = {
"word": word['word'],
"pv": word['pv'],
"ratio": word['ratio'],
"period": data['period']
# "sim": word['sim']
}
res_list.append(tmp)
df = pd.DataFrame(res_list)
return df
except:
return None
def baidu_search_index(word, start_date, end_date, cookie, type="all"):
    # Baidu search index data
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cookie": cookie,
"Host": "index.baidu.com",
"Referer": "http://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
}
w = '{"name":"%s","wordType":1}' % word
url = 'http://index.baidu.com/api/SearchApi/index?area=0&word=[[%s]]&startDate=%s&endDate=%s' % (w, start_date, end_date)
r = requests.get(url=url, headers=headers)
data = r.json()["data"]
all_data = data["userIndexes"][0][type]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = [int(item) if item != "" else 0 for item in result]
temp_df_7 = pd.DataFrame(
[pd.date_range(start=start_date, end=end_date), result],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
except Exception as e:
return None
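# Usage sketch (illustrative only; dates are assumed to be in YYYY-MM-DD form,
# matching the daily pd.date_range used above to label the decrypted series):
#
#   search_df = baidu_search_index("python", "2020-01-01", "2020-03-31", cookie)
#   if search_df is not None:
#       print(search_df.tail())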
def baidu_info_index(word, start_date, end_date, cookie):
    # Baidu information (news feed) index
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cookie": cookie,
"Host": "index.baidu.com",
"Referer": "http://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36"
}
w = '{"name":"%s","wordType":1}' % word
url = 'http://index.baidu.com/api/FeedSearchApi/getFeedIndex?area=0&word=[[%s]]&startDate=%s&endDate=%s' % (
w, start_date, end_date)
r = requests.get(url=url, headers=headers)
data = r.json()["data"]
all_data = data["index"][0]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = [int(item) if item != "" else 0 for item in result]
temp_df_7 = pd.DataFrame(
[pd.date_range(start=start_date, end=end_date), result],
index=["date", word],
).T
        temp_df_7.index = pd.to_datetime(temp_df_7["date"])
        del temp_df_7["date"]
        return temp_df_7
    except Exception as e:
        return None
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Open Page Rank Provider.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require an API key and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
from json import JSONDecodeError
from typing import Any, Tuple, Union, Iterable, Dict, List
import attr
import pandas as pd
from .ti_provider_base import LookupResult, TISeverity, generate_items, TILookupStatus
from .http_base import HttpProvider, IoCLookupParams
from ...common.utility import export
from ..._version import VERSION
__version__ = VERSION
__author__ = "<NAME>"
@export
class OPR(HttpProvider):
"""Open PageRank Lookup."""
_BASE_URL = "https://openpagerank.com"
_IOC_QUERIES = {
"dns": IoCLookupParams(
path="/api/v1.0/getPageRank",
params={"domains[0]": "{observable}"},
headers={"API-OPR": "{API_KEY}"},
)
}
_REQUIRED_PARAMS = ["API_KEY"]
def __init__(self, **kwargs):
"""Initialize a new instance of the class."""
super().__init__(**kwargs)
self._provider_name = self.__class__.__name__
print(
"Using Open PageRank.",
"See https://www.domcop.com/openpagerank/what-is-openpagerank",
)
# pylint: disable=duplicate-code
def lookup_iocs(
self,
data: Union[pd.DataFrame, Dict[str, str], Iterable[str]],
obs_col: str = None,
ioc_type_col: str = None,
query_type: str = None,
**kwargs,
) -> pd.DataFrame:
"""
Lookup collection of IoC observables.
Parameters
----------
data : Union[pd.DataFrame, Dict[str, str], Iterable[str]]
Data input in one of three formats:
1. Pandas dataframe (you must supply the column name in
`obs_col` parameter)
2. Dict of observable, IoCType
3. Iterable of observables - IoCTypes will be inferred
obs_col : str, optional
DataFrame column to use for observables, by default None
ioc_type_col : str, optional
DataFrame column to use for IoCTypes, by default None
query_type : str, optional
Specify the data subtype to be queried, by default None.
If not specified the default record type for the IoC type
will be returned.
Returns
-------
pd.DataFrame
DataFrame of results.
"""
kwargs.get("provider_name", self.__class__.__name__)
domain_list = set()
bad_requests: List[pd.Series] = []
for ioc, ioc_type in generate_items(data, obs_col, ioc_type_col):
if not ioc:
continue
result = self._check_ioc_type(
ioc=ioc, ioc_type=ioc_type, query_subtype=query_type
)
if result.status == TILookupStatus.ok.value:
domain_list.add(result.ioc)
else:
bad_requests.append(pd.Series(attr.asdict(result)))
results: List[pd.Series] = []
if not domain_list:
return pd.DataFrame(columns=LookupResult.column_map())
for item_result in self._lookup_bulk_request(domain_list): # type: ignore
results.append(pd.Series(attr.asdict(item_result)))
all_results = results + bad_requests
        return pd.DataFrame(data=all_results)
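    # Usage sketch (illustrative only). How the API key is supplied depends on
    # your msticpy provider configuration; the keyword argument below is an
    # assumption, not a documented signature:
    #
    #   opr = OPR(AuthKey="<OPR_API_KEY>")
    #   results_df = opr.lookup_iocs(["python.org", "microsoft.com"])
    #   print(results_df.head())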
"""
Code for the dynamical system component of the Baselining work.
@author: <NAME>
@date Jan 6, 2016
"""
import numpy as np
import pandas as pd
from scipy.signal import cont2discrete
from patsy import dmatrices
from gurobipy import quicksum, GRB, LinExpr
class DynamicalSystem(object):
"""
Abstract base class for dynamical system optimization objects.
"""
def __init__(self, model):
"""
Construct an abstract dynamical system object based on the
gurobipy Model object 'model'.
"""
self._model = model
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
self._index = index
def get_consumption(self):
"""
        Returns power consumption (kW) and energy consumption (kWh) as
gurobi LinExpr or QuadExpr
"""
raise NotImplementedError('Function not implemented in base class.')
def set_opts(self, **kwargs):
"""
Set options (kwargs) to their respective values.
"""
raise NotImplementedError('Function not implemented in base class.')
def populate_model(self):
"""
Add system dynamics and constraints as constraints to the gurobipy
optimization model 'model'.
"""
raise NotImplementedError('Function not implemented in base class.')
class LinearSystem(DynamicalSystem):
"""
Linear Dynamical System class.
"""
def __init__(self, model, A, B, **kwargs):
"""
Constructor for a Linear Dynamical System. Here 'model' is a
gurobipy Model object. The dynamics are
x_{t+1} = A * x_t + B * u_t + E * v_t
y_t = C * x_t + D * u_t
        where x is the state vector, u is the input vector, and v is a vector
        of exogenous disturbances. If not specified, the matrix C defaults to
        the identity matrix and the matrices D and E default to the zero
        matrix. A standalone usage sketch, _example_simulate_linear_system(),
        is provided after this class definition.
"""
super(LinearSystem, self).__init__(model)
self._opts = {}
self._mats = {'A': np.asarray(A), 'B': np.asarray(B)}
if 'C' in kwargs:
self._mats['C'] = np.asarray(kwargs['C'])
else:
self._mats['C'] = np.eye(self._mats['A'].shape[1])
self._dims = {'x': self._mats['A'].shape[1],
'u': self._mats['B'].shape[1],
'y': self._mats['C'].shape[0]}
if 'D' in kwargs:
self._mats['D'] = np.asarray(kwargs['D'])
else:
self._mats['D'] = np.zeros((self._dims['y'], self._dims['u']))
if 'E' in kwargs:
self._mats['E'] = np.asarray(kwargs['E'])
self._dims['v'] = self._mats['E'].shape[1]
if 'x0' in kwargs:
self.x0 = kwargs['x0']
self._changed = {'T': True}
def set_opts(self, **kwargs):
"""
Set options (kwargs) to their respective values.
For example, to set the initial state 'x0' of the system to
the value [2, 1], call self.set_opts(x0=[2, 1])
"""
self._opts.update(kwargs)
for kwarg in kwargs:
if kwarg == 'x0':
self.x0 = kwargs[kwarg]
self._changed[kwarg] = True
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
try:
if not np.any(self._index == index):
self._opts['T'] = len(index)
self._changed['T'] = True
self._index = index
except (ValueError, AttributeError):
self._opts['T'] = len(index)
self._changed['T'] = True
self._index = index
def simulate(self, u, **kwargs):
"""
Computes system evolution x and output y for the given control
        sequence u. If the disturbance matrix E has been specified, a
        disturbance sequence v of the same length must be provided as a
        keyword argument.
"""
T = u.shape[0]
A, B, C = self._mats['A'], self._mats['B'], self._mats['C']
hasD, hasE = ('D' in self._mats), ('E' in self._mats)
x = np.zeros((T+1, self._dims['x']))
y = np.zeros((T, self._dims['y']))
x[0, :] = self._opts['x0']
y[0, :] = np.inner(C, x[0, :])
if hasD:
D = self._mats['D']
y[0, :] += np.inner(D, u[0, :])
if hasE:
E = self._mats['E']
v = kwargs['v']
for t in range(T):
x[t+1, :] = np.inner(A, x[t, :]) + np.inner(B, u[t, :])
if hasE:
x[t+1, :] += np.inner(E, v[t, :])
y[t, :] = np.inner(C, x[t, :])
if hasD:
y[t, :] += np.inner(D, u[t, :])
# not sure what to do with the last time step, so leave it be for now
# y[T, :] = np.inner(C, x[T, :])
return x, y
def populate_model(self, v=None, **kwargs):
"""
Add system dynamics and constraints to the gurobipy optimization
model. Overwrites any previous assignment.
"""
T = self._opts['T']
nu, nx = self._dims['u'], self._dims['x']
if self._changed['T']: # first time def or change of horizon
# make sure to delete old optimization variables if they exist
self._removeOld()
# define optimization variables for input and state
u, x = {}, {}
for i in range(nu):
for t in range(T):
u[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,
name='u[{},{}]'.format(t, i))
for i in range(nx):
for t in range(T+1):
x[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,
name='x[{},{}]'.format(t, i))
# update the model so it knows the variables
self._model.update()
# add control constraints
umin, umax = self._opts['umin'], self._opts['umax']
has_umin, has_umax = ~np.isnan(umin), ~np.isnan(umax)
for i in range(nu):
for t in np.arange(has_umin.shape[0])[has_umin[:, i]]:
u[t, i].setAttr(GRB.Attr.LB, umin[t, i])
for t in np.arange(has_umax.shape[0])[has_umax[:, i]]:
u[t, i].setAttr(GRB.Attr.UB, umax[t, i])
# update intitial state, if provided
if 'x0' in kwargs:
self.x0 = kwargs['x0']
# add constraint on initial state
self.x0con = {}
for i in range(nx):
self.x0con[0, i] = self._model.addConstr(
lhs=x[0, i], sense=GRB.EQUAL, rhs=self.x0[i],
name='dyn[0,{}]'.format(i))
# add system dynamics
A, B = self._mats['A'], self._mats['B']
if ('E' in self._mats):
w = np.inner(v, self._mats['E'])
else:
w = np.zeros((T, nx))
# dynamic evolution of state and output
self.dyncon = {}
for t in range(T):
for i in range(nx):
# put w on RHS to speed up constraint updates
self.dyncon[t, i] = self._model.addConstr(
lhs=(x[t+1, i] - quicksum([A[i, k] * x[t, k]
for k in range(nx)]) -
quicksum([B[i, k] * u[t, k] for k in range(nu)])),
sense=GRB.EQUAL, rhs=w[t, i],
name='dyn[{},{}]'.format(t+1, i))
self._model.update()
# add state constraints
xmin, xmax = self._opts['xmin'], self._opts['xmax']
has_xmin, has_xmax = ~np.isnan(xmin), ~np.isnan(xmax)
for i in range(nx):
for t in np.arange(has_xmin.shape[0])[has_xmin[:, i]]:
x[t+1, i].setAttr(GRB.Attr.LB, xmin[t, i])
for t in np.arange(has_xmax.shape[0])[has_xmax[:, i]]:
x[t+1, i].setAttr(GRB.Attr.UB, xmax[t, i])
self._model.update()
# indicate that model is up to date
for name in ['T', 'x0', 'umin', 'umax', 'xmin', 'xmax', 'v']:
self._changed[name] = False
# make variables accessible as object variables
self.u, self.x, self.v = u, x, v
else:
# change input constraints
if self._changed['umin']:
umin = self._opts['umin']
for i in range(nu):
for t in range(T):
self.u[t, i].setAttr(GRB.Attr.LB, umin[t, i])
self._changed['umin'] = False
if self._changed['umax']:
umax = self._opts['umax']
for i in range(nu):
for t in range(T):
self.u[t, i].setAttr(GRB.Attr.UB, umax[t, i])
self._changed['umax'] = False
# change state constraints
if self._changed['xmin']:
xmin = self._opts['xmin']
# xmin[np.isnan(xmin)] = - np.Inf
for i in range(nx):
for t in range(T):
self.x[t+1, i].setAttr(GRB.Attr.LB, xmin[t, i])
self._changed['xmin'] = False
if self._changed['xmax']:
xmax = self._opts['xmax']
# xmax[np.isnan(xmax)] = np.Inf
for i in range(nx):
for t in range(T):
self.x[t+1, i].setAttr(GRB.Attr.UB, xmax[t, i])
self._changed['xmax'] = False
# change initial state
if self._changed['x0']:
for i in range(nx):
self._model.getConstrByName('dyn[0,{}]'.format(i)).setAttr(
GRB.Attr.RHS, self.x0[i])
self._changed['x0'] = False
# change effect of disturbance vector on dynamics (if any)
if v is not None:
if not np.all(v == self.v):
self.v = v
w = np.inner(v, self._mats['E'])
for i in range(nx):
for t in range(T):
self._model.getConstrByName(
'dyn[{},{}]'.format(t+1, i)).setAttr(
GRB.Attr.RHS, w[t, i])
# finally update and include all changes
self._model.update()
def _removeOld(self):
"""
Helper function removing all optimization variables from the
underlying gurobipy optimization model if the time horizon
has been changed.
"""
if hasattr(self, 'u'):
for u in self.u.values():
self._model.remove(u)
del self.u
if hasattr(self, 'x'):
for x in self.x.values():
self._model.remove(x)
del self.x
if hasattr(self, 'x0con'):
for x0c in self.x0con.values():
self._model.remove(x0c)
del self.x0con
if hasattr(self, 'dyncon'):
for dc in self.dyncon.values():
self._model.remove(dc)
del self.dyncon
self._model.update()
def get_optvals(self):
"""
Return the optimal values of state and control.
"""
xopt = np.array([[self.x[t, i].X for i in range(self._dims['x'])]
for t in range(self._opts['T']+1)])
uopt = np.array([[self.u[t, i].X for i in range(self._dims['u'])]
for t in range(self._opts['T'])])
return xopt, uopt
def get_consumption(self):
"""
Return power consumption (in kW) and energy consumption (in kWh)
of the system during each optimization time interval as a pandas
Dataframe of gurobipy LinExpr or QuadExpr.
"""
if self._changed['T'] | self._changed['nrg_coeffs']:
nrgcoeffs = np.asarray(self._opts['nrg_coeffs'])
nu = self._dims['u']
pwrcons = [LinExpr(nrgcoeffs, [self.u[t, i] for i in range(nu)])
for t in range(self._opts['T'])]
# rescale to kWh (if time index resolution is different from 1h)
rescale = self._index.freq.delta.total_seconds() / 3600.0
nrgcons = [pc * rescale for pc in pwrcons]
self._cons = pd.DataFrame({'power': pwrcons, 'energy': nrgcons},
index=self._index)
return self._cons
def additional_cost_term(self, **kwargs):
"""
        Returns an additional cost term (a gurobipy LinExpr or QuadExpr) that
is included into the optimization problem. In the base class this
is just zero.
"""
return 0.0
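def _example_simulate_linear_system():
    """
    Minimal usage sketch (not part of the original module): open-loop
    simulation of a two-state LinearSystem. A gurobipy Model is only needed
    once populate_model() is called, so None is passed here purely for
    illustration (assumption made for this example).
    """
    A = np.array([[0.9, 0.1], [0.0, 0.8]])
    B = np.array([[0.0], [1.0]])
    system = LinearSystem(None, A, B)
    system.set_opts(x0=[1.0, 0.0])
    u = np.ones((10, 1))        # constant unit input over 10 steps
    x, y = system.simulate(u)   # x has shape (11, 2), y has shape (10, 2)
    return x, y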
class FraukesModel(LinearSystem):
"""
The linear model of the Swiss building as used by Frauke
in her paper.
"""
def __init__(self, model, ts=15):
"""
Create an instance of Frauke's Model with sampling time
ts (in minutes)
"""
from .utils import matrices_frauke
A, B, E = matrices_frauke(ts)
super(FraukesModel, self).__init__(model, A, B, E=E)
def set_v(self, df):
"""
Set model data (outside temperature, solar radiation, occupancy).
        Here df is a pandas DataFrame indexed by a (timezone-aware)
        DatetimeIndex with columns outside_temp, solar_rad and occupancy
"""
self.v = df
v = df[['outside_temp', 'solar_rad', 'occupancy']].values
self.set_opts(T=v.shape[0])
        self.populate_model(v=v)
class PavlaksModel(LinearSystem):
"""
The linear model
"""
def __init__(self, model, ts=15):
"""
Create an instance of Pavlak's Model with sampling time
ts (in minutes)
"""
from .utils import matrices_pavlak
A, B, E = matrices_pavlak(ts)
super(PavlaksModel, self).__init__(model, A, B, E=E)
def set_v(self, df):
"""
Set model data (outside temperature, solar radiation, occupancy).
        Here df is a pandas DataFrame indexed by a (timezone-aware)
        DatetimeIndex with columns outside_temp, solar_rad and occupancy
"""
self.v = df
v = df[['outside_temp', 'solar_rad', 'occupancy']].values
self.set_opts(T=v.shape[0])
        self.populate_model(v=v)
class GenericBufferedProduction(LinearSystem):
"""
A linear model for a generic buffered production model. Here it is
assumed that all production costs are sunk costs except for energy.
There is a battery with charging and discharging inefficiencies,
as well as leakage, which can be used to store energy.
"""
def __init__(self, model, eta_c=0.9, eta_d=0.9, Tleak=96, ts=15):
"""
Create an instance of a generic buffered production model. Here
eta_c and eta_d are the charging and discharging efficiencies of
the battery, respectively, Tleak is time constant (in hours) of
the charge leakage, and ts is the sampling time (in minutes).
"""
Act = np.array([[-1.0/(Tleak*3600), 0], [0, 0]])
Bct = np.array([[eta_c, -1.0/eta_d], [0, 1]])
Cct = np.array([[0, 0]])
Dct = np.array([[0, 0]])
(A, B, C, D, dt) = cont2discrete(
(Act, Bct, Cct, Dct), ts*60, method='zoh')
super(GenericBufferedProduction, self).__init__(model, A, B)
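# Inspection sketch (not part of the original module): the continuous-time
# matrices above are discretized with scipy's cont2discrete before being handed
# to LinearSystem. Passing model=None is enough for inspecting the matrices,
# since a gurobipy Model is only needed once populate_model() is called
# (assumption made for this illustration).
#
#   buffered = GenericBufferedProduction(None, eta_c=0.9, eta_d=0.9, Tleak=96, ts=15)
#   print(buffered._mats['A'])   # discrete-time state matrix (charge leakage per step)
#   print(buffered._mats['B'])   # discrete-time input matrix (charging / discharging)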
class QuadraticUtility(DynamicalSystem):
"""
    A static quadratic consumption utility model with time-separable
utilities. Used to benchmark the QuadraticUtilityWithBattery model.
"""
def __init__(self, model, **kwargs):
"""
Constructor for QuadraticUtility model. Here 'model' is a
gurobipy Model object.
"""
super(QuadraticUtility, self).__init__(model)
self._opts = {}
self._dims = {'u': 1}
self._changed = {'T': True}
def set_opts(self, **kwargs):
"""
Set options (kwargs) to their respective values.
"""
self._opts.update(kwargs)
for kwarg in kwargs:
self._changed[kwarg] = True
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
try:
if not np.any(self._index == index):
self._opts['T'] = len(index)
self._changed['T'] = True
self._index = index
except (ValueError, AttributeError):
self._opts['T'] = len(index)
self._changed['T'] = True
self._index = index
def compute_util_params(self, load_shape, nrg_charges, eta=-0.1,
fit='saturated'):
"""
Computes the parameters of the quadratic utility function by
calibrating the consumption under the energy charges nrg_charges
to the provided load shape. If fit=='saturated', then compute a
single parameter for each period. If fit=='regression', use OLS
to estimate a model with a single parameter for each interaction
of month, weekday/weekend and hour of day.
"""
if fit == 'saturated':
# Series of linear coefficients
self._alpha = nrg_charges.loc[self._index] * (1 - 1/eta)
# Series of quadratic coefficients
self._beta = -1.0*nrg_charges.loc[self._index].divide(
load_shape.loc[self._index]*eta, axis='index')
elif fit == 'regression':
df = pd.concat([nrg_charges, load_shape], axis=1)
df = df.rename(columns={nrg_charges.name: 'p',
load_shape.name: 'q'})
df['month'] = df.index.month
df['HoD'] = df.index.hour
df['wknd'] = (df.index.dayofweek >= 5).astype(int)
df['a_indiv'] = df['p'] * (1 - 1 / eta)
df['b_indiv'] = -1.0 * df['p'] / (df['q'] * eta)
y_a, X_a = dmatrices('a_indiv ~ -1 + C(month):C(HoD):C(wknd)',
data=df.loc[self._index])
y_b, X_b = dmatrices('b_indiv ~ -1 + C(month):C(HoD):C(wknd)',
data=df.loc[self._index])
# Note: the following weird syntax is necessary to convert patsy
# DesignMatrix objects to np arrays - o/w this creates issues
# when using multiprocessing since DesignMatrix objects cannot
# be pickled (hopefully to be fixed in a later patsy version)
_alpha = np.dot(np.asarray(X_a), np.linalg.lstsq(
np.asarray(X_a), np.asarray(y_a).flatten())[0])
self._alpha = pd.Series(_alpha, index=self._index)
_beta = np.dot(np.asarray(X_b), np.linalg.lstsq(
np.asarray(X_b), np.asarray(y_b).flatten())[0])
            self._beta = pd.Series(_beta, index=self._index)
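    # Worked example (sketch) of the 'saturated' calibration above for a single
    # period: with price p = 0.20, load-shape consumption q = 5 and elasticity
    # eta = -0.1, the code sets
    #     alpha = p * (1 - 1/eta) = 0.20 * 11    = 2.2
    #     beta  = -p / (q * eta)  = -0.20 / -0.5 = 0.4
    # Under the standard quadratic form U(q) = alpha*q - 0.5*beta*q**2 (an
    # assumption; the objective itself is built elsewhere), marginal utility
    # alpha - beta*q equals the price p exactly at q = 5, so the calibrated
    # consumer reproduces the load shape at the calibration price.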
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Created on Fri Aug 9 14:01:22 2019
@author: cherrabi
"""
from P2N_Lib import GenereListeFichiers, LoadBiblioFile
from P2N_Config import LoadConfig
import os
import re
import sys
import shutil
import string
import codecs
import logging
import time
from operator import add
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from textblob import TextBlob  # TextBlob, a lightweight linguistic toolkit
from sematch.semantic.similarity import WordNetSimilarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import umap
import matplotlib.pyplot as plt
import seaborn as sns
import gensim
from gensim import corpora
from gensim.corpora import Dictionary
ListeBrevet = [] # The patent List
stop_words = set(stopwords.words('english'))
configFile = LoadConfig()
requete = configFile.requete
GatherContent = configFile.GatherContent
GatherBiblio = configFile.GatherBiblio
GatherPatent = configFile.GatherPatent
GatherFamilly = configFile.GatherFamilly
IsEnableScript = configFile.GatherIramuteq
ResultBiblioPath = configFile.ResultBiblioPath
ndf = configFile.ndf
temporPath = configFile.temporPath
ResultAbstractPath = configFile.ResultAbstractPath
#ResultClaimsPath = configFile.ResultClaimsPath
# add the Flask template output directory under the request's results directory; normalize the path for Windows
ResultPathContent= configFile.ResultContentsPath.replace('\\', '/' )
ResultTemplateFlask = os.path.join(ResultPathContent,'Trizifiier').replace('\\','/')
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
# create the Trizifiier output, templates and DataFormat directories
if not os.path.exists(ResultTemplateFlask):
    os.mkdir(ResultTemplateFlask)
if not os.path.exists(ResultTemplateFlask+'/templates'):
    os.mkdir(ResultTemplateFlask+'/templates')
if not os.path.exists(ResultTemplateFlask+'/DataFormat'):
    os.mkdir(ResultTemplateFlask+'/DataFormat')
#add here tempo dir
temporar = configFile.temporPath
wns = WordNetSimilarity()
i=0
# build file list
#direct = os.path.normpath(ResultBiblioPath)
#direct = os.path.normpath(ResultClaimsPath)
direct = os.path.normpath(ResultAbstractPath)
# list the path of every txt document in the query folder; Fr, En and Unk hold the French, English and unknown-language file lists
Fr, En, Unk = GenereListeFichiers(direct)
def convert_tag(tag):
tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
CountFile_R = 0
CountFile_W = 0
FichierOrg={}
# count the number of characters in En
#if len(En)
PSW = []  # empty word list, filled progressively as the searches run
# minimalistic HTML for result file in html format
dataF = """"""  # will contain all the abstracts of the query folder
import codecs
DejaVus = dict()
f=open(ResultTemplateFlask + '/DataFormat/FileDataAnalysisTrizWiki.csv','w')
entetes = [
u'i',
u'Abstract Number',
u'Term',
u'Action',
u'indiceSimAction',
u'abstract',
u'urlEspacenet'
]
ligneEntete=",".join(entetes)+"\n"
f.write(ligneEntete)
d= pd.read_csv("trizOxfordData.csv",delimiter=";")
listcaras = pd.DataFrame(d,columns=['Colonne3'])
listcara = listcaras.drop_duplicates(['Colonne3'],keep='first')
# read the txt files in a loop and store their content in dataF
for fic in En:
with codecs.open(fic, 'r', 'utf8') as File:
        dataF = File.readlines()  # single file; do not read the first line of the abstract
# dataF = '\n'.join(dataF)
# FichierOrg = dataF
abstract = '\n'.join(dataF[1:])
NumberBrevet= fic.split('-')[1]
#NumberBrevet=NumberBrevet.replace('*Label_','')
NumberBrevet=NumberBrevet.replace('.txt','')
#sys.exit(0)
# tokenization
abstract = re.sub("[^a-zA-Z#]", " ",str(abstract))
Blob = TextBlob(abstract)
wordlist=Blob.words #should give best results@ DR
# remove stop-words and words less 3 caracters
filtered_sentence = []
for w in wordlist:
if w not in stop_words and len(w) > 3:
filtered_sentence.append(w)
#Document-Term Matrix
vectorizer = TfidfVectorizer(stop_words='english',
max_features= 1000, # keep top 1000 terms
max_df = 0.5,
smooth_idf=True)
X = vectorizer.fit_transform(filtered_sentence)
X.shape # check shape of the document-term matrix
# SVD represent documents and terms in vectors
svd_model = TruncatedSVD(n_components=1, algorithm='randomized', n_iter=100, random_state=122)
svd_model.fit(X)
len(svd_model.components_)
terms = vectorizer.get_feature_names()
terms_topic_model =[]
for i, comp in enumerate(svd_model.components_):
terms_comp = zip(terms, comp)
sorted_terms = sorted(terms_comp, key= lambda x:x[1], reverse=True)[:20]
sorted_terms_list = []
for t in sorted_terms:
sorted_terms_list.append (t[0])
sorted_terms_lists=sorted_terms_list
urlEspacenet="https://worldwide.espacenet.com/searchResults?submitted=true&locale=fr_EP&DB=EPODOC&ST=advanced&TI=&AB=&PN="+format(NumberBrevet)
matriceListe = []
matricelistePaire = []
matricelistePaireSort=[]
matricelistePaireAction = []
matricelistePaireObject = []
for word in sorted_terms_lists :
tokens = word
for index, row in listcara.iterrows():
            abstractNumber = 'abs{}'.format(i)
listaction = row['Colonne3']
listaction = re.sub(r'\([^)]*\)', '', listaction)
            # comparison between extracted terms and the TRIZ action classes
            indiceSimAction = wns.word_similarity(word, str(listaction))
            if indiceSimAction == 0 or word.isdigit():
                # nothing to do
continue
else:
valeurs=[]
valeurs=[i,NumberBrevet,word,listaction,indiceSimAction,abstract,urlEspacenet]
ligne=",".join(str(v) for v in valeurs) + "\n"
f.write(ligne)
print((NumberBrevet), " abstracts processed" )
f.close()
# open the semantic classification results file
d= pd.read_csv(ResultTemplateFlask + "/DataFormat/FileDataAnalysisTrizWiki.csv")
df = pd.DataFrame(d,columns=['i','Abstract Number','Term','Action','indiceSimAction','abstract','urlEspacenet'])
# sort by abstract id and term, with the highest similarity first
dfmax = df.sort_values(by=['i','Term','indiceSimAction'],ascending=[True,True,False])
dfmax.to_csv(ResultTemplateFlask + '/DataFormat/tableauTri.csv')
# keep only the highest-similarity action for each term
dresult = dfmax.drop_duplicates(['Term'],keep='first')
dresult.to_csv(ResultTemplateFlask + '/DataFormat/tableauDrop.csv')
dresultmaxI=dresult.sort_values(by='indiceSimAction')
# create the formatted data file used by the Tabulator HTML table
dresultmaxI.to_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv')
dd = pd.read_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv')
# pylint: disable=E1101
from datetime import datetime
import os
import warnings
import nose
import numpy as np
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import read_stata, StataReader
import pandas.util.testing as tm
from pandas.util.misc import is_little_endian
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
# Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from:
# http://stata-press.com/data/glmext.html
self.dirpath = tm.get_data_path()
self.dta1 = os.path.join(self.dirpath, 'stata1.dta')
self.dta2 = os.path.join(self.dirpath, 'stata2.dta')
self.dta3 = os.path.join(self.dirpath, 'stata3.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4 = os.path.join(self.dirpath, 'stata4.dta')
self.dta7 = os.path.join(self.dirpath, 'cancer.dta')
self.csv7 = os.path.join(self.dirpath, 'cancer.csv')
self.dta8 = os.path.join(self.dirpath, 'tbl19-3.dta')
self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv')
self.dta9 = os.path.join(self.dirpath, 'lbw.dta')
self.csv9 = os.path.join(self.dirpath, 'lbw.csv')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.dta1_13 = os.path.join(self.dirpath, 'stata1_v13.dta')
self.dta2_13 = os.path.join(self.dirpath, 'stata2_v13.dta')
self.dta3_13 = os.path.join(self.dirpath, 'stata3_v13.dta')
self.dta4_13 = os.path.join(self.dirpath, 'stata4_v13.dta')
def read_dta(self, file):
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_dta1(self):
reader = StataReader(self.dta1)
parsed = reader.data()
reader_13 = StataReader(self.dta1_13)
parsed_13 = reader_13.data()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed, expected)
tm.assert_frame_equal(parsed_13, expected)
def test_read_dta2(self):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT')
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
with warnings.catch_warnings(record=True) as w:
parsed = self.read_dta(self.dta2)
parsed_13 = self.read_dta(self.dta2_13)
np.testing.assert_equal(
len(w), 1) # should get a warning for that format.
tm.assert_frame_equal(parsed, expected)
tm.assert_frame_equal(parsed_13, expected)
def test_read_dta3(self):
parsed = self.read_dta(self.dta3)
parsed_13 = self.read_dta(self.dta3_13)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int32)
expected['quarter'] = expected['quarter'].astype(np.int16)
tm.assert_frame_equal(parsed, expected)
        tm.assert_frame_equal(parsed_13, expected)
import json
#import requests
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import uuid
import subprocess
from datetime import datetime
from bs4 import BeautifulSoup as bs
import re
import pysam
import mysecrets
import glob
import tarfile
from flask import Flask, request, redirect, url_for, jsonify, render_template, flash, send_file
from werkzeug.utils import secure_filename
from flask_sitemap import Sitemap
from flask_uploads import UploadSet, configure_uploads, DATA
from pymongo import MongoClient
from pprint import pprint
import htmltableparser
#import getSimpleSumStats
genomicWindowLimit = 2000000
one_sided_SS_window_size = 100000 # (100 kb on either side of the lead SNP)
fileSizeLimit = 500 * 1024 * 1024 # in Bytes
MYDIR = os.path.dirname(__file__)
APP_STATIC = os.path.join(MYDIR, 'static')
##################
# Default settings
##################
default_region = "1:205500000-206000000"
default_chromname = "#CHROM"
default_posname = "POS"
default_snpname = "ID"
default_refname = "REF"
default_altname = "ALT"
default_pname = "P"
default_betaname = "BETA"
default_stderrname = "SE"
default_nname = "N"
default_mafname = "MAF"
# Default column names for secondary datasets:
CHROM = 'CHROM'
BP = 'BP'
SNP = 'SNP'
P = 'P'
coloc2colnames = ['CHR','POS','SNPID','A2','A1','BETA','SE','PVAL','MAF', 'N']
coloc2eqtlcolnames = coloc2colnames + ['ProbeID']
coloc2gwascolnames = coloc2colnames + ['type']
################
################
app = Flask(__name__)
ext = Sitemap(app=app)
app.config['UPLOAD_FOLDER'] = os.path.join(MYDIR, 'static/upload/')
app.config['UPLOADED_FILES_DEST'] = os.path.join(MYDIR, 'static/upload/')
app.config['MAX_CONTENT_LENGTH'] = fileSizeLimit
ALLOWED_EXTENSIONS = set(['txt', 'tsv', 'ld', 'html'])
app.config['UPLOADED_FILES_ALLOW'] = ALLOWED_EXTENSIONS
app.secret_key = mysecrets.mysecret
files = UploadSet('files', DATA)
configure_uploads(app, files)
collapsed_genes_df_hg19 = pd.read_csv(os.path.join(MYDIR, 'data/collapsed_gencode_v19_hg19.gz'), compression='gzip', sep='\t', encoding='utf-8')
collapsed_genes_df_hg38 = pd.read_csv(os.path.join(MYDIR, 'data/collapsed_gencode_v26_hg38.gz'), compression='gzip', sep='\t', encoding='utf-8')
collapsed_genes_df = collapsed_genes_df_hg19 # For now
ld_mat_diag_constant = 1e-6
conn = "mongodb://localhost:27017"
client = MongoClient(conn)
db = client.GTEx_V7 # For now
available_gtex_versions = ["V7", "V8"]
valid_populations = ["EUR", "AFR","EAS", "SAS", "AMR", "ASN", "NFE"]
####################################
# Helper functions
####################################
def parseRegionText(regiontext, build):
if build not in ['hg19', 'hg38']:
raise InvalidUsage(f'Unrecognized build: {build}', status_code=410)
regiontext = regiontext.strip().replace(' ','').replace(',','').replace('chr','')
if not re.search("^\d+:\d+-\d+$", regiontext.replace('X','23').replace('x','23')):
raise InvalidUsage(f'Invalid coordinate format. {regiontext} e.g. 1:205,000,000-206,000,000', status_code=410)
chrom = regiontext.split(':')[0].lower().replace('chr','').upper()
pos = regiontext.split(':')[1]
startbp = pos.split('-')[0].replace(',','')
endbp = pos.split('-')[1].replace(',','')
chromLengths = pd.read_csv(os.path.join(MYDIR, 'data', build + '_chrom_lengths.txt'), sep="\t", encoding='utf-8')
chromLengths.set_index('sequence',inplace=True)
if chrom in ['X','x'] or chrom == '23':
chrom = 23
maxChromLength = chromLengths.loc['chrX', 'length']
try:
startbp = int(startbp)
endbp = int(endbp)
except:
raise InvalidUsage(f"Invalid coordinates input: {regiontext}", status_code=410)
else:
try:
chrom = int(chrom)
if chrom == 23:
maxChromLength = chromLengths.loc['chrX', 'length']
else:
maxChromLength = chromLengths.loc['chr'+str(chrom), 'length']
startbp = int(startbp)
endbp = int(endbp)
except:
raise InvalidUsage(f"Invalid coordinates input {regiontext}", status_code=410)
if chrom < 1 or chrom > 23:
raise InvalidUsage('Chromosome input must be between 1 and 23', status_code=410)
elif startbp > endbp:
raise InvalidUsage('Starting chromosome basepair position is greater than ending basepair position', status_code=410)
elif startbp > maxChromLength or endbp > maxChromLength:
raise InvalidUsage('Start or end coordinates are out of range', status_code=410)
elif (endbp - startbp) > genomicWindowLimit:
raise InvalidUsage(f'Entered region size is larger than {genomicWindowLimit/10**6} Mbp', status_code=410)
else:
return chrom, startbp, endbp
def allowed_file(filenames):
    if isinstance(filenames, str):
return '.' in filenames and filenames.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
for filename in filenames:
if not ('.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS):
return False
return True
def writeList(alist, filename):
with open(filename, 'w') as f:
for item in alist:
f.write("%s\n" % item)
def writeMat(aMat, filename):
aMat = np.matrix(aMat)
with open(filename, 'w') as f:
for row in np.arange(aMat.shape[0]):
for col in np.arange(aMat.shape[1] - 1):
f.write("%s\t" % str(aMat[row,col]))
f.write("%s\n" % str(aMat[row,-1]))
def genenames(genename, build):
# Given either ENSG gene name or HUGO gene name, returns both HUGO and ENSG names
ensg_gene = genename
if build.lower() in ["hg19","grch37"]:
collapsed_genes_df = collapsed_genes_df_hg19
elif build.lower() in ["hg38", "grch38"]:
collapsed_genes_df = collapsed_genes_df_hg38
if genename in list(collapsed_genes_df['name']):
ensg_gene = collapsed_genes_df['ENSG_name'][list(collapsed_genes_df['name']).index(genename)]
if genename in list(collapsed_genes_df['ENSG_name']):
genename = collapsed_genes_df['name'][list(collapsed_genes_df['ENSG_name']).index(genename)]
return genename, ensg_gene
def classify_files(filenames):
gwas_filepath = ''
ldmat_filepath = ''
html_filepath = ''
extensions = []
for file in filenames:
filename = secure_filename(file.filename)
extension = filename.split('.')[-1]
if extension not in extensions:
if extension in ['txt', 'tsv']:
extensions.extend(['txt','tsv'])
else:
extensions.append(extension)
else:
raise InvalidUsage('Please upload up to 3 different file types as described', status_code=410)
if extension in ['txt', 'tsv']:
gwas_filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
elif extension in ['ld']:
ldmat_filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
elif extension in ['html']:
html_filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
return gwas_filepath, ldmat_filepath, html_filepath
def isSorted(l):
# l is a list
# returns True if l is sorted, False otherwise
return all(l[i] <= l[i+1] for i in range(len(l)-1))
def Xto23(l):
newl = []
validchroms = [str(i) for i in list(np.arange(1,24))]
validchroms.append('.')
for x in l:
if str(str(x).strip().lower().replace('chr','').upper()) == "X":
newl.append(23)
elif str(str(x).strip().lower().replace('chr','')) in validchroms:
if x!='.':
newl.append(int(str(x).strip().lower().replace('chr','')))
else:
newl.append('.')
else:
raise InvalidUsage('Chromosome unrecognized', status_code=410)
return newl
def verifycol(formname, defaultname, filecolnames, error_message_):
"""
Checks if the user-entered column name (formname)
(or the default column name if no column name was entered - defaultname)
can be found in the dataset column names (ie. filecolnames list).
If not, the error_message_ is output and program halted with 410 status
"""
theformname = formname
if formname=='': theformname=str(defaultname)
if theformname not in filecolnames:
raise InvalidUsage(error_message_, status_code=410)
return theformname
def buildSNPlist(df, chromcol, poscol, refcol, altcol, build):
snplist = []
if build.lower() in ["hg38","grch38"]:
build = 'b38'
else:
build = 'b37'
for i in np.arange(df.shape[0]):
chrom = list(df[chromcol])[i]
pos = list(df[poscol])[i]
ref = list(df[refcol])[i]
alt = list(df[altcol])[i]
try:
snplist.append(str(chrom)+"_"+str(pos)+"_"+str(ref)+"_"+str(alt)+"_"+str(build))
except:
raise InvalidUsage(f'Could not convert marker at row {str(i)}')
return snplist
def fetchSNV(chrom, bp, ref, build):
variantid = '.'
if ref is None or ref=='.':
ref=''
# Ensure valid region:
try:
regiontxt = str(chrom) + ":" + str(bp) + "-" + str(int(bp)+1)
except:
        raise InvalidUsage(f'Invalid input for {str(chrom)}:{str(bp)}')
chrom, startbp, endbp = parseRegionText(regiontxt, build)
chrom = str(chrom).replace('chr','').replace('23',"X")
# Load dbSNP151 SNP names from region indicated
dbsnp_filepath = ''
if build.lower() in ["hg38", "grch38"]:
suffix = 'b38'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh38p7', 'All_20180418.vcf.gz')
else:
suffix = 'b37'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh37p13', 'All_20180423.vcf.gz')
# Load variant info from dbSNP151
tbx = pysam.TabixFile(dbsnp_filepath)
varlist = []
for row in tbx.fetch(str(chrom), bp-1, bp):
rowlist = str(row).split('\t')
chromi = rowlist[0].replace('chr','')
posi = rowlist[1]
idi = rowlist[2]
refi = rowlist[3]
alti = rowlist[4]
varstr = '_'.join([chromi, posi, refi, alti, suffix])
varlist.append(varstr)
# Check if there is a match to an SNV with the provided info
if len(varlist) == 1:
variantid = varstr
elif len(varlist) > 1 and ref != '':
for v in varlist:
if v.split('_')[2] == ref:
variantid = v
break
return variantid
def standardizeSNPs(variantlist, regiontxt, build):
"""
Input: Variant names in any of these formats: rsid, chrom_pos_ref_alt, chrom:pos_ref_alt, chrom:pos_ref_alt_b37/b38
Output: chrom_pos_ref_alt_b37/b38 variant ID format, but looks at GTEx variant lookup table first.
In the case of multi-allelic variants (e.g. rs2211330(T/A,C)), formats such as 1_205001063_T_A,C_b37 are accepted
If variant ID format is chr:pos, and the chr:pos has a unique biallelic SNV, then it will be assigned that variant
"""
if all(x=='.' for x in variantlist):
raise InvalidUsage('No variants provided')
if np.nan in variantlist:
raise InvalidUsage('Missing variant IDs detected in row(s): ' + str([ i+1 for i,x in enumerate(variantlist) if str(x) == 'nan' ]))
# Ensure valid region:
chrom, startbp, endbp = parseRegionText(regiontxt, build)
chrom = str(chrom).replace('23',"X")
# Load GTEx variant lookup table for region indicated
db = client.GTEx_V7
rsid_colname = 'rs_id_dbSNP147_GRCh37p13'
if build.lower() in ["hg38", "grch38"]:
db = client.GTEx_V8
rsid_colname = 'rs_id_dbSNP151_GRCh38p7'
collection = db['variant_table']
variants_query = collection.find(
{ '$and': [
{ 'chr': int(chrom.replace('X','23')) },
{ 'variant_pos': { '$gte': int(startbp), '$lte': int(endbp) } }
]}
)
variants_list = list(variants_query)
variants_df = pd.DataFrame(variants_list)
variants_df = variants_df.drop(['_id'], axis=1)
# Load dbSNP151 SNP names from region indicated
dbsnp_filepath = ''
suffix = 'b37'
if build.lower() in ["hg38", "grch38"]:
suffix = 'b38'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh38p7', 'All_20180418.vcf.gz')
else:
suffix = 'b37'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh37p13', 'All_20180423.vcf.gz')
# Load dbSNP file
#delayeddf = delayed(pd.read_csv)(dbsnp_filepath,skiprows=getNumHeaderLines(dbsnp_filepath),sep='\t')
#dbsnp = dd.from_delayed(delayeddf)
tbx = pysam.TabixFile(dbsnp_filepath)
print('Compiling list of known variants in the region from dbSNP151')
chromcol = []
poscol = []
idcol = []
refcol = []
altcol = []
variantid = [] # in chr_pos_ref_alt_build format
rsids = dict({}) # a multi-allelic variant rsid (key) can be represented in several variantid formats (values)
for row in tbx.fetch(str(chrom), startbp, endbp):
rowlist = str(row).split('\t')
chromi = rowlist[0].replace('chr','')
posi = rowlist[1]
idi = rowlist[2]
refi = rowlist[3]
alti = rowlist[4]
varstr = '_'.join([chromi, posi, refi, alti, suffix])
chromcol.append(chromi)
poscol.append(posi)
idcol.append(idi)
refcol.append(refi)
altcol.append(alti)
variantid.append(varstr)
rsids[idi] = [varstr]
altalleles = alti.split(',') # could have more than one alt allele (multi-allelic)
if len(altalleles)>1:
varstr = '_'.join([chromi, posi, refi, altalleles[0], suffix])
rsids[idi].append(varstr)
for i in np.arange(len(altalleles)-1):
varstr = '_'.join([chromi, posi, refi, altalleles[i+1], suffix])
rsids[idi].append(varstr)
print('Cleaning and mapping list of variants')
variantlist = [asnp.split(';')[0].replace(':','_').replace('.','') for asnp in variantlist] # cleaning up the SNP names a bit
stdvariantlist = []
for variant in variantlist:
if variant == '':
stdvariantlist.append('.')
continue
variantstr = variant.replace('chr','')
if re.search("^23_",variantstr): variantstr = variantstr.replace('23_','X_',1)
if variantstr.startswith('rs'):
try:
# Here's the difference from the first function version (we look at GTEx first)
if variant in list(variants_df[rsid_colname]):
stdvar = variants_df['variant_id'].loc[ variants_df[rsid_colname] == variant].to_list()[0]
stdvariantlist.append(stdvar)
else:
stdvariantlist.append(rsids[variantstr][0])
except:
stdvariantlist.append('.')
elif re.search("^\d+_\d+_[A,T,G,C]+_[A,T,C,G]+,*", variantstr.replace('X','23')):
strlist = variantstr.split('_')
strlist = list(filter(None, strlist)) # remove empty strings
try:
achr, astart, aend = parseRegionText(strlist[0]+":"+strlist[1]+"-"+str(int(strlist[1])+1), build)
achr = str(achr).replace('23','X')
if achr == str(chrom) and astart >= startbp and astart <= endbp:
variantstr = variantstr.replace("_"+str(suffix),"") + "_"+str(suffix)
if len(variantstr.split('_')) == 5:
stdvariantlist.append(variantstr)
else:
raise InvalidUsage(f'Variant format not recognizable: {variant}. Is it from another coordinate build system?', status_code=410)
else:
stdvariantlist.append('.')
except:
raise InvalidUsage(f'Problem with variant {variant}', status_code=410)
elif re.search("^\d+_\d+_*[A,T,G,C]*", variantstr.replace('X','23')):
strlist = variantstr.split('_')
strlist = list(filter(None, strlist)) # remove empty strings
try:
achr, astart, aend = parseRegionText(strlist[0]+":"+strlist[1]+"-"+str(int(strlist[1])+1), build)
achr = str(achr).replace('23','X')
if achr == str(chrom) and astart >= startbp and astart <= endbp:
if len(strlist)==3:
aref=strlist[2]
else:
aref=''
stdvariantlist.append(fetchSNV(achr, astart, aref, build))
else:
stdvariantlist.append('.')
except:
raise InvalidUsage(f'Problem with variant {variant}', status_code=410)
else:
raise InvalidUsage(f'Variant format not recognized: {variant}', status_code=410)
return stdvariantlist
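# Usage sketch (illustrative only; requires the local MongoDB GTEx variant
# tables and the dbSNP151 tabix files referenced above; the variant names and
# region below are placeholders):
#
#   variants = ["rs1234", "1:205720483", "1_205720483_G_A"]
#   std_ids = standardizeSNPs(variants, "1:205500000-206000000", "hg19")
#   # -> chrom_pos_ref_alt_b37 identifiers, with '.' for variants that cannot be mapped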
def cleanSNPs(variantlist, regiontext, build):
"""
Parameters
----------
variantlist : list
list of variant IDs in rs id or chr_pos, chr_pos_ref_alt, chr_pos_ref_alt_build, etc formats
regiontext : str
the region of interest in chr:start-end format
build : str
build.lower() in ['hg19','hg38', 'grch37', 'grch38'] must be true
Returns
-------
A cleaner set of SNP names
rs id's are cleaned to contain only one,
non-rs id formats are standardized to chr_pos_ref_alt_build format)
any SNPs not in regiontext are returned as '.'
"""
variantlist = [asnp.split(';')[0].replace(':','_').replace('.','') for asnp in variantlist] # cleaning up the SNP names a bit
std_varlist = standardizeSNPs(variantlist, regiontext, build)
final_varlist = [ e if (e.startswith('rs') and std_varlist[i] != '.') else std_varlist[i] for i, e in enumerate(variantlist) ]
return final_varlist
def torsid(variantlist, regiontext, build):
"""
Parameters
----------
variantlist : list
List of variants in either rs id or other chr_pos, chr_pos_ref, chr_pos_ref_alt, chr_pos_ref_alt_build format.
Returns
-------
rsidlist : list
Corresponding rs id in the region if found.
Otherwise returns '.'
"""
if all(x=='.' for x in variantlist):
raise InvalidUsage('No variants provided')
variantlist = cleanSNPs(variantlist, regiontext, build)
chrom, startbp, endbp = parseRegionText(regiontext, build)
chrom = str(chrom).replace('23',"X")
# Load dbSNP151 SNP names from region indicated
dbsnp_filepath = ''
suffix = 'b37'
if build.lower() in ["hg38", "grch38"]:
suffix = 'b38'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh38p7', 'All_20180418.vcf.gz')
else:
suffix = 'b37'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh37p13', 'All_20180423.vcf.gz')
# Load dbSNP file
tbx = pysam.TabixFile(dbsnp_filepath)
print('Compiling list of known variants in the region from dbSNP151')
chromcol = []
poscol = []
idcol = []
refcol = []
altcol = []
rsid = dict({}) # chr_pos_ref_alt_build (keys) for rsid output (values)
for row in tbx.fetch(str(chrom), startbp, endbp):
rowlist = str(row).split('\t')
chromi = rowlist[0].replace('chr','')
posi = rowlist[1]
idi = rowlist[2]
refi = rowlist[3]
alti = rowlist[4]
varstr = '_'.join([chromi, posi, refi, alti, suffix])
chromcol.append(chromi)
poscol.append(posi)
idcol.append(idi)
refcol.append(refi)
altcol.append(alti)
rsid[varstr] = idi
altalleles = alti.split(',') # could have more than one alt allele (multi-allelic)
if len(altalleles)>1:
varstr = '_'.join([chromi, posi, refi, altalleles[0], suffix])
rsid[varstr] = idi
for i in np.arange(len(altalleles)-1):
varstr = '_'.join([chromi, posi, refi, altalleles[i+1], suffix])
rsid[varstr] = idi
finalvarlist = []
for variant in variantlist:
if not variant.startswith('rs'):
try:
finalvarlist.append(rsid[variant])
except:
finalvarlist.append('.')
else:
finalvarlist.append(variant)
return finalvarlist
def decomposeVariant(variant_list):
"""
Parameters
----------
variantid_list : list
list of str standardized variants in chr_pos_ref_alt_build format
Returns
-------
A pandas.dataframe with chromosome, pos, reference and alternate alleles columns
"""
chromlist = [x.split('_')[0] if len(x.split('_'))==5 else x for x in variant_list]
chromlist = [int(x) if x not in ["X","."] else x for x in chromlist]
poslist = [int(x.split('_')[1]) if len(x.split('_'))==5 else x for x in variant_list]
reflist = [x.split('_')[2] if len(x.split('_'))==5 else x for x in variant_list]
altlist = [x.split('_')[3] if len(x.split('_'))==5 else x for x in variant_list]
df = pd.DataFrame({
default_chromname: chromlist
,default_posname: poslist
,default_refname: reflist
,default_altname: altlist
})
return df
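# Example (sketch): decomposing standardized IDs into their components.
#
#   decomposeVariant(["1_205720483_G_A_b37", "."])
#   # -> DataFrame with columns '#CHROM', 'POS', 'REF', 'ALT'
#   #    (the unmapped '.' entry stays '.' in every column)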
def addVariantID(gwas_data, chromcol, poscol, refcol, altcol, build = "hg19"):
"""
Parameters
----------
gwas_data : pandas.DataFrame
Has a minimum of chromosome, position, reference and alternate allele columns.
chromcol : str
chromosome column name in gwas_data
poscol : str
position column name in gwas_data
refcol : str
reference allele column name in gwas_data
altcol : str
alternate allele column name in gwas_data
Returns
-------
pandas.dataframe with list of standardized variant ID's in chrom_pos_ref_alt_build format added to gwas_data
"""
varlist = []
buildstr = 'b37'
if build.lower() == 'hg38':
buildstr = 'b38'
chromlist = list(gwas_data[chromcol])
poslist = list(gwas_data[poscol])
reflist = [x.upper() for x in list(gwas_data[refcol])]
altlist = [x.upper() for x in list(gwas_data[altcol])]
for i in np.arange(gwas_data.shape[0]):
chrom = chromlist[i]
pos = poslist[i]
ref = reflist[i]
alt = altlist[i]
varlist.append('_'.join([str(chrom),str(pos),ref,alt,buildstr]))
gwas_data[default_snpname] = varlist
return gwas_data
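# Example (sketch): appending standardized variant IDs to a minimal GWAS table
# (the column names here are arbitrary placeholders):
#
#   gwas = pd.DataFrame({'CHR': [1], 'BP': [205720483], 'A2': ['G'], 'A1': ['A']})
#   addVariantID(gwas, 'CHR', 'BP', 'A2', 'A1', build='hg19')
#   # adds an 'ID' column containing '1_205720483_G_A_b37'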
def verifyStdSNPs(stdsnplist, regiontxt, build):
# Ensure valid region:
chrom, startbp, endbp = parseRegionText(regiontxt, build)
chrom = str(chrom).replace('23',"X")
# Load GTEx variant lookup table for region indicated
db = client.GTEx_V7
if build.lower() in ["hg38", "grch38"]:
db = client.GTEx_V8
collection = db['variant_table']
variants_query = collection.find(
{ '$and': [
{ 'chr': int(chrom.replace('X','23')) },
{ 'variant_pos': { '$gte': int(startbp), '$lte': int(endbp) } }
]}
)
variants_list = list(variants_query)
variants_df = pd.DataFrame(variants_list)
variants_df = variants_df.drop(['_id'], axis=1)
gtex_std_snplist = list(variants_df['variant_id'])
isInGTEx = [ x for x in stdsnplist if x in gtex_std_snplist ]
return len(isInGTEx)
def subsetLocus(build, summaryStats, regiontext, chromcol, poscol, pcol):
# regiontext format example: "1:205500000-206000000"
if regiontext == "": regiontext = default_region
print('Parsing region text')
chrom, startbp, endbp = parseRegionText(regiontext, build)
print(chrom,startbp,endbp)
print('Eliminating missing rows')
#summaryStats.dropna(subset=[chromcol,poscol,pcol],inplace=True)
print('Subsetting GWAS data to entered region')
summaryStats = summaryStats.loc[ [str(x) != '.' for x in list(summaryStats[chromcol])] ].copy()
bool1 = [x == chrom for x in Xto23(list(summaryStats[chromcol]))]
bool2 = [x>=startbp and x<=endbp for x in list(summaryStats[poscol])]
bool3 = [not x for x in list(summaryStats.isnull().any(axis=1))]
bool4 = [str(x) != '.' for x in list(summaryStats[chromcol])]
gwas_indices_kept = [ ((x and y) and z) and w for x,y,z,w in zip(bool1,bool2,bool3,bool4)]
summaryStats = summaryStats.loc[ gwas_indices_kept ].copy()
summaryStats.sort_values(by=[ poscol ], inplace=True)
chromcolnum = list(summaryStats.columns).index(chromcol)
summaryStats.reset_index(drop=True, inplace=True)
summaryStats.iloc[:,chromcolnum] = Xto23(list(summaryStats[chromcol]))
if summaryStats.shape[0] == 0:
raise InvalidUsage('No data found for entered region', status_code=410)
# Check for invalid p=0 rows:
zero_p = [x for x in list(summaryStats[pcol]) if x==0]
if len(zero_p)>0:
raise InvalidUsage('P-values of zero detected; please replace with a non-zero p-value')
return summaryStats, gwas_indices_kept
def getLeadSNPindex(leadsnpname, summaryStats, snpcol, pcol):
lead_snp = leadsnpname
snp_list = list(summaryStats.loc[:,snpcol])
snp_list = [asnp.split(';')[0] for asnp in snp_list] # cleaning up the SNP names a bit
if lead_snp=='': lead_snp = list(summaryStats.loc[ summaryStats.loc[:,pcol] == min(summaryStats.loc[:,pcol]) ].loc[:,snpcol])[0].split(';')[0]
if lead_snp not in snp_list:
raise InvalidUsage('Lead SNP not found', status_code=410)
lead_snp_position_index = snp_list.index(lead_snp)
return lead_snp_position_index
####################################
# LD Calculation from 1KG using PLINK (on-the-fly)
####################################
def resolve_plink_filepath(build, pop, chrom):
"""
Returns the file path of the binary plink file
"""
if chrom == 'X': chrom = 23
try:
chrom = int(chrom)
except:
raise InvalidUsage(f"Invalid chromosome {str(chrom)}", status_code=410)
if chrom not in np.arange(1,24):
raise InvalidUsage(f"Invalid chromosome {str(chrom)}", status_code=410)
if pop not in valid_populations:
raise InvalidUsage(f"{str(pop)} is not a recognized population", status_code=410)
plink_filepath = ""
if build.lower() in ["hg19","grch37"]:
if chrom == 23:
plink_filepath = os.path.join(MYDIR, "data", "1000Genomes_GRCh37", pop, "chrX")
else:
plink_filepath = os.path.join(MYDIR, "data", "1000Genomes_GRCh37", pop, f"chr{chrom}")
elif build.lower() in ["hg38","grch38"]:
if chrom == 23:
plink_filepath = os.path.join(MYDIR, "data", "1000Genomes_GRCh38", "chrX")
else:
plink_filepath = os.path.join(MYDIR, "data", "1000Genomes_GRCh38", f"chr{chrom}")
else:
raise InvalidUsage(f'{str(build)} is not a recognized genome build')
return plink_filepath
def plink_ldmat(build, pop, chrom, snp_positions, outfilename):
plink_filepath = resolve_plink_filepath(build, pop, chrom)
# make snps file to extract:
snps = [f"chr{str(int(chrom))}:{str(int(position))}" for position in snp_positions]
writeList(snps, outfilename + "_snps.txt")
#plink_path = subprocess.run(args=["which","plink"], stdout=subprocess.PIPE, universal_newlines=True).stdout.replace('\n','')
if build.lower() in ["hg19","grch37"]:
if os.name == 'nt':
plinkrun = subprocess.run(args=[
"./plink.exe", '--bfile', plink_filepath
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--r2", "square"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
plinkrun = subprocess.run(args=[
"./plink", '--bfile', plink_filepath
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--r2", "square"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build.lower() in ["hg38","grch38"]:
popfile = os.path.join(MYDIR, 'data', '1000Genomes_GRCh38', str(pop)+'.txt')
if os.name == 'nt':
plinkrun = subprocess.run(args=[
"./plink.exe", '--bfile', plink_filepath
, "--keep", popfile # this is the difference in running GRCh38
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--r2", "square"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
plinkrun = subprocess.run(args=[
"./plink", '--bfile', plink_filepath
, "--keep", popfile # this is the difference in running GRCh38
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--r2", "square"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
raise InvalidUsage(f'{str(build)} is not a recognized genome build')
if plinkrun.returncode != 0:
raise InvalidUsage(plinkrun.stdout.decode('utf-8'), status_code=410)
ld_snps = list(pd.read_csv(outfilename + ".bim", sep="\t", header=None).iloc[:,1])
ldmat = np.matrix(pd.read_csv(outfilename + ".ld", sep="\t", header=None))
return ld_snps, ldmat
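# Usage sketch (illustrative only; assumes the bundled 1000 Genomes PLINK files
# and the plink binary are present; the positions are placeholders in hg19
# coordinates):
#
#   positions = [205720483, 205723000, 205730000]
#   out_prefix = os.path.join(app.config['UPLOAD_FOLDER'], 'ld_example')
#   ld_snps, ldmat = plink_ldmat("hg19", "EUR", 1, positions, out_prefix)
#   # ld_snps: chr:pos IDs retained by plink; ldmat: square r^2 matrix (np.matrix)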
def plink_ld_pairwise(build, lead_snp_position, pop, chrom, snp_positions, snp_pvalues, outfilename):
# positions must be in hg19 coordinates
# returns NaN for SNPs not in 1KG LD file; preserves order of input snp_positions
plink_filepath = resolve_plink_filepath(build, pop, chrom)
# make snps file to extract:
snps = [f"chr{str(int(chrom))}:{str(int(position))}" for position in snp_positions]
writeList(snps, outfilename + "_snps.txt")
# Ensure lead snp is also present in 1KG; if not, choose next best lead SNP
lead_snp = f"chr{str(int(chrom))}:{str(int(lead_snp_position))}"
the1kg_snps = list(pd.read_csv(plink_filepath + ".bim", sep="\t", header=None).iloc[:,1])
new_lead_snp = lead_snp
new_lead_snp_position = int(lead_snp_position)
while (new_lead_snp not in the1kg_snps) and (len(snp_positions) != 1):
print(new_lead_snp + ' not in 1KG ' + str(len(snp_positions)) + ' SNPs left ')
lead_snp_index = snp_positions.index(new_lead_snp_position)
snp_positions.remove(new_lead_snp_position)
del snp_pvalues[lead_snp_index]
new_lead_snp_position = snp_positions[ snp_pvalues.index(min(snp_pvalues)) ]
new_lead_snp = f"chr{str(int(chrom))}:{str(int(new_lead_snp_position))}"
if len(snp_positions) == 0:
raise InvalidUsage('No alternative lead SNP found in the 1000 Genomes', status_code=410)
lead_snp = new_lead_snp
lead_snp_position = new_lead_snp_position
print('Lead SNP in use: ' + lead_snp)
#plink_path = subprocess.run(args=["which","plink"], stdout=subprocess.PIPE, universal_newlines=True).stdout.replace('\n','')
if build.lower() in ["hg19","grch37"]:
if os.name == 'nt':
plinkrun = subprocess.run(args=[
"./plink.exe", '--bfile', plink_filepath
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--ld-snp", f"chr{str(int(chrom))}:{str(int(lead_snp_position))}"
, "--r2"
, "--ld-window-r2", "0"
, "--ld-window", "999999"
, "--ld-window-kb", "200000"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
plinkrun = subprocess.run(args=[
"./plink", '--bfile', plink_filepath
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--ld-snp", f"chr{str(int(chrom))}:{str(int(lead_snp_position))}"
, "--r2"
, "--ld-window-r2", "0"
, "--ld-window", "999999"
, "--ld-window-kb", "200000"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build.lower() in ["hg38","grch38"]:
popfile = os.path.join(MYDIR, 'data', '1000Genomes_GRCh38', str(pop)+'.txt')
if os.name == 'nt':
plinkrun = subprocess.run(args=[
"./plink.exe", '--bfile', plink_filepath
, "--keep", popfile # this is the difference in running GRCh38
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--ld-snp", f"chr{str(int(chrom))}:{str(int(lead_snp_position))}"
, "--r2"
, "--ld-window-r2", "0"
, "--ld-window", "999999"
, "--ld-window-kb", "200000"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
plinkrun = subprocess.run(args=[
"./plink", '--bfile', plink_filepath
, "--keep", popfile # this is the difference in running GRCh38
, "--chr", str(chrom)
, "--extract", outfilename + "_snps.txt"
, "--from-bp", str(min(snp_positions))
, "--to-bp", str(max(snp_positions))
, "--ld-snp", f"chr{str(int(chrom))}:{str(int(lead_snp_position))}"
, "--r2"
, "--ld-window-r2", "0"
, "--ld-window", "999999"
, "--ld-window-kb", "200000"
, "--make-bed"
, "--threads", "1"
, "--out", outfilename
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
raise InvalidUsage(f'{str(build)} is not a recognized genome build')
if plinkrun.returncode != 0:
raise InvalidUsage(plinkrun.stdout.decode('utf-8'), status_code=410)
ld_results = pd.read_csv(outfilename + ".ld", delim_whitespace=True)
available_r2_positions = ld_results[['BP_B', 'R2']]
pos_df = pd.DataFrame({'pos': snp_positions})
merged_df = pd.merge(pos_df, available_r2_positions, how='left', left_on="pos", right_on="BP_B", sort=False)[['pos', 'R2']]
merged_df.fillna(-1, inplace=True)
return merged_df, new_lead_snp_position
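
# Hedged usage sketch (defined but never called): the argument shapes expected by
# plink_ld_pairwise. The population code, chromosome, positions and p-values below are
# illustrative assumptions; in the app this function is called from the index() route.
def _example_plink_ld_pairwise():
    positions = [205500000, 205500100, 205500500]
    pvalues = [1e-8, 5e-3, 2e-4]
    outprefix = os.path.join(MYDIR, "static", "session_data", "ld-example")
    ld_df, lead_pos = plink_ld_pairwise(
        "hg19",        # genome build
        positions[0],  # lead SNP basepair position
        "EUR",         # 1000 Genomes population panel
        1,             # chromosome
        positions,     # all SNP positions in the region
        pvalues,       # matching GWAS p-values (used to pick a fallback lead SNP)
        outprefix,     # prefix for the PLINK output files
    )
    # ld_df has columns ['pos', 'R2'] aligned to `positions`; -1 marks SNPs missing from 1KG
    return ld_df, lead_pos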
####################################
# Getting GTEx Data from Local MongoDB Database
####################################
# This is the main function to extract the data for a tissue and gene_id:
def get_gtex(version, tissue, gene_id):
if version.upper() == "V8":
db = client.GTEx_V8
collapsed_genes_df = collapsed_genes_df_hg38
elif version.upper() == "V7":
db = client.GTEx_V7
collapsed_genes_df = collapsed_genes_df_hg19
tissue = tissue.replace(' ','_')
#gene_id = gene_id.upper()
ensg_name = ""
if tissue not in db.list_collection_names():
raise InvalidUsage(f'Tissue {tissue} not found', status_code=410)
collection = db[tissue]
if gene_id.startswith('ENSG'):
i = list(collapsed_genes_df['ENSG_name']).index(gene_id)
ensg_name = list(collapsed_genes_df['ENSG_name'])[i]
elif gene_id in list(collapsed_genes_df['name']):
i = list(collapsed_genes_df['name']).index(gene_id)
ensg_name = list(collapsed_genes_df['ENSG_name'])[i]
else:
raise InvalidUsage(f'Gene name {gene_id} not found', status_code=410)
#t1=datetime.now()
results = list(collection.find({'gene_id': ensg_name}))
#t2=datetime.now()
#print(f'Time to retrieve gene collection: {t2-t1}')
#t1=datetime.now()
response = []
try:
response = results[0]['eqtl_variants']
except:
return pd.DataFrame([{'error': f'No eQTL data for {gene_id} in {tissue}'}])
results_df = pd.DataFrame(response)
chrom = int(list(results_df['variant_id'])[0].split('_')[0].replace('X','23'))
positions = [ int(x.split('_')[1]) for x in list(results_df['variant_id']) ]
#t2=datetime.now()
#print(f'Reformatting time: {t2-t1}')
#t1=datetime.now()
variants_query = db.variant_table.find(
{ '$and': [
{ 'chr': chrom },
{ 'variant_pos': { '$gte': min(positions), '$lte': max(positions) } }
]}
)
#t2=datetime.now()
#print(f'Time for variant table query: {t2-t1}')
#t1=datetime.now()
t1=datetime.now()
variants_list = list(variants_query)
t2=datetime.now()
print(f'Time for variant query conversion to list: {t2-t1}')
t1=datetime.now()
variants_df = pd.DataFrame(variants_list)
t2=datetime.now()
print(f'Time to convert the variant list to dataframe: {t2-t1}')
#print(variants_df.shape)
#t2=datetime.now()
#print(f'Reformatting time: {t2-t1}')
#t1=datetime.now()
variants_df = variants_df.drop(['_id'], axis=1)
#t2=datetime.now()
#print(f'_id dropping time: {t2-t1}')
#t1=datetime.now()
x = pd.merge(results_df, variants_df, on='variant_id')
#t2=datetime.now()
#print(f'Merging time: {t2-t1}')
if version.upper() == "V7":
x.rename(columns={'rs_id_dbSNP147_GRCh37p13': 'rs_id'}, inplace=True)
elif version.upper() == "V8":
x.rename(columns={'rs_id_dbSNP151_GRCh38p7': 'rs_id'}, inplace=True)
return x
# Function to merge the GTEx data with a particular snp_list
def get_gtex_data(version, tissue, gene, snp_list, raiseErrors = False):
build = "hg19"
if version.upper() == "V8":
build = "hg38"
gtex_data = []
rsids = True
rsid_snps = [ x for x in snp_list if x.startswith('rs') ]
b37_snps = [ x for x in snp_list if x.endswith('_b37') ]
b38_snps = [ x for x in snp_list if x.endswith('_b38') ]
if len(rsid_snps) > 0 and (len(b37_snps)>0 or len(b38_snps) > 0):
raise InvalidUsage("There is a mix of rsid and other variant id formats; please use a consistent format")
elif len(rsid_snps) > 0:
rsids = True
elif len(b37_snps) or len(b38_snps) > 0:
rsids = False
else:
        raise InvalidUsage('Variant naming format not supported; ensure all variants are rs IDs or formatted as chrom_pos_ref_alt_b37, e.g. 1_205720483_G_A_b37')
hugo_gene, ensg_gene = genenames(gene, build)
print(f'Gathering eQTL data for {hugo_gene} ({ensg_gene}) in {tissue}')
response_df = pd.DataFrame({})
if version.upper() == "V7":
response_df = get_gtex("V7", tissue, gene)
elif version.upper() == "V8":
response_df = get_gtex("V8", tissue, gene)
if 'error' not in response_df.columns:
eqtl = response_df
if rsids:
snp_df = pd.DataFrame(snp_list, columns=['rs_id'])
#idx = pd.Index(list(snp_df['rs_id']))
idx2 = pd.Index(list(eqtl['rs_id']))
#snp_df = snp_df[~idx.duplicated()]
eqtl = eqtl[~idx2.duplicated()]
# print('snp_df.shape' + str(snp_df.shape))
gtex_data = snp_df.reset_index().merge(eqtl, on='rs_id', how='left', sort=False).sort_values('index')
# print('gtex_data.shape' + str(gtex_data.shape))
# print(gtex_data)
else:
snp_df = pd.DataFrame(snp_list, columns=['variant_id'])
gtex_data = snp_df.reset_index().merge(eqtl, on='variant_id', how='left', sort=False).sort_values('index')
else:
try:
error_message = list(response_df['error'])[0]
gtex_data = pd.DataFrame({})
except:
if raiseErrors:
raise InvalidUsage("No response for tissue " + tissue.replace("_"," ") + " and gene " + hugo_gene + " ( " + ensg_gene + " )", status_code=410)
return gtex_data
# This function simply merges the eqtl_data extracted with the snp_list,
# then returns a list of the eQTL pvalues for snp_list (if available)
def get_gtex_data_pvalues(eqtl_data, snp_list):
rsids = True
if snp_list[0].startswith('rs'):
rsids = True
elif snp_list[0].endswith('_b37'):
rsids = False
elif snp_list[0].endswith('_b38'):
rsids = False
else:
raise InvalidUsage('Variant naming format not supported; ensure all are rs ID\'s or formatted as chrom_pos_ref_alt_b37 eg. 1_205720483_G_A_b37')
if rsids:
gtex_data = pd.merge(eqtl_data, pd.DataFrame(snp_list, columns=['rs_id']), on='rs_id', how='right')
else:
gtex_data = pd.merge(eqtl_data, pd.DataFrame(snp_list, columns=['variant_id']), on='variant_id', how='right')
return list(gtex_data['pval'])
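
# Hedged usage sketch (defined but never called): pulling eQTL p-values for a set of
# rsIDs. The tissue ("Whole_Blood") and gene ("IRGM") names are assumptions and must
# exist in the local GTEx MongoDB instance for any data to come back.
def _example_gtex_pvalues():
    snps = ["rs1050152", "rs2076756", "rs11209026"]
    eqtl_df = get_gtex_data("V7", "Whole_Blood", "IRGM", snps)
    if len(eqtl_df) > 0:
        # one p-value per input SNP, NaN where the SNP has no eQTL record
        return get_gtex_data_pvalues(eqtl_df, snps)
    return []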
def fix_gwasfile(infile):
outfile = infile.replace('.txt','_mod.txt')
with open(infile) as f:
with open(outfile, 'w') as fout:
filestr = f.readlines()
for line in filestr:
if line[0:2] != "##":
fout.write(line.replace('\t\t\n','\t\n'))
try:
gwas_data = pd.read_csv(outfile, sep="\t", encoding='utf-8')
return gwas_data
except:
raise InvalidUsage('Failed to load primary dataset. Please check formatting is adequate.', status_code=410)
#####################################
# API Routes
#####################################
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
@app.errorhandler(413)
def request_entity_too_large(error):
    # return an explicit 413 status code (the error object itself is not a valid status)
    return 'File Too Large', 413
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/populations")
def get1KGPopulations():
populations = pd.read_csv(os.path.join(MYDIR, 'data/populations.tsv'), sep='\t')
return jsonify(populations.to_dict(orient='list'))
@app.route("/genenames/<build>")
def getGeneNames(build):
if build.lower() == "hg38":
collapsed_genes_df = collapsed_genes_df_hg38
elif build.lower() == "hg19":
collapsed_genes_df = collapsed_genes_df_hg19
return jsonify(list(collapsed_genes_df['name']))
@app.route("/genenames/<build>/<chrom>/<startbp>/<endbp>")
def getGenesInRange(build, chrom, startbp, endbp):
collapsed_genes_df = collapsed_genes_df_hg19
if build.lower() == "hg38":
collapsed_genes_df = collapsed_genes_df_hg38
regiontext = str(chrom) + ":" + startbp + "-" + endbp
chrom, startbp, endbp = parseRegionText(regiontext, build)
genes_to_draw = collapsed_genes_df.loc[ (collapsed_genes_df['chrom'] == ('chr' + str(chrom).replace('23','X'))) &
( ((collapsed_genes_df['txStart'] >= startbp) & (collapsed_genes_df['txStart'] <= endbp)) |
((collapsed_genes_df['txEnd'] >= startbp ) & (collapsed_genes_df['txEnd'] <= endbp )) |
((collapsed_genes_df['txStart'] <= startbp) & (collapsed_genes_df['txEnd'] >= endbp )) )]
return jsonify(list(genes_to_draw['name']))
@app.route("/gtex/<version>/tissues_list")
def list_tissues(version):
if version.upper() == "V8":
db = client.GTEx_V8
elif version.upper() == "V7":
db = client.GTEx_V7
tissues = list(db.list_collection_names())
tissues.remove('variant_table')
return jsonify(tissues)
@app.route("/gtex/<version>/<tissue>/<gene_id>")
def get_gtex_route(version, tissue, gene_id):
x = get_gtex(version, tissue, gene_id)
x = x.fillna(-1)
return jsonify(x.to_dict(orient='records'))
@app.route("/gtex/<version>/<tissue>/<gene_id>/<variant>")
def get_gtex_variant(version, tissue, gene_id, variant):
x = get_gtex(version, tissue, gene_id)
response_df = x
result = []
if variant.startswith("rs"):
result = response_df.loc[ response_df['rs_id'] == variant ]
elif variant.endswith("_b37") or variant.endswith("_b38"):
result = response_df.loc[ response_df['variant_id'] == variant ]
else:
raise InvalidUsage(f'variant name {variant} not found', status_code=410)
if result.shape[0] == 0:
raise InvalidUsage(f'variant name {variant} not found', status_code=410)
return jsonify(result.to_dict(orient='records'))
@app.route("/previous_session", methods=['GET', 'POST'])
def prev_session():
if request.method == 'POST':
old_session_id = request.form['session-id']
if old_session_id != '':
my_session_id = old_session_id
sessionfile = f'session_data/form_data-{my_session_id}.json'
genes_sessionfile = f'session_data/genes_data-{my_session_id}.json'
SSPvalues_file = f'session_data/SSPvalues-{my_session_id}.json'
coloc2_file = f'session_data/coloc2result-{my_session_id}.json'
sessionfilepath = os.path.join(MYDIR, 'static', sessionfile)
genes_sessionfilepath = os.path.join(MYDIR, 'static', genes_sessionfile)
SSPvalues_filepath = os.path.join(MYDIR, 'static', SSPvalues_file)
coloc2_filepath = os.path.join(MYDIR, 'static', coloc2_file)
else: # blank input
raise InvalidUsage('Invalid input')
# print(f'Session filepath: {sessionfilepath} is {str(os.path.isfile(sessionfilepath))}')
# print(f'Genes filepath: {genes_sessionfilepath} is {str(os.path.isfile(genes_sessionfilepath))}')
# print(f'SSPvalues filepath: {SSPvalues_filepath} is {str(os.path.isfile(SSPvalues_filepath))}')
if not (os.path.isfile(sessionfilepath) and os.path.isfile(genes_sessionfilepath) and os.path.isfile(SSPvalues_filepath) and os.path.isfile(coloc2_filepath)):
raise InvalidUsage(f'Could not locate session {my_session_id}')
return render_template("plot.html", sessionfile = sessionfile, genesfile = genes_sessionfile, SSPvalues_file = SSPvalues_file, coloc2_file = coloc2_file, sessionid = my_session_id)
return render_template('session_form.html')
@app.route("/session_id/<old_session_id>")
def prev_session_input(old_session_id):
if old_session_id != '':
my_session_id = old_session_id
sessionfile = f'session_data/form_data-{my_session_id}.json'
genes_sessionfile = f'session_data/genes_data-{my_session_id}.json'
SSPvalues_file = f'session_data/SSPvalues-{my_session_id}.json'
coloc2_file = f'session_data/coloc2result-{my_session_id}.json'
sessionfilepath = os.path.join(MYDIR, 'static', sessionfile)
genes_sessionfilepath = os.path.join(MYDIR, 'static', genes_sessionfile)
SSPvalues_filepath = os.path.join(MYDIR, 'static', SSPvalues_file)
coloc2_filepath = os.path.join(MYDIR, 'static', coloc2_file)
else: # blank input
raise InvalidUsage('Invalid input')
# print(f'Session filepath: {sessionfilepath} is {str(os.path.isfile(sessionfilepath))}')
# print(f'Genes filepath: {genes_sessionfilepath} is {str(os.path.isfile(genes_sessionfilepath))}')
# print(f'SSPvalues filepath: {SSPvalues_filepath} is {str(os.path.isfile(SSPvalues_filepath))}')
if not (os.path.isfile(sessionfilepath) and os.path.isfile(genes_sessionfilepath) and os.path.isfile(SSPvalues_filepath) and os.path.isfile(coloc2_filepath)):
raise InvalidUsage(f'Could not locate session {my_session_id}')
return render_template("plot.html", sessionfile = sessionfile, genesfile = genes_sessionfile, SSPvalues_file = SSPvalues_file, coloc2_file = coloc2_file, sessionid = my_session_id)
@app.route("/update/<session_id>/<newgene>")
def update_colocalizing_gene(session_id, newgene):
sessionfile = f'session_data/form_data-{session_id}.json'
sessionfilepath = os.path.join(APP_STATIC, sessionfile)
data = json.load(open(sessionfilepath, 'r'))
gtex_tissues = data['gtex_tissues']
snp_list = data['snps']
gtex_version = data['gtex_version']
if gtex_version.upper() not in available_gtex_versions:
gtex_version = "V7"
# gtex_data = {}
for tissue in tqdm(gtex_tissues):
        data[tissue] = []  # keep the response JSON-serializable when no eQTL data is found for this tissue
eqtl_df = get_gtex_data(gtex_version, tissue, newgene, snp_list)
#eqtl_filepath = os.path.join(APP_STATIC, f'session_data/eqtl_df-{tissue}-{newgene}-{session_id}.txt')
# if os.path.isfile(eqtl_filepath):
if len(eqtl_df) > 0:
eqtl_df.fillna(-1, inplace=True)
data[tissue] = eqtl_df.to_dict(orient='records')
# data.update(gtex_data)
# json.dump(data, open(sessionfilepath, 'w'))
return jsonify(data)
@app.route("/regionCheck/<build>/<regiontext>")
def regionCheck(build, regiontext):
message = dict({'response': "OK"})
if build not in ['hg19', 'hg38']:
message['response'] = f'Unrecognized build: {build}'
return jsonify(message)
regiontext = regiontext.strip().replace(' ','').replace(',','').replace('chr','')
if not re.search("^\d+:\d+-\d+$", regiontext.replace('X','23').replace('x','23')):
message['response'] = 'Invalid coordinate format. e.g. 1:205,000,000-206,000,000'
return jsonify(message)
chrom = regiontext.split(':')[0].lower().replace('chr','').upper()
pos = regiontext.split(':')[1]
startbp = pos.split('-')[0].replace(',','')
endbp = pos.split('-')[1].replace(',','')
chromLengths = pd.read_csv(os.path.join(MYDIR, 'data', build + '_chrom_lengths.txt'), sep="\t", encoding='utf-8')
chromLengths.set_index('sequence',inplace=True)
if chrom in ['X','x'] or chrom == '23':
chrom = 23
maxChromLength = chromLengths.loc['chrX', 'length']
try:
startbp = int(startbp)
endbp = int(endbp)
except:
message['response'] = "Invalid coordinate input"
return jsonify(message)
else:
try:
chrom = int(chrom)
if chrom == 23:
maxChromLength = chromLengths.loc['chrX', 'length']
else:
maxChromLength = chromLengths.loc['chr'+str(chrom), 'length']
startbp = int(startbp)
endbp = int(endbp)
except:
message['response'] = "Invalid coordinate input"
return jsonify(message)
if chrom < 1 or chrom > 23:
message['response'] = 'Chromosome input must be between 1 and 23'
elif startbp > endbp:
message['response'] = 'Starting chromosome basepair position is greater than ending basepair position'
elif startbp > maxChromLength or endbp > maxChromLength:
message['response'] = 'Start or end coordinates are out of range'
elif (endbp - startbp) > genomicWindowLimit:
message['response'] = f'Entered region size is larger than {genomicWindowLimit/10**6} Mbp'
return jsonify(message)
else:
return jsonify(message)
return jsonify(message)
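
# Hedged usage sketch (defined but never called): exercising the /regionCheck endpoint
# with Flask's test client. The region string is an assumption chosen to fit within
# genomicWindowLimit; a valid request returns {"response": "OK"}.
def _example_region_check():
    with app.test_client() as client:
        resp = client.get("/regionCheck/hg19/1:205000000-205500000")
        return resp.get_json()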
@app.route('/', methods=['GET', 'POST'])
def index():
data = {"success": False}
# Initializing timing variables:
t1_total = np.nan
file_size = np.nan
ldmat_file_size = np.nan
upload_time = np.nan
ldmat_upload_time = np.nan
gwas_load_time = np.nan
ld_pairwise_time = np.nan
user_ld_load_time = np.nan
gtex_one_gene_time = np.nan
gene_list_time = np.nan
SS_region_subsetting_time = np.nan
gtex_all_queries_time = np.nan
ldmat_time = np.nan
ldmat_subsetting_time = np.nan
SS_time = np.nan
# coloc2-specific secondary dataset columns:
BETA = 'BETA'
SE = 'SE'
ALT = 'A1'
REF = 'A2'
MAF = 'MAF'
ProbeID = 'ProbeID'
N = 'N'
#######################################################
# Uploading files
#######################################################
if request.method == 'POST':
t1_total = datetime.now()
if request.files.get('files[]'):
            t1 = datetime.now()  # timer to get total upload time
if 'files[]' in request.files:
filenames = request.files.getlist('files[]')
for file in filenames:
filename = secure_filename(file.filename)
filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
if not os.path.isfile(filepath):
                        raise InvalidUsage('File Too Large', status_code=413)  # calling the 413 handler directly would not abort the request
gwas_filepath, ldmat_filepath, html_filepath = classify_files(filenames)
upload_time = datetime.now() - t1
#######################################################
# Checking form input parameters
#######################################################
my_session_id = uuid.uuid4()
coordinate = request.form['coordinate']
gtex_version = "V7"
collapsed_genes_df = collapsed_genes_df_hg19
if coordinate.lower() == "hg38":
gtex_version = "V8"
collapsed_genes_df = collapsed_genes_df_hg38
print(f'Session ID: {my_session_id}')
print(f'Coordinate: {coordinate}')
print(f'GTEx version: {gtex_version}')
print('Loading file')
t1 = datetime.now() # timing started for GWAS loading/subsetting/cleaning
try:
gwas_data = pd.read_csv(gwas_filepath, sep="\t", encoding='utf-8')
except:
            print('GWAS file not in the expected format; attempting to fix it')
gwas_data = fix_gwasfile(gwas_filepath)
inferVariant = request.form.get('markerCheckbox')
chromcol, poscol, refcol, altcol = ('','','','')
snpcol = ''
columnnames = []
if inferVariant:
print('User would like variant locations inferred')
snpcol = verifycol(formname = request.form['snp-col'], defaultname = default_snpname, filecolnames = gwas_data.columns, error_message_='Variant ID column not found')
columnnames = [ snpcol ]
else:
chromcol = verifycol(formname = request.form['chrom-col'], defaultname = default_chromname, filecolnames = gwas_data.columns, error_message_=f"Chromosome column ({request.form['chrom-col']}) not found")
poscol = verifycol(formname = request.form['pos-col'], defaultname = default_posname, filecolnames = gwas_data.columns, error_message_=f"Basepair position column ({request.form['pos-col']}) not found")
refcol = verifycol(formname = request.form['ref-col'], defaultname = default_refname, filecolnames = gwas_data.columns, error_message_=f"Reference allele column ({request.form['ref-col']}) not found")
altcol = verifycol(formname = request.form['alt-col'], defaultname = default_altname, filecolnames = gwas_data.columns, error_message_=f"Alternate allele column ({request.form['alt-col']}) not found")
snpcol = request.form['snp-col'] # optional input in this case
if snpcol != '':
snpcol = verifycol(formname = snpcol, defaultname = default_snpname, filecolnames = gwas_data.columns, error_message_='Variant ID column not found')
columnnames = [ chromcol, poscol, snpcol, refcol, altcol ]
else:
columnnames = [ chromcol, poscol, refcol, altcol ]
print('No SNP ID column provided')
# Check whether data types are ok:
if not all(isinstance(x, int) for x in Xto23(list(gwas_data[chromcol]))):
raise InvalidUsage(f'Chromosome column ({chromcol}) contains unrecognizable values', status_code=410)
if not all(isinstance(x, int) for x in list(gwas_data[poscol])):
raise InvalidUsage(f'Position column ({poscol}) has non-integer entries', status_code=410)
pcol = verifycol(formname = request.form['pval-col'], defaultname = default_pname, filecolnames = gwas_data.columns, error_message_='P-value column not found')
columnnames.append(pcol)
if not all(isinstance(x, float) for x in list(gwas_data[pcol])):
raise InvalidUsage(f'P-value column ({pcol}) has non-numeric entries', status_code=410)
runcoloc2 = request.form.get('coloc2check')
if runcoloc2:
print('User would like COLOC2 results')
betacol = verifycol(formname = request.form['beta-col'], defaultname = default_betaname, filecolnames = gwas_data.columns, error_message_='Beta column not found')
stderrcol = verifycol(formname = request.form['stderr-col'], defaultname = default_betaname, filecolnames = gwas_data.columns, error_message_='Stderr column not found')
numsamplescol = verifycol(formname = request.form['numsamples-col'], defaultname = default_nname, filecolnames = gwas_data.columns, error_message_='Number of samples column not found')
mafcol = verifycol(formname = request.form['maf-col'], defaultname = default_mafname, filecolnames = gwas_data.columns, error_message_='MAF column not found')
columnnames.extend([ betacol, stderrcol, numsamplescol, mafcol ])
studytype = request.form['studytype']
studytypedf = pd.DataFrame({'type': np.repeat(studytype,gwas_data.shape[0]).tolist()})
if 'type' not in gwas_data.columns:
gwas_data = pd.concat([gwas_data, studytypedf], axis=1)
columnnames.append('type')
if studytype == 'cc':
coloc2gwascolnames.append('Ncases')
numcases = request.form['numcases']
if not str(numcases).isdigit(): raise InvalidUsage('Number of cases entered must be an integer', status_code=410)
numcasesdf = pd.DataFrame({'Ncases': np.repeat(int(numcases), gwas_data.shape[0]).tolist()})
if 'Ncases' not in gwas_data.columns:
gwas_data = pd.concat([gwas_data, numcasesdf], axis=1)
columnnames.append('Ncases')
if not all(isinstance(x, float) for x in list(gwas_data[betacol])):
raise InvalidUsage(f'Beta column ({betacol}) has non-numeric entries')
if not all(isinstance(x, float) for x in list(gwas_data[stderrcol])):
raise InvalidUsage(f'Standard error column ({stderrcol}) has non-numeric entries')
if not all(isinstance(x, int) for x in list(gwas_data[numsamplescol])):
raise InvalidUsage(f'Number of samples column ({numsamplescol}) has non-integer entries')
if not all(isinstance(x, float) for x in list(gwas_data[mafcol])):
raise InvalidUsage(f'MAF column ({mafcol}) has non-numeric entries')
# Further check column names provided:
if len(set(columnnames)) != len(columnnames):
raise InvalidUsage(f'Duplicate column names provided: {columnnames}')
gwas_data = gwas_data[ columnnames ]
if snpcol == '':
gwas_data = addVariantID(gwas_data, chromcol, poscol, refcol, altcol, coordinate)
snpcol = default_snpname
# LD:
pops = request.form['LD-populations']
if len(pops) == 0: pops = 'EUR'
print('Populations:', pops)
# GTEx tissues and genes:
gtex_tissues = request.form.getlist('GTEx-tissues')
print('GTEx tissues:',gtex_tissues)
gtex_genes = request.form.getlist('region-genes')
if len(gtex_tissues) > 0 and len(gtex_genes) == 0:
raise InvalidUsage('Please select one or more genes to complement your GTEx tissue(s) selection', status_code=410)
elif len(gtex_genes) > 0 and len(gtex_tissues) == 0:
raise InvalidUsage('Please select one or more tissues to complement your GTEx gene(s) selection', status_code=410)
# old code remnant:
# if gtex_version=="V7": gene='ENSG00000174502.14'
# if gtex_version=="V8": gene='ENSG00000174502.18'
if len(gtex_genes)>0:
gene = gtex_genes[0]
elif coordinate == 'hg19':
gene = 'ENSG00000174502.14'
elif coordinate == 'hg38':
gene = 'ENSG00000174502.18'
# Set-based P override:
setbasedP = request.form['setbasedP']
if setbasedP=='':
setbasedP = 'default'
else:
try:
setbasedP = float(setbasedP)
if setbasedP < 0 or setbasedP > 1:
raise InvalidUsage('Set-based p-value threshold given is not between 0 and 1')
except:
raise InvalidUsage('Invalid value provided for the set-based p-value threshold. Value must be numeric between 0 and 1.')
# Ensure custom LD matrix and GWAS files are sorted for accurate matching:
if ldmat_filepath != '' and poscol != '' and not isSorted(list(gwas_data[poscol])):
raise InvalidUsage('GWAS data input is not sorted and may not match with the LD matrix', status_code=410)
regionstr = request.form['locus']
if regionstr == "": regionstr = default_region
leadsnpname = request.form['leadsnp']
#######################################################
# Standardizing variant ID's to chrom_pos_ref_alt_build format
#######################################################
if inferVariant:
# standardize variant id's:
variant_list = standardizeSNPs(list(gwas_data[snpcol]), regionstr, coordinate)
if all(x=='.' for x in variant_list):
raise InvalidUsage(f'None of the variants provided could be mapped to {regionstr}!', status_code=410)
# get the chrom, pos, ref, alt info from the standardized variant_list
vardf = decomposeVariant(variant_list)
gwas_data = pd.concat([vardf, gwas_data], axis=1)
chromcol = default_chromname
poscol = default_posname
refcol = default_refname
altcol = default_altname
gwas_data = gwas_data.loc[ [str(x) != '.' for x in list(gwas_data[chromcol])] ].copy()
gwas_data.reset_index(drop=True, inplace=True)
#######################################################
# Subsetting GWAS file
#######################################################
gwas_data, gwas_indices_kept = subsetLocus(coordinate, gwas_data, regionstr, chromcol, poscol, pcol)
lead_snp_position_index = getLeadSNPindex(leadsnpname, gwas_data, snpcol, pcol)
lead_snp_position = gwas_data.iloc[lead_snp_position_index,:][poscol]
positions = list(gwas_data[poscol])
snp_list = list(gwas_data[snpcol])
snp_list = [asnp.split(';')[0] for asnp in snp_list] # cleaning up the SNP names a bit
lead_snp = snp_list[ lead_snp_position_index ]
pvals = list(gwas_data[pcol])
chrom, startbp, endbp = parseRegionText(regionstr, coordinate)
gwas_load_time = datetime.now() - t1
std_snp_list = []
thechr = gwas_data[chromcol].tolist()
thepos = gwas_data[poscol].tolist()
theref = gwas_data[refcol].tolist()
thealt = gwas_data[altcol].tolist()
buildstr = 'b37'
if coordinate == 'hg38':
buildstr = 'b38'
for i in np.arange(gwas_data.shape[0]):
std_snp = str(thechr[i]).replace('23','X') + "_" + str(thepos[i]) + "_" + str(theref[i]) + "_" + str(thealt[i]) + "_" + buildstr
std_snp_list.append(std_snp)
# Check that a good portion of these SNPs can be found
thresh = 0.8
snp_warning = False
numGTExMatches = verifyStdSNPs(std_snp_list, regionstr, coordinate)
if numGTExMatches / len(std_snp_list) < thresh:
snp_warning = True
####################################################################################################
# Get LD:
if ldmat_filepath == '':
t1 = datetime.now() # timing started for pairwise LD
print('Calculating pairwise LD using PLINK')
#ld_df = queryLD(lead_snp, snp_list, pops, ld_type)
ld_df, new_lead_snp_position = plink_ld_pairwise(coordinate, lead_snp_position, pops, chrom, positions, pvals, os.path.join(MYDIR, "static", "session_data", f"ld-{my_session_id}"))
if new_lead_snp_position != lead_snp_position:
lead_snp_position_index = list(gwas_data[poscol]).index(new_lead_snp_position)
lead_snp = snp_list[ lead_snp_position_index ]
lead_snp_position = new_lead_snp_position
r2 = list(ld_df['R2'])
ld_pairwise_time = datetime.now() - t1
else:
print('---------------------------------')
print('Loading user-supplied LD matrix')
print('---------------------------------')
t1 = datetime.now() # timer started for loading user-defined LD matrix
ld_mat = pd.read_csv(ldmat_filepath, sep="\t", encoding='utf-8', header=None)
ld_mat = ld_mat.loc[ gwas_indices_kept, gwas_indices_kept ]
r2 = list(ld_mat.iloc[:, lead_snp_position_index])
ld_mat = np.matrix(ld_mat)
if not ((ld_mat.shape[0] == ld_mat.shape[1]) and (ld_mat.shape[0] == gwas_data.shape[0])):
raise InvalidUsage('GWAS and LD matrix input have different dimensions', status_code=410)
user_ld_load_time = datetime.now() - t1
data = {}
data['snps'] = snp_list
data['inferVariant'] = inferVariant
data['pvalues'] = list(gwas_data[pcol])
data['lead_snp'] = lead_snp
data['ld_values'] = r2
data['positions'] = positions
data['chrom'] = chrom
data['startbp'] = startbp
data['endbp'] = endbp
data['ld_populations'] = pops
data['gtex_tissues'] = gtex_tissues
data['gene'] = genenames(gene, coordinate)[0]
data['gtex_genes'] = [ genenames(agene, coordinate)[0] for agene in gtex_genes ]
data['coordinate'] = coordinate
data['gtex_version'] = gtex_version
data['set_based_p'] = setbasedP
SSlocustext = request.form['SSlocus'] # SSlocus defined below
data['std_snp_list'] = std_snp_list
data['runcoloc2'] = runcoloc2
data['snp_warning'] = snp_warning
data['thresh'] = thresh
data['numGTExMatches'] = numGTExMatches
#######################################################
# Loading any secondary datasets uploaded
#######################################################
t1 = datetime.now()
secondary_datasets = {}
table_titles = []
if html_filepath != '':
print('Loading secondary datasets provided')
with open(html_filepath, encoding='utf-8', errors='replace') as f:
html = f.read()
if (not html.startswith('<h3>')) and (not html.startswith('<html>')) and (not html.startswith('<table>') and (not html.startswith('<!DOCTYPE html>'))):
raise InvalidUsage('Secondary dataset(s) provided are not formatted correctly. Please use the merge_and_convert_to_html.py script for formatting.', status_code=410)
soup = bs(html, 'lxml')
table_titles = soup.find_all('h3')
table_titles = [x.text for x in table_titles]
tables = soup.find_all('table')
hp = htmltableparser.HTMLTableParser()
for i in np.arange(len(tables)):
try:
table = hp.parse_html_table(tables[i])
secondary_datasets[table_titles[i]] = table.fillna(-1).to_dict(orient='records')
except:
secondary_datasets[table_titles[i]] = []
data['secondary_dataset_titles'] = table_titles
if runcoloc2:
data['secondary_dataset_colnames'] = ['CHR', 'POS', 'SNPID', 'PVAL', BETA, SE, 'N', ALT, REF, MAF, ProbeID]
else:
data['secondary_dataset_colnames'] = [CHROM, BP, SNP, P]
data.update(secondary_datasets)
sec_data_load_time = datetime.now() - t1
####################################################################################################
t1 = datetime.now() # set timer for extracting GTEx data for selected gene:
# Get GTEx data for the tissues and SNPs selected:
gtex_data = {}
if len(gtex_tissues)>0:
print('Gathering GTEx data')
for tissue in tqdm(gtex_tissues):
eqtl_df = get_gtex_data(gtex_version, tissue, gene, snp_list, raiseErrors=True) # for the full region (not just the SS region)
if len(eqtl_df) > 0:
eqtl_df.fillna(-1, inplace=True)
gtex_data[tissue] = eqtl_df.to_dict(orient='records')
data.update(gtex_data)
gtex_one_gene_time = datetime.now() - t1
####################################################################################################
# Checking that there is at least one secondary dataset for colocalization
####################################################################################################
if len(gtex_tissues)==0 and html_filepath == '':
raise InvalidUsage('Please provide at least one secondary dataset or select at least one GTEx tissue for colocalization analysis')
####################################################################################################
t1 = datetime.now() # timer for determining the gene list
# Obtain any genes to be plotted in the region:
print('Summarizing genes to be plotted in this region')
genes_to_draw = collapsed_genes_df.loc[ (collapsed_genes_df['chrom'] == ('chr' + str(chrom).replace('23','X'))) &
( ((collapsed_genes_df['txStart'] >= startbp) & (collapsed_genes_df['txStart'] <= endbp)) |
((collapsed_genes_df['txEnd'] >= startbp ) & (collapsed_genes_df['txEnd'] <= endbp )) |
((collapsed_genes_df['txStart'] <= startbp) & (collapsed_genes_df['txEnd'] >= endbp )) )]
genes_data = []
for i in np.arange(genes_to_draw.shape[0]):
genes_data.append({
'name': list(genes_to_draw['name'])[i]
,'txStart': list(genes_to_draw['txStart'])[i]
,'txEnd': list(genes_to_draw['txEnd'])[i]
,'exonStarts': [int(bp) for bp in list(genes_to_draw['exonStarts'])[i].split(',')]
,'exonEnds': [int(bp) for bp in list(genes_to_draw['exonEnds'])[i].split(',')]
})
gene_list_time = datetime.now() - t1
####################################################################################################
# 1. Determine the region to calculate the Simple Sum (SS):
if SSlocustext != '':
SSchrom, SS_start, SS_end = parseRegionText(SSlocustext, coordinate)
else:
#SS_start = list(gwas_data.loc[ gwas_data[pcol] == min(gwas_data[pcol]) ][poscol])[0] - one_sided_SS_window_size
#SS_end = list(gwas_data.loc[ gwas_data[pcol] == min(gwas_data[pcol]) ][poscol])[0] + one_sided_SS_window_size
SS_start = int(lead_snp_position - one_sided_SS_window_size)
SS_end = int(lead_snp_position + one_sided_SS_window_size)
SSlocustext = str(chrom) + ":" + str(SS_start) + "-" + str(SS_end)
data['SS_region'] = [SS_start, SS_end]
# # Getting Simple Sum P-values
# 2. Subset the region (step 1 was determining the region to do the SS calculation on - see above SS_start and SS_end variables):
t1 = datetime.now() # timer for subsetting SS region
print('SS_start: ' + str(SS_start))
print('SS_end:' + str(SS_end))
chromList = [('chr' + str(chrom).replace('23','X')), str(chrom).replace('23','X')]
if 'X' in chromList:
chromList.extend(['chr23','23'])
        gwas_chrom_col = pd.Series([str(x) for x in list(gwas_data[chromcol])])
        # boolean mask, aligned with gwas_data rows, of SNPs on the requested chromosome
        SS_chrom_bool = list(gwas_chrom_col.isin(chromList))
        SS_indices = SS_chrom_bool & (gwas_data[poscol] >= SS_start) & (gwas_data[poscol] <= SS_end)
SS_gwas_data = gwas_data.loc[ SS_indices ]
if runcoloc2:
coloc2_gwasdf = SS_gwas_data.rename(columns={
chromcol: 'CHR'
,poscol: 'POS'
,snpcol: 'SNPID'
,pcol: 'PVAL'
,refcol: REF
,altcol: ALT
,betacol: BETA
,stderrcol: SE
,mafcol: MAF
,numsamplescol: 'N'
})
coloc2_gwasdf = coloc2_gwasdf.reindex(columns=coloc2gwascolnames)
# print(coloc2_gwasdf)
#print(gwas_data.shape)
#print(SS_gwas_data.shape)
#print(SS_gwas_data)
        if SS_gwas_data.shape[0] == 0:
            raise InvalidUsage('No data points found for entered Simple Sum region', status_code=410)
PvaluesMat = [list(SS_gwas_data[pcol])]
SS_snp_list = list(SS_gwas_data[snpcol])
SS_snp_list = cleanSNPs(SS_snp_list, regionstr, coordinate)
# optimizing best match variant if given a mix of rsids and non-rsid variants
# varids = SS_snp_list
# if inferVariant:
# rsidx = [i for i,e in enumerate(SS_snp_list) if e.startswith('rs')]
# varids = standardizeSNPs(SS_snp_list, SSlocustext, coordinate)
# SS_rsids = torsid(SS_std_snp_list, SSlocustext, coordinate)
if SSlocustext == '':
SSlocustext = str(chrom) + ":" + str(SS_start) + "-" + str(SS_end)
#SS_std_snp_list = standardizeSNPs(SS_snp_list, SSlocustext, coordinate)
#SS_rsids = torsid(SS_std_snp_list, SSlocustext, coordinate)
SS_positions = list(SS_gwas_data[poscol])
if len(SS_positions) != len(set(SS_positions)):
dups = set([x for x in SS_positions if SS_positions.count(x) > 1])
raise InvalidUsage('Duplicate chromosome basepair positions detected at: ' + str(dups))
SS_std_snp_list = [e for i,e in enumerate(std_snp_list) if SS_indices[i]]
# Extra file written:
gwas_df = pd.DataFrame({
'Position': SS_positions,
'SNP': SS_snp_list,
'variant_id': SS_std_snp_list,
'P': list(SS_gwas_data[pcol])
})
gwas_df.to_csv(os.path.join(MYDIR, 'static', f'session_data/gwas_df-{my_session_id}.txt'), index=False, encoding='utf-8', sep="\t")
SS_region_subsetting_time = datetime.now() - t1
data['num_SS_snps'] = gwas_df.shape[0]
####################################################################################################
# 3. Determine the genes to query
#query_genes = list(genes_to_draw['name'])
query_genes = gtex_genes
coloc2eqtl_df = pd.DataFrame({})
# 4. Query and extract the eQTL p-values for all tissues & genes from GTEx
t1 = datetime.now() # timer set to check how long data extraction from Mongo takes
if len(gtex_tissues)>0:
print('Obtaining eQTL p-values for selected tissues and surrounding genes')
for tissue in gtex_tissues:
for agene in query_genes:
gtex_eqtl_df = get_gtex_data(gtex_version, tissue, agene, SS_std_snp_list)
# print('len(gtex_eqtl_df) '+ str(len(gtex_eqtl_df)))
# print('gtex_eqtl_df.shape ' + str(gtex_eqtl_df.shape))
# print('len(SS_snp_list) ' + str(len(SS_snp_list)))
#print(SS_std_snp_list)
#print(gtex_eqtl_df.dropna())
if len(gtex_eqtl_df) > 0:
#gtex_eqtl_df.fillna(-1, inplace=True)
pvalues = list(gtex_eqtl_df['pval'])
if runcoloc2:
tempdf = gtex_eqtl_df.rename(columns={
'rs_id': 'SNPID'
,'pval': 'PVAL'
,'beta': BETA
,'se': SE
,'sample_maf': MAF
,'chr': 'CHR'
,'variant_pos': 'POS'
,'ref': REF
,'alt': ALT
})
tempdf.dropna(inplace=True)
if len(tempdf.index) != 0:
numsamples = round(tempdf['ma_count'].tolist()[0] / tempdf[MAF].tolist()[0])
numsampleslist = np.repeat(numsamples, tempdf.shape[0]).tolist()
tempdf = pd.concat([tempdf,pd.Series(numsampleslist,name='N')],axis=1)
probeid = str(tissue) + ':' + str(agene)
probeidlist = np.repeat(probeid, tempdf.shape[0]).tolist()
tempdf = pd.concat([tempdf, pd.Series(probeidlist,name='ProbeID')],axis=1)
tempdf = tempdf.reindex(columns = coloc2eqtlcolnames)
coloc2eqtl_df = pd.concat([coloc2eqtl_df, tempdf], axis=0)
else:
pvalues = np.repeat(np.nan, len(SS_snp_list))
PvaluesMat.append(pvalues)
# print(f'tissue: {tissue}, gene: {gene}, len(pvalues): {len(pvalues)}')
# print(f'len(SS_positions): {len(SS_positions)}, len(SS_snp_list): {len(SS_snp_list)}')
# Extra files written:
# eqtl_df = pd.DataFrame({
# 'Position': SS_positions,
# 'SNP': SS_snp_list,
# 'P': pvalues
# })
# eqtl_df.to_csv(os.path.join(MYDIR, 'static', f'session_data/eqtl_df-{tissue}-{agene}-{my_session_id}.txt'), index=False, encoding='utf-8', sep="\t")
# print(f'Time to extract eQTLs for {tissue} and {agene}:' + str(datetime.now()-t1))
gtex_all_queries_time = datetime.now() - t1
####################################################################################################
# 4.2 Extract user's secondary datasets' p-values
####################################################################################################
if len(secondary_datasets)>0:
if runcoloc2:
print('Saving uploaded secondary datasets for coloc2 run')
for i in np.arange(len(secondary_datasets)):
secondary_dataset = pd.DataFrame(secondary_datasets[list(secondary_datasets.keys())[i]])
if secondary_dataset.shape[0] == 0:
print(f'No data for table {table_titles[i]}')
pvalues = np.repeat(np.nan, len(SS_snp_list))
PvaluesMat.append(pvalues)
continue
if not set(coloc2eqtlcolnames).issubset(secondary_dataset):
raise InvalidUsage(f'You have chosen to run COLOC2. COLOC2 assumes eQTL data as secondary dataset, and you must have all of the following column names: {coloc2eqtlcolnames}')
secondary_dataset['SNPID'] = cleanSNPs(secondary_dataset['SNPID'].tolist(),regionstr,coordinate)
#secondary_dataset.set_index('SNPID', inplace=True)
idx = pd.Index(list(secondary_dataset['SNPID']))
secondary_dataset = secondary_dataset.loc[~idx.duplicated()].reset_index().drop(columns=['index'])
# merge to keep only SNPs already present in the GWAS/primary dataset (SS subset):
secondary_data_std_snplist = standardizeSNPs(secondary_dataset['SNPID'].tolist(), regionstr, coordinate)
secondary_dataset = pd.concat([secondary_dataset, pd.DataFrame(secondary_data_std_snplist, columns=['SNPID.tmp'])], axis=1)
snp_df = pd.DataFrame(SS_std_snp_list, columns=['SNPID.tmp'])
secondary_data = snp_df.reset_index().merge(secondary_dataset, on='SNPID.tmp', how='left', sort=False).sort_values('index')
pvalues = list(secondary_data['PVAL'])
PvaluesMat.append(pvalues)
coloc2eqtl_df = pd.concat([coloc2eqtl_df, secondary_data.reindex(columns = coloc2eqtlcolnames)], axis=0)
else:
print('Obtaining p-values for uploaded secondary dataset(s)')
for i in np.arange(len(secondary_datasets)):
secondary_dataset = pd.DataFrame(secondary_datasets[list(secondary_datasets.keys())[i]])
if secondary_dataset.shape[0] == 0:
print(f'No data for table {table_titles[i]}')
pvalues = np.repeat(np.nan, len(SS_snp_list))
PvaluesMat.append(pvalues)
continue
# remove duplicate SNPs
secondary_dataset[SNP] = cleanSNPs(secondary_dataset[SNP].tolist(),regionstr,coordinate)
idx = pd.Index(list(secondary_dataset[SNP]))
secondary_dataset = secondary_dataset.loc[~idx.duplicated()].reset_index().drop(columns=['index'])
# merge to keep only SNPs already present in the GWAS/primary dataset (SS subset):
secondary_data_std_snplist = standardizeSNPs(secondary_dataset[SNP].tolist(), regionstr, coordinate)
std_snplist_df = pd.DataFrame(secondary_data_std_snplist, columns=[SNP+'.tmp'])
secondary_dataset = pd.concat([secondary_dataset,std_snplist_df], axis=1)
snp_df = | pd.DataFrame(SS_std_snp_list, columns=[SNP+'.tmp']) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_tspecscores.ipynb (unless otherwise specified).
__all__ = ['log_it', 'tsi', 'spm', 'zscore', 'tau', 'ts_func', 'calc_ts']
# Cell
import pandas as pd
import numpy as np
# Cell
def log_it(data: pd.DataFrame) -> pd.DataFrame:
df = data.copy()
return np.log(1 + df)
# Cell
def tsi(data: pd.DataFrame) -> pd.DataFrame:
    """Tissue Specificity Index: each value divided by its row (gene) sum; all-zero rows stay zero."""
    df = data.copy()
    row_sums = np.sum(df.values, axis=1)[:, None]
    return pd.DataFrame(
        np.divide(df.values, row_sums, out=df.values, where=row_sums != 0),
        index=df.index,
        columns=df.columns,
    )
# Cell
def spm(data: pd.DataFrame) -> pd.DataFrame:
    """Specificity Measure: x_i / ||x|| per row, computed as x_i**2 / (||x|| * x_i) so zero entries stay zero."""
    df = data.copy()
    scaled_norm = np.linalg.norm(df.values, axis=1)[:, None] * df.values
    return pd.DataFrame(
        np.divide(df.values ** 2, scaled_norm, out=df.values, where=scaled_norm != 0),
        index=df.index,
        columns=df.columns,
    )
# Cell
def zscore(data: pd.DataFrame, transform=True) -> pd.DataFrame:
    """Row-wise z-scores; with transform=True, rescale to [0, 1] using the maximum attainable z-score."""
df = data.copy()
std = np.std(df.values, axis=1, ddof=1)[:, None]
zs = np.divide(df.values - df.values.mean(axis=1)[:, None], std, where=std != 0)
zs[np.where(std == 0)[0], :] = 0
if transform:
max_zs = (df.values.shape[1] - 1) / np.sqrt(df.values.shape[1])
        # out=zeros keeps entries with z == 0 at zero; without it, `where` leaves them uninitialized
        zs = np.divide(zs + max_zs, 2 * max_zs, out=np.zeros_like(zs), where=zs != 0)
return | pd.DataFrame(zs, index=df.index, columns=df.columns) | pandas.DataFrame |
import math
import pandas as pd
from model.Enumeration import Level
class BondsDao(object):
def __init__(self):
pass
def my_filter(self, df):
# c_col = df.loc[:, 'G']
std1, mean1 = df.describe().loc[['std', 'mean'], '估价收益久期']
std2, mean2 = df.describe().loc[['std', 'mean'], '估价收益率']
result = df[(df['估价收益久期'] < mean1 + 3 * std1) & (df.估价收益久期 > mean1 - 3 * std1)
& (df['估价收益率'] < mean2 + 3 * std2) & (df.估价收益率 > mean2 - 3 * std2)]
return result
def get_credit_debt(self):
        # Load raw bond data
df = pd.read_csv('data/bond_real_time_data.csv').loc[0: 3560, :]
df.rename(columns={df.columns[0]: '证券代码'}, inplace=True)
df_aid = pd.read_csv('data/credit_bonds_quarter_data.csv')[
['证券代码', '经营活动产生的现金流量净额/流动负债\n[报告期] 2018中报', '资产负债率\n[报告期] 2018中报\n[单位] %',
'估价收益率(上清所)\n[日期] 2018-09-14',
'估价修正久期(上清所)\n[日期] 2018-09-14']]
df_aid.columns = ['证券代码', '现金流量比', '资产负债率', '估价收益率', '估价收益久期']
        # Merge and reorder columns
p = pd.merge(df_aid, df[['证券代码', 'AMOUNT', 'MATURITYDATE', 'COUPONRATE', 'DIRTYPRICE']], on='证券代码').dropna(
how='any')
p = p[['证券代码', 'AMOUNT', 'MATURITYDATE', 'COUPONRATE', 'DIRTYPRICE', '资产负债率', '现金流量比', '估价收益率', '估价收益久期']]
result = self.my_filter(p).values
for rows in result:
rows[2] = rows[2][: -5]
return result
def get_interest_bonds(self):
# 读取
df = pd.read_csv('data/bond_real_time_data.csv').loc[3561:, :]
df.rename(columns={df.columns[0]: '证券代码'}, inplace=True)
df_aid = pd.read_csv('data/interest_bonds_quarter_data.csv')[
['证券代码', '估价收益率(上清所)\n[日期] 2018-09-14',
'估价修正久期(上清所)\n[日期] 2018-09-14']]
df_aid.columns = ['证券代码', '估价收益率', '估价收益久期']
        # Merge and reorder columns
p = pd.merge(df_aid, df[['证券代码', 'MATURITYDATE', 'COUPONRATE', 'DIRTYPRICE']], on='证券代码').dropna(
how='any')
p = p[['证券代码', 'MATURITYDATE', 'COUPONRATE', 'DIRTYPRICE', '估价收益率', '估价收益久期']]
result = self.my_filter(p).values
for rows in result:
rows[1] = rows[1][: -5]
return result
def get_price_by_code(self, code):
"""
        Look up the latest close price for a bond code.
        Returns the close price (1.0 if the code is not found).
"""
code = int(code.split('.')[0])
df = pd.read_csv('data/bond_real_time_data.csv')
result = df.loc[df['TRADE_CODE'] == int(code)]
        if len(result) > 0:
            # rows beyond index 3560 are government bonds, the rest credit bonds; both use the CLOSE column
            return result['CLOSE'].values[0]
        return 1.0
'''
    Resolve the bond name from its code.
'''
def get_name_by_code(self, code):
df = pd.read_csv('data/bond_name.csv')
for i in range(df.__len__()):
if df.values[i][1] == code:
return df.values[i][2]
return 'no name'
def get_start_date(self, invreq_):
# todo
pass
def get_class(self, code):
"""
        Find the bond category (government vs. credit) for a bond code.
"""
code = int(code.split('.')[0])
df = pd.read_csv('data/bond_real_time_data.csv')
result = df.loc[df['TRADE_CODE'] == int(code)]
if len(result) > 0:
if int(result.index[0]) > 3560:
return '国债'
else:
return '信用债'
return '国债'
def get_interest_quarter_data(self, mode: Level, code):
df = | pd.read_csv('data/interest_bonds_quarter_data2.csv') | pandas.read_csv |
import numpy as np
import pytest
from pandas.compat import IS64
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single(ufunc):
a = pd.array([1, 2, -3, np.nan], dtype="Float64")
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = pd.array([1.0, 0.2, 3.0, np.nan], dtype="Float64")
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = pd.Series(ufunc(s.astype(float)), dtype="Float64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_float(ufunc):
# two FloatingArrays
a = pd.array([1, 0.2, -3, np.nan], dtype="Float64")
result = ufunc(a, a)
expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
# FloatingArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = pd.array(ufunc(a.astype(float), arr), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
# FloatingArray with scalar
result = ufunc(a, 1)
expected = pd.array(ufunc(a.astype(float), 1), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = pd.array(ufunc(1, a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
arr = pd.array(values, dtype="Float64")
res = np.add.reduce(arr)
expected = arr.sum(skipna=False)
tm.assert_almost_equal(res, expected)
@pytest.mark.skipif(not IS64, reason="GH 36579: fail on 32-bit system")
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, np.nan, np.nan], dtype="Float64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = | pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64") | pandas.Series |
import concurrent.futures
import logging
import os
import pathlib
from pathlib import Path
from typing import Dict, List
import numpy as np
import pandas as pd
import pytz
import xarray as xr
from src.constants import ROOT_DIR
from src.data.utils import Location
from src.logger import get_logger
logger = get_logger("Data Transformer")
class DataTransformer:
def __init__(
self,
variable: str,
locations_csv_path: Path = ROOT_DIR / "data/external/stations.csv",
output_dir: Path = ROOT_DIR / "data/processed/",
observations_dir: Path = ROOT_DIR / "data/interim/observations/",
forecast_dir: Path = ROOT_DIR / "data/interim/forecasts/",
time_range: Dict[str, str] = None,
):
self.variable = variable
self.locations = pd.read_csv(locations_csv_path)
self.output_dir = output_dir
self.observations_dir = observations_dir
self.forecast_dir = forecast_dir
if time_range is None:
time_range = dict(start="2019-06-01", end="2021-03-31")
self.time_range = time_range
def run(self) -> List[Path]:
data_for_locations_paths = []
locations = [
Location(
location[1]["id"],
location[1]["city"],
location[1]["country"],
location[1]["latitude"],
location[1]["longitude"],
location[1]["timezone"],
location[1]["elevation"],
)
for location in self.locations.iterrows()
]
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_entry = {
executor.submit(self._data_transform, location): location
for location in locations
}
for future in concurrent.futures.as_completed(future_to_entry):
result = future.result()
                if isinstance(result, pathlib.Path):  # works for both PosixPath and WindowsPath
logger.info(f"Intermediary data saved to: {result}")
data_for_locations_paths.append(result)
else:
logger.error(result)
return data_for_locations_paths
def _data_transform(self, loc) -> Path or Exception:
try:
logger.info(f"Extracting data for location: {str(loc)}")
inter_loc_path = self.get_output_path(loc)
if inter_loc_path.exists():
logger.info(f"Station at {loc.city} is already computed.")
return inter_loc_path
data_for_location = LocationTransformer(
self.variable,
loc,
observations_dir=self.observations_dir,
forecast_dir=self.forecast_dir,
time_range=self.time_range,
).run()
data_for_location.to_csv(str(inter_loc_path))
return inter_loc_path
except Exception as ex:
return ex
def get_output_path(self, loc: Location) -> Path:
ext = ".csv"
intermediary_path = Path(
self.output_dir,
self.variable,
f"data_{self.variable}_{loc.location_id}{ext}",
)
if not intermediary_path.parent.exists():
os.makedirs(intermediary_path.parent, exist_ok=True)
return intermediary_path
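
def _example_mass_concentration_conversion() -> float:
    """Hedged sketch (not used by the pipeline) of the unit conversion applied in
    LocationTransformer.opening_and_transforming_forecast: CAMS concentrations arrive as
    kg/kg mass mixing ratios, and multiplying by air density (kg/m3) and 1e9 yields
    micrograms per cubic metre. The numbers below are illustrative assumptions."""
    t2m = 288.15                 # 2 m temperature [K]
    surface_pressure = 101325.0  # surface pressure [Pa]
    mixing_ratio = 1.2e-8        # pollutant mass mixing ratio [kg/kg]
    r_dry_air = 287.058          # specific gas constant of dry air [J/(kg*K)]
    air_density = surface_pressure / (r_dry_air * t2m)  # ~1.225 kg/m3
    return mixing_ratio * air_density * 1e9             # concentration [ug/m3]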
class LocationTransformer:
def __init__(
self,
variable: str,
location: Location,
observations_dir: Path = ROOT_DIR / "data/interim/observations/",
forecast_dir: Path = ROOT_DIR / "data/interim/forecasts/",
time_range: Dict[str, str] = None,
):
self.variable = variable
if time_range is None:
time_range = dict(start="2019-06-01", end="2021-03-31")
self.time_range = time_range
self.location = location
self.observations_path = location.get_observations_path(
observations_dir,
self.variable,
"_".join(self.time_range.values()).replace("-", ""),
)
self.forecast_path = location.get_forecast_path(
forecast_dir, "_".join(self.time_range.values()).replace("-", "")
)
def run(self) -> pd.DataFrame:
"""
Main workflow for the LocationTransformer class
Returns:
pd.DataFrame: observations and forecasts merged
"""
# Open forecast and observational data
try:
observed_data = self.opening_and_transforming_observations()
        except Exception as ex:
            raise Exception(
                "There is no data for this variable at the location of interest"
            ) from ex
forecast_data = self.opening_and_transforming_forecast()
# Merge both xarray datasets
merged = xr.merge([forecast_data, observed_data])
merged_pd = merged.to_dataframe()
# Adding local_time as a coordinate
merged_pd = self.adding_local_time_hour(merged_pd)
        # Some stations report negative (physically impossible) values; mask them as NaN
varname = f"{self.variable}_observed"
merged_pd[varname] = merged_pd[varname].where(merged_pd[varname] >= 0)
        # Drop rows with any remaining NaN observations or forecasts
merged_pd = merged_pd.dropna()
# Calculation of the bias
merged_pd[f"{self.variable}_bias"] = (
merged_pd[f"{self.variable}_forecast"]
- merged_pd[f"{self.variable}_observed"]
)
merged_pd.reset_index(inplace=True)
return merged_pd
def opening_and_transforming_forecast(self) -> xr.Dataset:
"""
Open the forecasts given the path specified in the object
declaration. It also transforms the units of the air quality variables
and disaggregate some variables temporally.
Returns:
xr.Dataset: the forecast dataset
"""
# Open the data
forecast_data = xr.open_dataset(self.forecast_path)
# Rename some of the variables
forecast_data = forecast_data.rename({"pm2p5": "pm25", "go3": "o3"})
# Interpolate time axis to 1h data
logger.info("Interpolating time data to hourly resolution.")
hourly_times = pd.date_range(
forecast_data.time.values[0], forecast_data.time.values[-1], freq="1H"
)
forecast_data = forecast_data.interp(time=hourly_times, method="linear")
# Transform units of concentration variables
for variable in ["pm25", "o3", "no2", "so2", "pm10"]:
logger.info(f"Transforming data for variable {variable}.")
            # Air density depends on temperature and pressure; the standard value of
            # ~1.225 kg/m3 corresponds to 15 degrees Celsius (288.15 K) at 1 atm
surface_pressure = self.calculate_surface_pressure_by_msl(
forecast_data["t2m"], forecast_data["msl"]
)
air_density = self.calculate_air_density(
surface_pressure, forecast_data["t2m"]
)
# Now, we use the air density to transform to Micrograms / m³
forecast_data[variable] *= air_density.values
forecast_data[variable] *= 10 ** 9
# Some forecast variables are aggregated daily, so a temporal
# disaggregation is needed
logger.info(f"Dissaggregate forecast variables.")
forecast_data = self.forecast_accumulated_variables_disaggregation(
forecast_data
)
# Rename all the variables to "{variable}_forecast" in order to
# distinguish them when merged
for data_var in list(forecast_data.data_vars.keys()):
forecast_data = forecast_data.rename({data_var: f"{data_var}_forecast"})
forecast_data = forecast_data.drop(["latitude", "longitude", "station_id"])
return forecast_data
def opening_and_transforming_observations(self) -> xr.Dataset:
"""
Open the observations given the path specified in the object
declaration. It also transforms the units of the air quality variables
and filter the outliers.
Returns:
xr.Dataset: the observations dataset
"""
# Open the data
observations_data = xr.open_dataset(self.observations_path)
# Resample the values in order to have the same time frequency as
# CAMS model forecast
observations_data = observations_data.resample({"time": "1H"}).mean()
# If there are more than one station associated with the location of
# interest an average is performed taking into consideration the
# distance to the location of interest
observations_data = self.weight_average_with_distance(observations_data)
# Rename all the variables to "{variable}_observed" in order to
# distinguish them when merged
for data_var in list(observations_data.data_vars.keys()):
observations_data = observations_data.rename(
{data_var: f"{data_var}_observed"}
)
# Filter outliers
observations_data = self.filter_observations_data(observations_data)
# Resample time axis to 1H time frequency
observations_data = observations_data.resample({"time": "1H"}).asfreq()
# Rolling through the data to interpolate NaNs
for data_var in list(observations_data.data_vars.keys()):
observations_data[data_var] = observations_data[data_var].interpolate_na(
dim="time",
method="linear",
fill_value="extrapolate",
use_coordinate=True,
max_gap= | pd.Timedelta(value=12, unit="h") | pandas.Timedelta |
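# --- Illustrative sketch (not part of the original module) --------------------
# The forecast method above calls self.calculate_surface_pressure_by_msl() and
# self.calculate_air_density(), which are not shown in this excerpt. A minimal
# implementation consistent with the comments (ideal gas law, barometric
# reduction from mean sea level) could look like the functions below; the
# constants and formulas are assumptions, not the author's confirmed code.
import numpy as np
import xarray as xr

R_DRY_AIR = 287.058  # specific gas constant of dry air, J / (kg K)
GRAVITY = 9.80665    # standard gravity, m / s^2

def calculate_surface_pressure_by_msl(t2m: xr.DataArray, msl: xr.DataArray,
                                      elevation_m: float = 0.0) -> xr.DataArray:
    """Reduce mean-sea-level pressure to station elevation (barometric formula)."""
    return msl * np.exp(-GRAVITY * elevation_m / (R_DRY_AIR * t2m))

def calculate_air_density(surface_pressure: xr.DataArray, t2m: xr.DataArray) -> xr.DataArray:
    """Ideal gas law: rho = p / (R_d * T), in kg / m^3 (~1.225 at 15 degC, 1 atm)."""
    return surface_pressure / (R_DRY_AIR * t2m)
# ------------------------------------------------------------------------------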
# env: py3
# Author: <NAME>
import pandas as pd
import datetime
import urllib
from urllib.request import urlopen
def AirNow():
baseURL = "http://www.airnowapi.org/aq/forecast/"
api_key = '###YOUR_API_KEY###'
#date = '2018-08-04'
# get the current date as input
now = datetime.datetime.now()
date = str(now)
miles = 25
dfs = list()
text_file = open("INPUT.txt", "r")
latlong = text_file.read().split(' ')
text_file.close()
lat = latlong[0::2]
lon = latlong[1::2]
for lats, lons in zip(lat, lon):
latlonURL = baseURL + "latLong/?" + urllib.parse.urlencode({
'format': 'application/json',
'latitude': lats,
'longitude': lons,
'date': date[:10],
'distance': miles,
'API_KEY': api_key
})
response = urlopen(latlonURL).read().decode('utf-8')
df = pd.read_json(response)
#df = df.assign(Zipcode=zipcode)
dfs.append(df)
results = | pd.concat(dfs) | pandas.concat |
import pandas as pd
import time
import urllib.request, json
from bs4 import BeautifulSoup
import nltk
from nltk.corpus import stopwords
import datetime
import calendar
import csv
import pandas as pd
import os
path = os.environ["heuristik_data_path"]
path = os.path.abspath(path) + '/'
nltk.download("punkt",path)
nltk.download('stopwords',path)
symbols_nyse = pd.read_csv(path+'nyse-listed_csv.csv')['ACT Symbol'].tolist()
symbols_nasdaq = pd.read_csv(path+'nasdaq-listed-symbols_csv.csv')['Symbol'].tolist()
def name_extraction(symbol):
"""
Resolve name from symbol.
"""
df_nyse = pd.read_csv(path+'nyse-listed_csv.csv')
df_nyse = df_nyse.rename(columns={"ACT Symbol": "Symbol"})
df_nasdaq = pd.read_csv(path+'nasdaq-listed-symbols_csv.csv')
dfs = df_nyse.append(df_nasdaq)
results = dfs[dfs['Symbol'].str.match(symbol)]
if len(results) == 0:
print('No relevant symbol found.')
symbol = ''
elif len(results) >= 1:
symbol = list(results['Company Name'])[0].partition(' ')[0]
return symbol
def clean_raw_text(raw,
max_length = 200,
remove_tags = True,
remove_punctuation = True,
remove_stopwords = True,
remove_numbers = True,
remove_name = ''):
"""
Clean text by removing html, tags, punctuation, company name, stopwords...
"""
clean = BeautifulSoup(raw, 'lxml') # remove script
if remove_tags:
clean = clean.text.lower() # remove tags, lower case
if remove_punctuation:
tokenizer = nltk.RegexpTokenizer(r"\w+") # remove punctuation
clean = tokenizer.tokenize(clean) #tokenize
for i, word in enumerate(clean):
if word == remove_name.lower():
clean[i] = 'company' #remove clear name
if remove_numbers and any(character.isdigit() for character in word):
clean[i] = ''
if remove_stopwords and (word in stopwords.words('english')):
clean[i] = ''
if len(clean)>max_length: #limit length
clean = " ".join(clean[0:max_length])
else:
clean = " ".join(clean)
clean = " ".join(clean.split())
return clean
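# --- Illustrative usage (not part of the original module) ---------------------
# Quick sanity check of clean_raw_text(); the expected output is an assumption
# derived from the logic above (lowercasing, tag/number/stopword removal and
# masking of the company name with the token "company").
def _demo_clean_raw_text():
    raw = "<p>Tesla reported <b>record</b> deliveries of 90,650 vehicles.</p>"
    return clean_raw_text(raw, remove_name="Tesla")
    # expected: "company reported record deliveries vehicles"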
def apply_sentiment_to_text(text, price_data):
"""
Labels text items according to price sentiment.
"""
text['sentiment'] = ''
for text_time in list(text.index):
nearest_index = price_data.index.get_loc(text_time, method='ffill')
text['sentiment'][text_time] = price_data['sentiment'][nearest_index]
return text
def get_sentiment(df,start_time,timedelta,barriers):
"""
Extracting stock sentiment from stock price data.
"""
end_time = start_time + timedelta
if end_time > df.index[-1]:
end_time = df.index[-1]
if start_time == end_time:
sentiment = 0
else:
nearest_start_index = df.index.get_loc(start_time, method='bfill')
nearest_end_index = df.index.get_loc(end_time, method='bfill')
        interval_data = df.iloc[nearest_start_index:nearest_end_index]
start_price = interval_data['price'][0]
end_price = interval_data['price'][-1]
horizontal_barriers = start_price * pd.Series([1+barriers,1-barriers])
upper = (interval_data['price']>horizontal_barriers[0])
lower = (interval_data['price']<horizontal_barriers[1])
upper_any = upper.any()
lower_any = lower.any()
if lower_any:
if upper.any():
upper_first_index = interval_data[upper].index[0]
lower_first_index = interval_data[lower].index[0]
if upper_first_index > lower_first_index:
sentiment = -1
else:
sentiment = 1
else:
sentiment = -1
else:
if upper.any():
sentiment = 1
else:
sentiment = 0
return sentiment
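# --- Illustrative usage (not part of the original module) ---------------------
# Triple-barrier sketch on a synthetic price series: with 5% barriers a path
# that first gains more than 5% inside the window is labelled +1, one that
# first loses more than 5% is labelled -1, anything else 0.
def _demo_get_sentiment():
    idx = pd.date_range("2020-01-01", periods=10, freq="D")
    prices = pd.DataFrame(
        {"price": [100, 101, 102, 104, 106, 107, 106, 105, 104, 103]}, index=idx
    )
    # expected label: 1 (the +5% barrier at 105 is crossed before the -5% barrier)
    return get_sentiment(prices, idx[0], pd.Timedelta("7 days"), 0.05)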
def apply_sentiment(df,timedelta = pd.Timedelta('7 days'),barriers = 0.05):
"""
Adding sentiment to a stock price dataframe
"""
print('Extract price sentiment. Timeframe: '+ str(timedelta)+'. Barriers : '+ str(100*barriers)+'%.')
return df.index.map(lambda s: get_sentiment(df,s,timedelta,barriers))
def download_price(symbol, # name of the equity
function = 'SMA', # Values: 'SMA', TIME_SERIES_INTRADAY' , TIME_SERIES_DAILY
outputsize = 'compact', # Values: compact, full
apikey = os.environ["heuristik_alphavantage"], # Docs https://www.alphavantage.co/documentation/
timedelta = pd.Timedelta('7 days'), # Time window for tripple barrier method
barriers = 0.05, # Vertical window for tripple barrier method
force_download = False # False means use cached files
):
"""
Downloading stock prices from AlphaVantage. Various options for different time-resolutions.
"""
print('Getting prices for '+symbol+'.')
query = ''.join(('https://www.alphavantage.co/query?&',
'datatype=','csv','&',
'function=',function,'&',
'outputsize=',outputsize,'&',
'symbol=',symbol,'&',
'apikey=',apikey,
''
))
str_timedelta = str(timedelta).replace(' ','_').replace(':','_')
save_file = path+'price_data/'+function +'_'+ str_timedelta+'_'+str(barriers) + '_' +symbol + '.csv'
if os.path.exists(save_file) and not force_download:
print('Loading prices from file.')
data = (pd.read_csv(save_file))
data['time'] = data['time'].map(lambda s: pd.to_datetime(s))
data = data.set_index('time')
else:
print('Downloading prices from AlphaVantage.')
if function == 'TIME_SERIES_INTRADAY':
query += ('&interval=' + '5min')
df = pd.read_csv(query)
if 'high' in df:
df = df.drop(columns = ['high','low','close','volume'])
df['timestamp'] = df['timestamp'].map(lambda s: pd.to_datetime(s).tz_localize('US/Eastern'))
df = df.rename(columns={'open':'price','timestamp': 'time'})
df = df.set_index('time')
else:
print('Error: Did not retrieve data.')
elif function == 'SMA':
query += ('&interval=' + '60min')
query += ('&time_period='+'5')
query += ('&series_type='+'open')
df = pd.read_csv(query)
if 'time' in df:
df['time'] = df['time'].map(lambda s: pd.to_datetime(s).tz_localize('US/Eastern'))
df = df.rename(columns={'SMA':'price'})
df = df.set_index('time')
else:
print('Error: Did not retrieve data.')
elif function == 'TIME_SERIES_DAILY':
df = pd.read_csv(query)
if 'timestamp' in df:
df = df.drop(columns = ['high','low','close','volume'])
df['timestamp'] = df['timestamp'].map(lambda s: pd.to_datetime(s).tz_localize('US/Eastern'))
df = df.rename(columns={'open':'price','timestamp': 'time'})
df = df.set_index('time')
else:
print('Error: Did not retrieve data.')
df = df.iloc[::-1]
data = df
data['sentiment'] = apply_sentiment(data,timedelta = timedelta,barriers =barriers)
data.to_csv(save_file)
return save_file, data
def retrieve_symbols(symbol):
"""
Resolving company symbol from AlphaVantage keyword search
"""
query = 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + symbol + '&apikey=' + os.environ["heuristik_alphavantage"]
with urllib.request.urlopen(query) as url:
data = json.loads(url.read().decode())
if not 'bestMatches' in data.keys():
output = []
else:
output = data['bestMatches']
return output
def download_stocknews_page(name = 'TSLA', page = 1,api_key = os.environ["heuristik_stocknews"],
page_size = 50,print_query = False):
"""
Download one page from StockNews API.
"""
if name == 'all':
query_dict = {'section': 'alltickers',
'items':str(page_size),
'token':api_key,
'page': str(page)}
query = 'https://stocknewsapi.com/api/v1/category?'
for key in list(query_dict.keys()):
query = query + key + '=' + query_dict[key]+'&'
query = query[0:-1]
else:
query_dict = {'tickers': name,
'items':str(page_size),
'token':api_key,
'page': str(page)}
#Assemble query:
query = 'https://stocknewsapi.com/api/v1?'
for key in list(query_dict.keys()):
query = query + key + '=' + query_dict[key]+'&'
query = query[0:-1]
with urllib.request.urlopen(query) as url:
data = json.loads(url.read().decode())
if data['data'] ==[]:
pages = 0
else:
pages = data['total_pages']
if print_query:
print(query)
return pages, data['data']
def download_stocknews(name = 'TSLA', pages = 'all', api_key = os.environ["heuristik_stocknews"],
download_path = '', save = True,print_query = False):
"""
Download and save multiple pages from StockNews API.
"""
assert pages == 'all' or (isinstance(pages,int) and pages>0), "Option pages should be 'all' or positive integer."
start_time = time.time()
number_of_pages, page_0 = download_stocknews_page(name = name, page = 1,api_key = api_key, print_query = print_query)
if number_of_pages == 0:
return []
all_pages = convert_stocknews_data(page_0,name = name)
if pages == 'all':
pages = number_of_pages
if number_of_pages < pages:
pages = number_of_pages
print('Downloading '+ str(pages) +' pages from StockNews. This may take a minute...')
for i in range(2,pages):
if time.time() < start_time + 1/12:
time.sleep(1/12)
_, page_i = download_stocknews_page(name = name, page = i,api_key = api_key)
current_page = convert_stocknews_data(page_i,name = name)
all_pages = all_pages.append(current_page)
start_time = time.time()
if save:
all_pages.to_csv(download_path)
return all_pages
def convert_stocknews_data(data,name = ''):
"""
Cleans text contained in a dataframe.
"""
df = pd.DataFrame({'time' : [],'symbol':[],'text' : [],'raw_text' : [],'url': [],'src_sentiment' : []})
if name != 'all':
clear_name = name_extraction(name)
length_of_page = len(data)
for i in range(0,length_of_page):
        if data[i]['text'] is None:
            text_body = data[i]['title']
        elif data[i]['title'] is None:
            text_body = data[i]['text']
        else:
            text_body = data[i]['title'] + ' ' + data[i]['text']
if name != 'all':
text_body = clean_raw_text(text_body,remove_name = clear_name)
df = df.append({'time' : pd.to_datetime(data[i]['date']).tz_convert('US/Eastern'),
'symbol': ', '.join(data[i]['tickers']),
'source_name': data[i]['source_name'],
'text':text_body,
'raw_title':data[i]['title'],
'raw_text': data[i]['text'],
'url': data[i]['news_url'],
'src_sentiment':data[i]['sentiment']}, ignore_index=True)
df = df.set_index('time')
return df
def process_company(asset_name,pages = 200, force_download = False, text_src = 'stocknews'
,timewindow = '3 days', barriers = 0.05, data_version = '4'):
"""
To process a company: downloads stock prices and news, and saves labeled dataframe to disk.
"""
success = False
timedelta = pd.Timedelta(timewindow)
_, price_data = download_price(symbol = asset_name, function = 'TIME_SERIES_DAILY',
outputsize ='full',barriers = barriers,timedelta = timedelta,
force_download = force_download )
text_name = name_extraction(asset_name)
if text_src == 'stocknews':
symbol = asset_name
text_path = path+'text_data/' + 'stocknews_text_v'+data_version +'_'+ asset_name +'.csv'
if os.path.exists(text_path) and force_download == False:
print('Loading text from file: '+text_path)
text = pd.read_csv(text_path)
text['time'] = text['time'].map(lambda s: pd.to_datetime(s).tz_convert('US/Eastern'))
text = text.set_index('time')
text = text.iloc[::-1]
else:
text = download_stocknews(name = symbol, pages = pages, download_path = text_path )
if len(text) == 0:
print('No news found.')
return
else:
text.to_csv(text_path)
else:
print('Error. Source not found.')
if len(text) != 0:
text = apply_sentiment_to_text(text, price_data)
print('Save data.')
str_timedelta = str(timedelta).replace(' ','_').replace(':','_')
text.to_csv(path+'text_data/' +'stocknews_labeled_text_v'+data_version+'_' + asset_name +'_'+str_timedelta+'_'+str(barriers)+'.csv')
success = True
else:
success = False
return success
class data:
def __init__(self,
timeframe = '3 days',
data_version = '4',
barriers = '5%',
binary_sentiment = True
):
"""
Data manager class to load data from files or initiate download.
Example use:
data = heuristik.data(
timeframe = args.timeframe,
data_version = args.data_version,
barriers = args.barriers,
binary_sentiment = args.binary_sentiment)
df = data.retrieve(symbols = ['TWTR','AMD'],download=True)
"""
self.path = path
self.timeframe = timeframe
self.data_version = data_version
self.barriers = barriers
self.data_version = data_version
self.binary_sentiment = binary_sentiment
def percent_to_float(self,s):
assert isinstance(s,str) or isinstance(s,float) or isinstance(s,int ), 'Please provide str or float as input for barrier.'
assert not s.startswith("-"), 'Provide positive barrier percentage.'
if isinstance(s,float)or isinstance(s,int ):
barrier = float(s/ 100)
else:
s = str(float(s.rstrip("%")))
i = s.find(".")
if i == -1:
barrier = int(s) / 100
s = s.replace(".", "")
i -= 2
if i < 0:
barrier = float("." + "0" * abs(i) + s)
else:
barrier = float(s[:i] + "." + s[i:])
return barrier
def to_numerical_sentiment(self,rating):
if isinstance(rating,str):
if rating == 'Negative':
if self.binary_sentiment:
return 1
else:
print('nooo')
return -1
elif rating == 'Positive':
return 1
else:
return 0
if (rating == rating) : #check for 'NaN'
rating = int(rating)
if rating == -1:
if self.binary_sentiment:
return 1
else:
return -1
elif rating == 0:
return 0
else:
return 1
def human_format(self,num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%.1f%s' % (num, ['', 'k', 'M', 'G', 'T', 'P'][magnitude])
def retrieve(self,
symbols,
download = False):
path = self.path
timeframe = self.timeframe
data_version = self.data_version
barriers = self.barriers
data_version = self.data_version
pd_timeframe = pd.Timedelta(timeframe)
greater_than_zero = pd_timeframe.round('1s')>pd.Timedelta('0')
assert greater_than_zero, 'Please provide valid timeframe > 1 second.'
if not path[-1]=='/':
path = path+'/'
if isinstance(symbols,str):
symbols = symbols.replace(' ','').split(',')
str_timedelta = str(pd_timeframe).replace(' ','_').replace(':','_')
str_barriers = str(self.percent_to_float(barriers))
file_name = 'stocknews_labeled_text_v'+data_version+'_'
data_paths = []
if path[0]== '~':
print('Warning: expanding ~ to home directory.')
path = os.path.expanduser(path)
print('Path: '+ path)
for symbol in symbols:
data_paths.append(path+'text_data/'+file_name+symbol+'_'+str_timedelta+'_'+str_barriers+'.csv')
data_available = []
for i, data_path in enumerate(data_paths):
if not os.path.exists(data_path):
if download == True:
print('Downloading data.')
success = process_company(symbols[i],pages = 200, force_download = False, text_src = 'stocknews',timewindow = self.timeframe, barriers = float(str_barriers),data_version = self.data_version)
print('Data downloaded for ', symbols[i],'.')
data_available.append(success)
else:
print('Data unavailable for symbol '+symbols[i]+'. Skipping.')
data_available.append(False)
else:
data_available.append(True)
if not any(data_available):
return ''
if data_available[0]:
df = | pd.read_csv(data_paths[0]) | pandas.read_csv |
from trading.indicators.indicators import (
bollinger,
directional_movement,
macd,
mma,
mme,
parabolic_sar,
rsi,
stochastic
)
import json
import pandas as pd
import math
import pytest
import random
@pytest.mark.parametrize("nb, values, mma_column", [
(1, [0.2], [0.2]),
(1, [0.2, 0.3, 0.4], [0.2, 0.3, 0.4]),
(2, [0.2, 0.3, 0.4], [None, 0.25, 0.35]),
(3, [0.2, 0.3, 0.4], [None, None, 0.3]),
(4, [0.2, 0.3, 0.4], [None, None, None]),
(20, [
90.70, 92.90, 92.98, 91.80, 92.66, 92.68, 92.30, 92.77, 92.54, 92.95, 93.20,
91.07, 89.83, 89.74, 90.40, 90.74, 88.02, 88.09, 88.84, 90.78, 90.54, 91.39, 90.65
], [
None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, 91.25, 91.24, 91.17, 91.05
])
])
def test_that_mma_is_computed(nb, values, mma_column):
df = pd.DataFrame(values, columns=["high"])
df = mma(df, nb)
for i in range(len(mma_column)):
if not mma_column[i]:
assert df[f"MMA{nb}"][i] != df[f"MMA{nb}"][i]
else:
assert round(df[f"MMA{nb}"][i], 2) == mma_column[i]
@pytest.mark.parametrize("nb, alpha, values, mme_column", [
(1, 0.5, [0.2], [0.2]),
(1, 0.5, [0.2, 0.3, 0.4], [0.2, 0.3, 0.4]),
(2, 0.5, [0.2, 0.3, 0.4], [None, 4/15, 11/30]),
(3, 0.5, [0.2, 0.3, 0.4], [None, None, 12/35]),
(3, 0.2, [0.2, 0.3, 0.4], [None, None, 96/305]),
(3, 0, [0.2, 0.3, 0.4], [None, None, 0.3]), # alpha = 0 -> same as MMA
(3, 1, [0.2, 0.3, 0.4], [None, None, 0.4]), # alpha = 1 -> only current value taken into account
(4, 0.5, [0.2, 0.3, 0.4], [None, None, None]),
])
def test_that_mme_is_computed(nb, alpha, values, mme_column):
df = pd.DataFrame(values, columns=["high"])
df = mme(df, nb, alpha=alpha)
for i in range(len(mme_column)):
if not mme_column[i]:
assert df[f"MME{nb}-{alpha}"][i] != df[f"MME{nb}-{alpha}"][i]
else:
assert math.isclose(mme_column[i], df[f"MME{nb}-{alpha}"][i])
def test_that_macd_is_computed():
values = [random.uniform(0, 1) for _ in range(100)]
df = | pd.DataFrame(values, columns=["high"]) | pandas.DataFrame |
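# --- Illustrative sketch (not part of the test module above) ------------------
# The mme() indicator under test is imported from the library and not shown
# here. One definition that reproduces the parametrized expectations above
# (alpha=0 collapses to the simple moving average, alpha=1 keeps only the
# current value) weights the value lagged by k periods with (1 - alpha)**k and
# normalizes by the weight sum; this is an assumption about the library's
# implementation, not its actual source.
import pandas as pd

def mme_sketch(df: pd.DataFrame, nb: int, alpha: float = 0.5,
               column: str = "high") -> pd.DataFrame:
    weights = [(1 - alpha) ** k for k in range(nb - 1, -1, -1)]  # oldest -> newest
    total = sum(weights)
    df[f"MME{nb}-{alpha}"] = (
        df[column]
        .rolling(window=nb)
        .apply(lambda w: sum(wi * vi for wi, vi in zip(weights, w)) / total, raw=True)
    )
    return df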
import pandas as pd
import numpy as np
from scipy.stats import hmean
import cirpy
import datetime
from matplotlib import pyplot as plt
import seaborn as sns
from data_loader import GraphCancerMolecules
sns.set()
sns.set_context('talk')
def read_in_cpdb():
cpdb_lit = pd.read_csv('../data/cpdb.lit.tab.txt', sep='\t')
cpdb_nci = pd.read_csv('../data/cpdb.ncintp.tab.txt', sep='\t')
cpdb_df = pd.concat([cpdb_lit, cpdb_nci])
print(f"number of unique chemcodes in rats cpdb {cpdb_df[cpdb_df['species']=='r']['chemcode'].nunique()}")
print(f"number of unique chemcodes in cpdb {cpdb_df['chemcode'].nunique()}")
cpdb_name = | pd.read_csv('../data/cpdb_name.tsv', sep='\t') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Library with PME calculation functions, provides utility for Private Equity analysis.
@author: <NAME> (<EMAIL>)
"""
import pandas as pd
import numpy as np
import scipy.optimize
from datetime import date
#Helper functions
def nearest(series, lookup, debug = False):
if debug==True:
print( "lookup: " + str(lookup) + " | closest: " + str(series.iloc[(series-lookup).abs().argsort()[0]]))
return series.iloc[(series-lookup).abs().argsort()[0]]
def xnpv(rate, values, dates):
'''Equivalent of Excel's XNPV function.
>>> from datetime import date
>>> dates = [date(2010, 12, 29), date(2012, 1, 25), date(2012, 3, 8)]
>>> values = [-10000, 20, 10100]
>>> xnpv(0.1, values, dates)
-966.4345...
'''
if rate <= -1.0:
return float('inf')
    datesx = pd.to_datetime(dates).apply(lambda x: date(x.year,x.month,x.day)) # Just in case, convert to native datetime dates
d0 = datesx[0] # or min(dates)
return sum([ vi / (1.0 + rate)**((di - d0).days / 365.0) for vi, di in zip(values, datesx)])
def xirr(values, dates):
'''Equivalent of Excel's XIRR function.
>>> from datetime import date
>>> dates = [date(2010, 12, 29), date(2012, 1, 25), date(2012, 3, 8)]
>>> values = [-10000, 20, 10100]
>>> xirr(values, dates)
0.0100612...
'''
try:
return scipy.optimize.newton(lambda r: xnpv(r, values, dates), 0.0)
except RuntimeError: # Failed to converge?
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates), -1.0, 1e10)
def TVM(value, value_date, money_date, discount_rate):
''' Calculates the discounted value of money to date money_date (i.e. either PV or FV depending on date)
'''
time_delta = ((pd.to_datetime(money_date) - pd.to_datetime(value_date)) / np.timedelta64(1, 'D')).astype(int)/365
return value*(1+discount_rate)**time_delta
def xirr2(values, dates):
    datesx = pd.to_datetime(dates).apply(lambda x: date(x.year,x.month,x.day)) # Just in case, convert to native datetime dates
transactions = list(zip(datesx,values))
years = [(ta[0] - transactions[0][0]).days / 365.0 for ta in transactions]
residual = 1
step = 0.05
guess = 0.05
epsilon = 0.0001
limit = 100000
while abs(residual) > epsilon and limit > 0:
limit -= 1
residual = 0.0
for i, ta in enumerate(transactions):
residual += ta[1] / pow(guess, years[i])
if abs(residual) > epsilon:
if residual > 0:
guess += step
else:
guess -= step
step /= 2.0
return guess-1
### PME Algorhitms
#GENERATE DISCOUNTING TABLE
def discount_table(dates_cashflows, cashflows, cashflows_type, dates_index, index, NAV_scaling = 1):
''' Automatically matches cashflow and index dates and subsequently generates discount table (which can be used to calculate Direct Alpha et al.).
Also useful for debugging and exporting.
Args:
dates_cashflows: An ndarray the dates corresponding to cashflows.
cashflows: An ndarray of the cashflow amounts (sign does not matter)
cashflows_type: Accepts three types [Distribution \ Capital Call \ Value]
dates_index: Ndarray of dates for the index, same logic as cashflows.
index: The index levels corresponding to the dates
NAV_scaling: Coefficient which can be used to scale the NAV amount (so as to counteract systemic mispricing)
auto_NAV: Toggle for automatic handling of the NAV. If False, NAV is not calculated and function returns a tuple of [sum_fv_distributions, sum_fv_calls]
(allows for manual completion of the PME formula using appropriate NAV value)
Returns:
DataFrame(Date|Amount|Type|Status|Discounted|Index|FV_Factor)
'''
_dates_index = | pd.to_datetime(dates_index) | pandas.to_datetime |
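# --- Illustrative sketch (not part of the original library) -------------------
# Once discount_table() has produced the table documented above
# (Date | Amount | Type | ... | FV_Factor), Kaplan-Schoar PME and Direct Alpha
# are typically derived from it as below. The column and type names follow the
# docstring; treating the terminal NAV with a future-value factor of 1 is an
# assumption of this sketch.
def ks_pme_and_direct_alpha(table):
    fv = table["Amount"] * table["FV_Factor"]
    is_call = table["Type"] == "Capital Call"
    is_dist = table["Type"] == "Distribution"
    is_nav = table["Type"] == "Value"
    ks_pme = (fv[is_dist].sum() + table.loc[is_nav, "Amount"].sum()) / fv[is_call].sum()
    # Direct Alpha: ln(1 + IRR) of the index-compounded flows plus terminal NAV
    flows = pd.DataFrame({
        "date": table["Date"],
        "value": fv.where(~is_nav, table["Amount"]) * np.where(is_call, -1, 1),
    }).sort_values("date").reset_index(drop=True)
    direct_alpha = np.log(1 + xirr(flows["value"], flows["date"]))
    return ks_pme, direct_alpha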
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# src: https://towardsdatascience.com/hands-on-predict-customer-churn-5c2a42806266
# In[2]:
# Churn quantifies the number of customers who have
# unsubscribed or canceled their service contract.
# Steps
# 1. Use Case / Business Case
# Only by understanding the final objective we can build
# a model that is actually of use.
# 2. Data collection & cleaning
# identify the right data sources, cleansing the data sets and
# preparing for feature selection or engineering.
# 3. Feature selection & engineering
# decide which features we want to include in our model and
# prepare the cleansed data to be used for the machine learning
# algorithm to predict customer churn.
# 4. Modelling
# Find the right model (model selection) and verify that the
# algorithm actually works.
# 5. Insights and Actions
# Evaluate and interpret the outcomes
# In our case we actually want to make them stop leaving.
# In[3]:
# Load libraries
import matplotlib
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.figsize'] = [5.0, 5.0]
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
import pandas as pd
import numpy as np
from random import randint
import os.path
import click
from itertools import product
from sklearn.metrics import (
precision_score,
recall_score,
confusion_matrix,
accuracy_score,
)
from .preprocessing import (
feature_extraction,
group_feature_extraction,
normalize_data,
)
from .classification import (
split_data,
k_fold_crossvalid,
leave_one_group_out,
train_model,
predict_with_model,
predict_top_classes,
)
MODEL_NAMES = [
'LDA',
'random_forest',
'decision_tree',
'extra_tree',
'adaboost',
'knn',
'gaussianNB',
'linear_svc',
'svm',
'logistic_regression',
'neural_network',
]
NORMALIZER_NAMES = [
'raw',
'standard_scalar',
'total_sum',
'binary'
]
NOISE_VALUES = [
0,
0.0000000001,
0.000000001,
0.00000001,
0.0000001,
0.000001,
0.00001,
0.0001,
0.001,
0.01,
0.1,
1,
10,
100,
1000
]
@click.group()
def main():
pass
test_size = click.option('--test-size', default=0.2, help='The relative size of the test data')
num_estimators = click.option('--num-estimators', default=100, help='Number of trees in our Ensemble Methods')
num_neighbours = click.option('--num-neighbours', default=21, help='Number of neighbours in our KNN/MLkNN')
n_components = click.option('--n-components', default=100,
help='Number of components for dimensionality reduction in Linear Discriminant Analysis')
model_name = click.option('--model-name', default='random_forest', help='The model type to train')
normalize_method = click.option('--normalize-method', default='standard_scalar', help='Normalization method')
feature_name = click.option('--feature-name', default='city', help='The feature to predict')
normalize_threshold = click.option('--normalize-threshold', default='0.0001',
help='Normalization threshold for binary normalization.')
@main.command('kfold')
@click.option('--k-fold', default=10, help='The value of k for cross-validation')
@test_size
@num_estimators
@num_neighbours
@n_components
@model_name
@normalize_method
@feature_name
@normalize_threshold
@click.option('--test-filename', default="test_sample.csv", help='Filename to save test dataset')
@click.option('--model-filename', default="model_k.pkl", help='Filename to save Model')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def kfold_cv(k_fold, test_size, num_estimators, num_neighbours, n_components, model_name, normalize_method,
feature_name, normalize_threshold, test_filename, model_filename, metadata_file, data_file, out_dir):
"""Train and evaluate a model with k-fold cross-validation. echo the model results to stderr."""
raw_data, microbes, feature, name_map = feature_extraction(data_file, metadata_file, feature_name=feature_name)
click.echo(f'Training {model_name} using {normalize_method} to predict {feature_name}',err=True)
tbl, seed = {}, randint(0, 1000)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
normalized = normalize_data(raw_data, method=normalize_method, threshold=normalize_threshold)
split_train_data, split_test_data, split_train_feature, split_test_feature = split_data(
normalized, feature, test_size=test_size, seed=seed
)
model, mean_score, std_score = k_fold_crossvalid(
split_train_data, split_train_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, k_fold=k_fold, seed=seed
)
click.echo(f'Average cross-validation score {mean_score} and standard deviation {std_score}',err=True)
predictions = predict_with_model(model, split_test_data).round()
file_name = str(model_name + '_' + normalize_method)
model_results = []
model_results.append(accuracy_score(split_test_feature, predictions.round()))
model_results.append(precision_score(split_test_feature, predictions, average="micro"))
model_results.append(recall_score(split_test_feature, predictions, average="micro"))
tbl[file_name] = model_results
conf_matrix = pd.DataFrame(confusion_matrix(split_test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), file_name + "." + 'csv'))
col_names = [
'Accuracy',
'Precision',
'Recall',
]
out_metrics = pd.DataFrame.from_dict(tbl, columns=col_names, orient='index')
out_metrics.to_csv(os.path.join(out_dir, str(model_name + '_' + normalize_method) + "." + 'csv'))
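# Illustrative invocation (not part of the original module); the console-script
# name "geoml" and the input file layout are assumptions:
#
#   geoml kfold --k-fold 10 --model-name random_forest \
#       --normalize-method standard_scalar --feature-name city \
#       metadata.csv abundances.csv results_kfold/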
@main.command('one')
@test_size
@num_estimators
@num_neighbours
@n_components
@model_name
@normalize_method
@feature_name
@normalize_threshold
@click.option('--model-filename', default=None, help='Filename of previously saved model')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def eval_one(test_size, num_estimators, num_neighbours, n_components, model_name, normalize_method,
feature_name, normalize_threshold, model_filename, metadata_file, data_file, out_dir):
"""Train and evaluate a model. Print the model results to stderr."""
raw_data, microbes, feature, name_map = feature_extraction(data_file, metadata_file, feature_name=feature_name)
click.echo(f'Training {model_name} using {normalize_method} to predict {feature_name}',err=True)
tbl, seed = {}, randint(0, 1000)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'classification_report'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'classification_report'))
normalized = normalize_data(raw_data, method=normalize_method, threshold=normalize_threshold)
train_data, test_data, train_feature, test_feature = split_data(
normalized, feature, test_size=test_size, seed=seed
)
model = train_model(
train_data, train_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, seed=seed
)
predictions = predict_with_model(model, test_data).round()
conf_matrix = pd.DataFrame(confusion_matrix(test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), str(model_name + '_' + normalize_method) + "." + 'csv'))
model_results = []
model_results.append(accuracy_score(test_feature, predictions.round()))
model_results.append(precision_score(test_feature, predictions, average="micro"))
model_results.append(recall_score(test_feature, predictions, average="micro"))
col_names = [
'Accuracy',
'Precision',
'Recall',
]
tbl[str(model_name + ' ' + normalize_method)] = model_results
out_metrics = pd.DataFrame.from_dict(tbl, columns=col_names, orient='index')
out_metrics.to_csv(os.path.join(out_dir, str(model_name + '_' + normalize_method) + "." + 'csv'))
@main.command('all')
@test_size
@num_estimators
@num_neighbours
@n_components
@feature_name
@normalize_threshold
@click.option('--noisy', default=True, help='Add noise to data')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def eval_all(test_size, num_estimators, num_neighbours, n_components, feature_name, normalize_threshold, noisy,
metadata_file, data_file, out_dir):
"""Evaluate all models and all normalizers."""
raw_data, microbes, feature, name_map = feature_extraction(data_file, metadata_file, feature_name=feature_name)
click.echo(f'Training all models using multiple normalization to predict {feature_name}',err=True)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'pd_confusion_matrix'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'pd_confusion_matrix'))
model_results = []
noise_data = [0]
if noisy==True:
noise_data = NOISE_VALUES
tbl, seed = {}, randint(0, 1000)
for model_name, norm_name in product(MODEL_NAMES, NORMALIZER_NAMES):
click.echo(
f'Training {model_name} using {norm_name} to predict {feature_name}',
err=True
)
normalized = normalize_data(raw_data, method=norm_name, threshold=normalize_threshold)
train_data, test_data, train_feature, test_feature = split_data(
normalized, feature, test_size=test_size, seed=seed
)
for i in noise_data:
click.echo(f'Gaussian noise {i} has been added',err=True)
# Adding noise to train data to check for over-fitting
train_noise = np.random.normal(0, i,(train_data.shape[0], train_data.shape[1]))
train_data = train_data+ train_noise
model = train_model(
train_data, train_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, seed=seed
)
predictions = predict_with_model(model, test_data).round()
model_results = predict_top_classes(model, test_data, test_feature)
model_results.append(precision_score(test_feature, predictions, average="micro"))
model_results.append(recall_score(test_feature, predictions, average="micro"))
model_results.insert(0,i);
model_results.insert(0,norm_name);
model_results.insert(0,model_name);
tbl[str(model_name + '_' + norm_name + '_' + str(i))] = model_results
conf_matrix = pd.DataFrame(confusion_matrix(test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), str(model_name + '_' + norm_name + '_' + str(i)) + "." + 'csv'))
CV_table = pd.crosstab(name_map[test_feature], name_map[predictions], rownames=['Actual ' + feature_name], colnames=['Predicted ' + feature_name])
CV_table.to_csv(os.path.join(str(out_dir + '/' + 'pd_confusion_matrix' + '/'), str(model_name + '_' + norm_name + '_' + str(i)) + "." + 'csv'))
col_names = [
'Classifier',
'Preprocessing',
'Noise',
'Accuracy',
'Top_2_accuracy',
'Top_3_accuracy',
'Top_5_accuracy',
'Top_10_accuracy',
'Precision',
'Recall',
]
out_metrics = pd.DataFrame.from_dict(tbl, columns=col_names, orient='index')
out_metrics.to_csv(os.path.join(out_dir, 'output_metrics' + "." + 'csv'))
@main.command('leave-one')
@num_estimators
@num_neighbours
@n_components
@model_name
@normalize_method
@feature_name
@click.option('--group-name', default='city', help='The group to be considered')
@normalize_threshold
@click.option('--test-filename', default="test_sample.csv", help='Filename to save test dataset')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def leave_one(num_estimators, num_neighbours, n_components, model_name, normalize_method,
feature_name, group_name, normalize_threshold, test_filename, metadata_file, data_file, out_dir):
"""Train and evaluate a model and validate using a third-party group. echo the model results to stderr."""
raw_data, microbes, feature, name_map, group_feature, group_map = group_feature_extraction(data_file,
metadata_file, feature_name=feature_name, group_name=group_name)
click.echo(f'Training {model_name} using {normalize_method} to predict {feature_name}',err=True)
tbl, seed = {}, randint(0, 1000)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
normalized = normalize_data(raw_data, method=normalize_method, threshold=normalize_threshold)
model, mean_score, std_score, test_data, test_feature = leave_one_group_out(
normalized, feature, group_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, seed=seed
)
predictions = predict_with_model(model, test_data).round()
conf_matrix = pd.DataFrame(confusion_matrix(test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), str(model_name + '_' + normalize_method) + "." + 'csv'))
model_results = []
model_results.append(accuracy_score(test_feature, predictions.round()))
model_results.append(precision_score(test_feature, predictions, average="micro"))
model_results.append(recall_score(test_feature, predictions, average="micro"))
col_names = [
'Accuracy',
'Precision',
'Recall',
]
tbl[str(model_name + ' ' + normalize_method)] = model_results
out_metrics = | pd.DataFrame.from_dict(tbl, columns=col_names, orient='index') | pandas.DataFrame.from_dict |
import matplotlib.pyplot as pyplot
from SQL import querys as sql
from Diagram import hex_converting
import seaborn as sb
import numpy as np
import pandas as ps
import sqlite3
import datetime
import config
save_plots = 'plots/'
__databaseFile = config.CONFIG['database_file_name']
sb.set(style="dark", color_codes=True)
# connect to the sqlite database and generate the weekly diagram
def diagram_weekly():
connection = sqlite3.connect(__databaseFile)
avarage_line_plot(connection)
# convert a unix timestamp to a formatted date string
def unix_to_date(timestamp):
d = datetime.datetime.fromtimestamp((timestamp))
formatted_time = d.strftime('%Y-%m-%d')
return formatted_time
# return the date string one day after start_date (used to step through the days)
def weekley_epoch(start_date):
date_1 = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = date_1 + datetime.timedelta(days=1)
date_1 = date_1.strftime('%Y-%m-%d')
end_date = end_date.strftime('%Y-%m-%d')
return end_date
def avarage(intarray):
avg_weekly= sum(intarray) / len(intarray)
return avg_weekly
# sort dataframe by date
def sort_db(dataframe):
df = dataframe.sort_values(by=['tx_time'])
return df
# create a chart showing the use of the OP_RETURN field in relation to time
def weekly_avarage_clc (connection):
avarage_year2013 = []
avarage_year2014 = []
avarage_year2015 = []
avarage_year2016 = []
avarage_year2017 = []
avarage_year2018 = []
# create dataframe by using pandas with corresponding SQL statement and the connection to database
read_data = ps.read_sql_query(sql.get_count_daily_op_return(), connection)
data = sort_db(read_data)
t1 = [tuple(x) for x in data.values]
array_av = []
x=0
i = 0
while t1[i+1][0] not in t1[-1][0]:
if x == 0:
end = weekley_epoch(t1[0][0])
array_av.append(t1[i][1])
if str(end) in t1[i+1][0]:
array_av.append(t1[i+1][1])
i = i+1
x = x+1
end = weekley_epoch(end)
else:
array_av.append(0)
x = x+1
end = weekley_epoch(end)
if (t1[-1][0] == str(end)):
break
if len(array_av) == 7:
if "2013" in end:
avarage_year2013.append(avarage(array_av))
elif "2014" in end:
avarage_year2014.append(avarage(array_av))
elif "2015" in end:
avarage_year2015.append(avarage(array_av))
elif "2016" in end:
avarage_year2016.append(avarage(array_av))
elif "2017" in end:
avarage_year2017.append(avarage(array_av))
elif "2018" in end:
avarage_year2018.append(avarage(array_av))
array_av = []
array_all = []
array_all.append(avarage_year2013)
array_all.append(avarage_year2014)
array_all.append(avarage_year2015)
array_all.append(avarage_year2016)
array_all.append(avarage_year2017)
array_all.append(avarage_year2018)
print(len(avarage_year2015))
print(array_tup(avarage_year2013))
return array_all
# plot the yearly occurrences of OP_RETURN as weekly averages
def avarage_line_plot (connection):
result_array = weekly_avarage_clc(connection)
result_array2013 = array_tup(result_array[0])
x_val13 = [x for x,_ in result_array2013]
y_val13 = [y for _,y in result_array2013]
result_array2014 = array_tup(result_array[1])
x_val14 = [x for x,_ in result_array2014]
y_val14 = [y for _,y in result_array2014]
result_array2015 = array_tup(result_array[2])
x_val15 = [x for x,_ in result_array2015]
y_val15 = [y for _,y in result_array2015]
result_array2016 = array_tup(result_array[3])
x_val16 = [x for x,_ in result_array2016]
y_val16 = [y for _,y in result_array2016]
result_array2017 = array_tup(result_array[4])
x_val17 = [x for x,_ in result_array2017]
y_val17 = [y for _,y in result_array2017]
result_array2018 = array_tup(result_array[5])
x_val18 = [x for x,_ in result_array2018]
y_val18 = [y for _,y in result_array2018]
d13 = {'week': x_val13, 'avarage': y_val13}
pdnumsqr13 = ps.DataFrame(d13)
sb.lineplot(x='week', y='avarage', data=pdnumsqr13,label="2013")
d14 = {'week': x_val14, 'avarage': y_val14}
pdnumsqr14 = ps.DataFrame(d14)
sb.lineplot(x='week', y='avarage', data=pdnumsqr14,label="2014")
d15 = {'week': x_val15, 'avarage': y_val15}
pdnumsqr15 = ps.DataFrame(d15)
sb.lineplot(x='week', y='avarage', data=pdnumsqr15,label="2015")
d16 = {'week': x_val16, 'avarage': y_val16}
pdnumsqr16 = | ps.DataFrame(d16) | pandas.DataFrame |
"""
Match two sets of proteins based on BLAST results,
using the maximum weight bipartite matching method.
Parameters:
1. set1 proteins fasta
2. set2 proteins fasta
3. set1 vs. set2 blast6 result
(must use -outfmt "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen slen")
4. set2 vs. set1 blast6 result
(must use -outfmt "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen slen")
5. Weight parameter - e.g. bitscore or pident
6. Min weight - minnimal weight value allowed in a match
* can normalize weights according to Emms, <NAME>., and <NAME>, 2015
Output:
TSV file with the matches pairs and their respective weight
"""
from __future__ import print_function, division
from networkx import bipartite, matching
from networkx.algorithms.bipartite.matching import minimum_weight_full_matching
import argparse
import pandas as pd
import numpy as np
### FUNCTIONS
def assign_ids(fasta, start=0):
i = start
d = {}
with open(fasta) as f:
for line in f:
if line.startswith('>'):
d[i] = line.strip()[1:]
i += 1
return d
blast6_headers = ["qseqid", "sseqid", "pident", "length", "mismatch", "gapopen", "qstart", "qend", "sstart", "send", "evalue", "bitscore", "qlen", "slen"]
def parse_blast6(blast6, weight_param):
df = | pd.read_csv(blast6, sep='\t', names=blast6_headers) | pandas.read_csv |
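# --- Illustrative sketch (not part of the original script) --------------------
# How the matching described in the header could be assembled once both BLAST
# tables are parsed: build a weighted bipartite graph and take a maximum weight
# matching. networkx.max_weight_matching is used here for brevity; the original
# script imports minimum_weight_full_matching, so its exact call is not shown.
import networkx as nx

def match_sets(df12, df21, weight_param="bitscore", min_weight=0.0):
    graph = nx.Graph()

    def add(set1_id, set2_id, weight):
        if weight < min_weight:
            return
        u, v = ("set1", set1_id), ("set2", set2_id)
        # keep the best weight seen for a pair across both BLAST directions
        if not graph.has_edge(u, v) or graph[u][v]["weight"] < weight:
            graph.add_edge(u, v, weight=weight)

    for row in df12.itertuples(index=False):   # set1 query vs. set2 subject
        add(row.qseqid, row.sseqid, getattr(row, weight_param))
    for row in df21.itertuples(index=False):   # set2 query vs. set1 subject
        add(row.sseqid, row.qseqid, getattr(row, weight_param))

    pairs = nx.max_weight_matching(graph, weight="weight")
    return [
        (u[1], v[1], graph[u][v]["weight"]) if u[0] == "set1"
        else (v[1], u[1], graph[u][v]["weight"])
        for u, v in pairs
    ]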
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn import preprocessing, cross_validation, neighbors, svm
from sklearn.preprocessing import StandardScaler
n_components=2
t_size=0.2
df = pd.read_csv('phising data with description.txt')
#print("Full data: ")
#print(df)
#print("df size before drop: ")
#print(df.shape)
#print("Column 1: ")
#print(df['c1'])
#print("Result data: ")
#print(df['result'])
X=np.array(df.drop(['result'],1))
#print("X: ")
#print(X)
y=np.array(df['result'])
#print("y: ")
#print(y)
#print(y.shape)
#print(X.shape)
#print("df size after drop: ")
#print(df.shape)
true=1
while true:
X_train,X_test,y_train,y_test=cross_validation.train_test_split(X,y,test_size=t_size)
clf=neighbors.KNeighborsClassifier()
clf.fit(X_train,y_train)
#find the accuracy for Original phishing
accuracy_original = clf.score(X_test,y_test)
print("Accuracy for Original Phishing KNN: ")
print(accuracy_original)
#find null accuracy for original dataset
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for original dataset: ",count.head(1)[1]/2211)
print("Accuracy for original by confusion matrix: ",acc)
print("Error rate for original KNN: ",err)
print("Sensitivity for original KNN: ",sen)
print("Specificity for original KNN: ",spe)
print("Precision for original KNN: ",pre)
print("Recall for original KNN: ",rec)
print("F-Score for original KNN: ",fsc)
print("TPR for original KNN: ",tpr)
print("FPR for original KNN: ",fpr)
print()
#original SVM
clf_svm=svm.SVC()
clf_svm.fit(X_train,y_train)
#find the accuracy for SVM
accuracy_pca_svm = clf_svm.score(X_test,y_test)
print("Accuracy for Original svm: ")
print(accuracy_pca_svm)
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf_svm.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for original dataset : ",count.head(1)[1]/2211)
print("Accuracy for original SVM: ",acc)
print("Error rate for original SVM: ",err)
print("Sensitivity for original SVM: ",sen)
print("Specificity for original SVM: ",spe)
print("Precision for original SVM: ",pre)
print("Recall for original SVM: ",rec)
print("F-Score for original SVM: ",fsc)
print("TPR for original SVM: ",tpr)
print("FPR for original SVM: ",fpr)
print()
#original + Random Forest
from sklearn.ensemble import RandomForestClassifier
clf_rf=RandomForestClassifier(n_estimators=100)
clf_rf.fit(X_train,y_train)
#find the accuracy for SVM
accuracy_pca_rf = clf_rf.score(X_test,y_test)
print("Accuracy for Original & RF: ")
print(accuracy_pca_rf)
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf_rf.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for pca dataset : ",count.head(1)[1]/2211)
print("Accuracy for original RF: ",acc)
print("Error rate for original RF: ",err)
print("Sensitivity for original RF: ",sen)
print("Specificity for original RF: ",spe)
print("Precision for original RF: ",pre)
print("Recall for original RF: ",rec)
print("F-Score for original RF: ",fsc)
print("TPR for original RF: ",tpr)
print("FPR for original RF: ",fpr)
print()
#PCA
scaler = StandardScaler()
scaler.fit(df.drop(['result'],1))
scaled_data = scaler.transform(df.drop(['result'],1))
from sklearn.decomposition import PCA
pca = PCA(n_components)
# fragmenting in components, final result is x_pca
pca.fit(scaled_data)
x_pca = pca.transform(scaled_data)
print("Size of data for original and after pca")
print(scaled_data.shape)
print(x_pca.shape)
#print("data after PCA: ")
#print(x_pca)
#plot the orthoganal components in X Y axis
plt.figure(figsize=(10,10))
plt.scatter(x_pca[:,0],x_pca[:,1],c=y,cmap='plasma')
plt.xlabel('First Component')
plt.ylabel('Second Component')
#plt.zlabel('Third Component')
#plt.show()
#fit to KNN
X=x_pca
#print("X for knn: ")
#print(X)
y=np.array(y)
#print ("y for knn: ")
#print(y)
#print("changed data:")
#print(type(x_pca))
#print(type(y))
# import necessary things for KNN and accuracy
#print("Test")
X_train,X_test,y_train,y_test=cross_validation.train_test_split(X,y,test_size=t_size)
clf=neighbors.KNeighborsClassifier()
clf.fit(X_train,y_train)
#find the accuracy for KNN
accuracy_pca = clf.score(X_test,y_test)
print("Accuracy for PCA KNN: ")
print(accuracy_pca)
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for pca dataset: ",count.head(1)[1]/2211)
print("Accuracy for pca by confusion matrix: ",acc)
print("Error rate for pca KNN: ",err)
print("Sensitivity for pca KNN: ",sen)
print("Specificity for pca KNN: ",spe)
print("Precision for pca KNN: ",pre)
print("Recall for pca KNN: ",rec)
print("F-Score for pca KNN: ",fsc)
print("TPR for pca KNN: ",tpr)
print("FPR for pca KNN: ",fpr)
print()
#PCA + SVM
clf_svm=svm.SVC()
clf_svm.fit(X_train,y_train)
#find the accuracy for SVM
accuracy_pca_svm = clf_svm.score(X_test,y_test)
print("Accuracy for PCA svm: ")
print(accuracy_pca_svm)
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf_svm.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for pca dataset : ",count.head(1)[1]/2211)
print("Accuracy for pca SVM: ",acc)
print("Error rate for pca SVM: ",err)
print("Sensitivity for pca SVM: ",sen)
print("Specificity for pca SVM: ",spe)
print("Precision for pca SVM: ",pre)
print("Recall for pca SVM: ",rec)
print("F-Score for pca SVM: ",fsc)
print("TPR for pca SVM: ",tpr)
print("FPR for pca SVM: ",fpr)
print()
#PCA + Random Forest
from sklearn.ensemble import RandomForestClassifier
clf_rf=RandomForestClassifier(n_estimators=100)
clf_rf.fit(X_train,y_train)
#find the accuracy for SVM
accuracy_pca_rf = clf_rf.score(X_test,y_test)
print("Accuracy for PCA & RF: ")
print(accuracy_pca_rf)
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf_rf.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for pca dataset : ",count.head(1)[1]/2211)
print("Accuracy for pca RF: ",acc)
print("Error rate for pca RF: ",err)
print("Sensitivity for pca RF: ",sen)
print("Specificity for pca RF: ",spe)
print("Precision for pca RF: ",pre)
print("Recall for pca RF: ",rec)
print("F-Score for pca RF: ",fsc)
print("TPR for pca RF: ",tpr)
print("FPR for pca RF: ",fpr)
print()
#PCA + Linear regression
from sklearn import linear_model
print("n_components: ",n_components)
linear_regression = linear_model.LinearRegression()
linear_regression.fit(X_train,y_train)
#find the accuracy for linear regression
accuracy_pca_lr = linear_regression.score(X_test,y_test)
print("Accuracy for PCA & LR: ")
print(accuracy_pca_lr)
#PCA + Logistic regression
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(penalty='l2',C=1)
model.fit(X_train,y_train)
#find the accuracy for Logistic regression
accuracy_pca_logr = model.score(X_test,y_test)
print("Accuracy for PCA & Logistic R: ")
print(accuracy_pca_logr)
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=model.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for pca dataset : ",count.head(1)[1]/2211)
print("Accuracy for pca LogR: ",acc)
print("Error rate for pca LogR: ",err)
print("Sensitivity for pca LogR: ",sen)
print("Specificity for pca LogR: ",spe)
print("Precision for pca LogR: ",pre)
print("Recall for pca LogR: ",rec)
print("F-Score for pca LogR: ",fsc)
print("TPR for pca LogR: ",tpr)
print("FPR for pca LogR: ",fpr)
print()
#Random Gaussian Projection
from sklearn import random_projection
transformer = random_projection.GaussianRandomProjection(n_components)
print("Transformer matrix: ")
print(transformer)
X = transformer.fit_transform(df.drop(['result'],1))
y=np.array(df['result'])
#find accuracy for random gaussian Projection
X_train,X_test,y_train,y_test=cross_validation.train_test_split(X,y,test_size=t_size)
clf=neighbors.KNeighborsClassifier()
clf.fit(X_train,y_train)
#find the accuracy for RGP
accuracy_gaussian = clf.score(X_test,y_test)
print("Accuracy for gaussian KNN: ")
print(accuracy_gaussian)
#Gaussian KNN
yt=pd.Series(y_test)
#print(yt.value_counts())
count=yt.value_counts()
#print(max(yt.mean(),1-yt.mean()))
#print(count.head(1)[1]/2211)
y_predict=clf.predict(X_test)
conf=metrics.confusion_matrix(y_test,y_predict)
print("Confusion matrix for original dataset: ")
print(conf)
TP=conf[1,1]
TN=conf[0,0]
FP=conf[0,1]
FN=conf[1,0]
acc=(TP+TN)/(TP+TN+FP+FN)
err=(1-acc)
sen=TP/(TP+FN)
spe=TN/(FP+TN)
pre=TP/(TP+FP)
rec=TP/(TP+FN)
fsc=(2*pre*rec)/(pre+rec)
tpr=sen
fpr=(1-spe)
print()
print("n_components: ",n_components)
print("test size: ",t_size)
print("null accuracy for original dataset: ",count.head(1)[1]/2211)
print("Accuracy for Gaussian by confusion matrix: ",acc)
print("Error rate for Gaussian KNN: ",err)
print("Sensitivity for Gaussian KNN: ",sen)
print("Specificity for Gaussian KNN: ",spe)
print("Precision for Gaussian KNN: ",pre)
print("Recall for Gaussian KNN: ",rec)
print("F-Score for Gaussian KNN: ",fsc)
print("TPR for Gaussian KNN: ",tpr)
print("FPR for Gaussian KNN: ",fpr)
print()
#Gaussian SVM
clf_svm=svm.SVC()
clf_svm.fit(X_train,y_train)
#find the accuracy for SVM
accuracy_gau_svm = clf_svm.score(X_test,y_test)
print("Accuracy for Gaussian svm: ")
print(accuracy_gau_svm)
yt= | pd.Series(y_test) | pandas.Series |
from pandas import to_datetime
from pandas.io.json import json_normalize
from requests import get
def chart(
apiToken="demo",
apiVersion="v0",
host="api.fugle.tw",
output="dataframe",
symbolId="2884",
):
outputs = ["dataframe", "raw"]
if output not in outputs:
raise ValueError('output must be one of ["dataframe", "raw"]')
url = "https://{}/realtime/{}/intraday/chart".format(host, apiVersion)
params = dict(apiToken=apiToken, symbolId=symbolId)
response = get(url=url, params=params)
json = response.json()
if response.status_code != 200:
if output == "dataframe":
return | json_normalize(json) | pandas.io.json.json_normalize |
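# Illustrative usage (not part of the original module): the defaults above
# suggest the public "demo" token is meant to pair with symbolId "2884";
# whether that token is still accepted by the API is an assumption.
def _demo_chart():
    return chart(apiToken="demo", symbolId="2884", output="raw")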
'''
Class containing the methods used to "clean" the information extracted by the web scraper service
(it is used directly by the analyzer class)
'''
import pandas as pd
import re
from pathlib import Path
import numpy as np
import unidecode
class Csvcleaner:
@staticmethod
def FilterDataOpinautos():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/opinautos_items.csv").resolve()
file_path_out = (base_path / "../extractors/opinautos_items_filtered.csv").resolve()
df_opinautos = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Marca','Modelo', 'Estrellas','Opinion','Votos','Fecha'])
        df_opinautos=Csvcleaner.FilterBrand(df_opinautos,'Marca')# Filter brands
        df_opinautos=Csvcleaner.FilterModel(df_opinautos,'Modelo')# Filter models
        df_opinautos=df_opinautos.loc[df_opinautos['Fecha'].str.contains('z', flags = re.IGNORECASE)].reset_index(drop=True)# Drop rows whose date is in a different format
for index, row in df_opinautos.iterrows():
            df_opinautos.iloc[index,4]=df_opinautos.iloc[index,4].replace(u"\r",u" ").replace(u"\n",u" ").strip()# Normalize whitespace in the opinion text
        df_opinautos=df_opinautos.loc[df_opinautos['Opinion'].str.len()<3000].reset_index(drop=True) # limit the number of characters
        df_opinautos['Fecha'] = pd.to_datetime(df_opinautos['Fecha'])# Convert the date format
mask = (df_opinautos['Fecha'] > '2019-1-01') & (df_opinautos['Fecha'] <= '2021-1-1')
df_opinautos=df_opinautos.loc[df_opinautos['Nombre'].str.contains('2019', flags = re.IGNORECASE) | df_opinautos['Nombre'].str.contains('2020', flags = re.IGNORECASE)]
df_opinautos=df_opinautos.loc[mask]
df_opinautos.to_csv(file_path_out,index=False)
return df_opinautos
@staticmethod
def FilterDataAutotest():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/autotest_items.csv").resolve()
file_path_out = (base_path / "../extractors/autotest_items_filtered.csv").resolve()
df_autotest = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Marca','Modelo', 'C_General','C_Vida','C_Diseño','C_Manejo','C_Performance','A_favor','En_contra'])
        df_autotest=Csvcleaner.FilterBrand(df_autotest,'Marca')# Filter brands
        df_autotest=Csvcleaner.FilterModel(df_autotest,'Modelo')# Filter models
df_autotest.to_csv(file_path_out,index=False)
return df_autotest
@staticmethod
def FilterDataMotorpasion():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/webextractor/motorpasion_items.csv").resolve()
file_path_out = (base_path / "../extractors/motorpasion_items_filtered.csv").resolve()
df_motor = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Version', 'C_General','C_Acabados','C_Seguridad','C_Equipamiento','C_Infotenimiento',
'C_Comportamiento', 'C_Motor', 'C_Transmision', 'C_Consumo', 'C_Espacio', 'C_Precio', 'Lo_Bueno', 'Lo_Malo'])
df_motor.dropna(subset=['Nombre'], inplace=True)
        df_motor=Csvcleaner.FilterBrand(df_motor,'Nombre')# Filter brands
        df_motor=Csvcleaner.FilterModel(df_motor,'Nombre')# Filter models
df_motor.to_csv(file_path_out,index=False)
return df_motor
@staticmethod
def FilterDataQuecoche():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/webextractor/quecochemecompro_items.csv").resolve()
file_path_out = (base_path / "../extractors/quecochemecompro_items_filtered.csv").resolve()
df_quecoche = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Marca', 'Puntuacion', 'Informativo', 'C_peque_manej', 'C_deportivo', 'C_bueno_barato',
'C_practico', 'C_ecologico', 'C_atractivo', 'Lo_mejor', 'Lo_peor'])
        df_quecoche=Csvcleaner.FilterBrand(df_quecoche,'Nombre')# Filter brands
        df_quecoche=Csvcleaner.FilterModel(df_quecoche,'Nombre')# Filter models
df_quecoche.to_csv(file_path_out,index=False)
return df_quecoche
@staticmethod
def FilterBrand(dataframe, brandField):
dataframe=dataframe.loc[dataframe[brandField].str.contains('nissan', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('chevrolet', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('buick', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('gmc', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('cadillac', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('audi', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('porsche', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('seat', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('volkswagen', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('toyota', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('ram', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('dodge', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('jeep', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('fiat', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('chrysler', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('alfa', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('kia', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('honda', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('mazda', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('hyundai', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('renault', flags = re.IGNORECASE)].reset_index(drop=True)
return dataframe
@staticmethod
def FilterModel(dataframe, ModelField):
dataframe=dataframe.loc[~dataframe[ModelField].str.contains('malib', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('cabstar', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('urvan', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('express', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('silverado', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('caddy', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('crafter', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('transporter', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('hiace', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('promaster', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('Ducato', flags = re.IGNORECASE)].reset_index(drop=True)
return dataframe
    # TODO: generate the score sheet
@staticmethod
def generateScoreSheet():
base_path = Path(__file__).parent
file_autos_path = (base_path / "../data_csv/autos_data_mod_csv.csv").resolve()
file_autos_path_out = (base_path / "../data_csv/scoreSheet.csv").resolve()
file_quecoche_path = (base_path / "../extractors/quecochemecompro_items_filtered.csv").resolve()
file_autotest_path = (base_path / "../extractors/autotest_items_filtered.csv").resolve()
file_motorpasion_path = (base_path / "../extractors/motorpasion_items_filtered.csv").resolve()
file_opinautos_path = (base_path / "../extractors/opinautos_items_Comprehend_parsed.csv").resolve()
col_list = ["marca", "modelo", "año", "versión"]
dfAutos = pd.read_csv(file_autos_path, encoding='utf-8', usecols=col_list)
dfQuecoche = pd.read_csv(file_quecoche_path, encoding='utf-8')
dfAutoTest = pd.read_csv(file_autotest_path, encoding='utf-8')
dfMotorPasion = pd.read_csv(file_motorpasion_path, encoding='utf-8')
dfOpinautos = pd.read_csv(file_opinautos_path, encoding='utf-8')
columns=['general', 'confort', 'desempeño','tecnología','ostentosidad','deportividad','economía','eficiencia','seguridad','ecología','a_favor','en_contra','cP','cN']
dfAutos[columns] = pd.DataFrame([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]], index=dfAutos.index)
dfAutos['modelo'] = dfAutos['modelo'].apply(Csvcleaner.remove_accents)
dfQuecoche['Nombre'] = dfQuecoche['Nombre'].apply(Csvcleaner.remove_accents)
dfAutoTest['Nombre'] = dfAutoTest['Nombre'].apply(Csvcleaner.remove_accents)
dfMotorPasion['Nombre'] = dfMotorPasion['Nombre'].apply(Csvcleaner.remove_accents)
dfOpinautos['Modelo'] = dfOpinautos['Modelo'].apply(Csvcleaner.remove_accents)
for index, row in dfAutos.iterrows():
general=[]
confort=[]
desempeño=[]
tecnologia=[]
ostentosidad=[]
deportividad=[]
economia=[]
eficiencia=[]
seguridad=[]
ecologia=[]
cp=[]
cn=[]
afavor=''
encontra=''
dfAux=dfQuecoche.loc[dfQuecoche['Nombre'].str.contains(row['marca']+' ', flags = re.IGNORECASE) &
dfQuecoche['Nombre'].str.contains(' '+row['modelo'], flags = re.IGNORECASE)]
if not dfAux.empty:
idxVersion=Csvcleaner.getVersionIndex(dfAux,' '+row['versión'],'Puntuacion')
if not pd.isnull(dfAux.at[idxVersion, 'Puntuacion']):
general.append(float(dfAux.at[idxVersion, 'Puntuacion'].replace(",", ".")))
if not pd.isnull(dfAux.at[idxVersion, 'C_peque_manej']):
confort.append(dfAux.at[idxVersion, 'C_peque_manej'])
if not pd.isnull(dfAux.at[idxVersion, 'C_atractivo']):
confort.append(dfAux.at[idxVersion, 'C_atractivo'])
if not pd.isnull(dfAux.at[idxVersion, 'C_deportivo']):
deportividad.append(dfAux.at[idxVersion, 'C_deportivo'])
if not pd.isnull(dfAux.at[idxVersion, 'C_bueno_barato']):
economia.append(dfAux.at[idxVersion, 'C_bueno_barato'])
if not pd.isnull(dfAux.at[idxVersion, 'C_peque_manej']):
economia.append(dfAux.at[idxVersion, 'C_peque_manej'])
if not pd.isnull(dfAux.at[idxVersion, 'C_peque_manej']):
eficiencia.append(dfAux.at[idxVersion, 'C_peque_manej'])
if not pd.isnull(dfAux.at[idxVersion, 'C_ecologico']):
eficiencia.append(dfAux.at[idxVersion, 'C_ecologico'])
if not pd.isnull(dfAux.at[idxVersion, 'C_ecologico']):
ecologia.append(dfAux.at[idxVersion, 'C_ecologico'])
if not pd.isnull(dfAux.at[idxVersion, 'Lo_mejor']):
if len(afavor)<2:
afavor+=dfAux.at[idxVersion, 'Lo_mejor']
else:
afavor+=' '+dfAux.at[idxVersion, 'Lo_mejor']
if not pd.isnull(dfAux.at[idxVersion, 'Lo_peor']):
if len(encontra)<2:
encontra+=dfAux.at[idxVersion, 'Lo_peor']
else:
encontra+=' '+dfAux.at[idxVersion, 'Lo_peor']
dfAux=dfAutoTest.loc[dfAutoTest['Nombre'].str.contains(row['marca']+' ', flags = re.IGNORECASE) &
dfAutoTest['Nombre'].str.contains(' '+row['modelo'], flags = re.IGNORECASE)]
if not dfAux.empty:
idxVersion=Csvcleaner.getVersionIndex(dfAux,' '+row['versión'],'C_General')
if not pd.isnull(dfAux.at[idxVersion, 'C_General']):
general.append(dfAux.at[idxVersion, 'C_General'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Vida']):
confort.append(dfAux.at[idxVersion, 'C_Vida'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Diseño']):
confort.append(dfAux.at[idxVersion, 'C_Diseño'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Manejo']):
confort.append(dfAux.at[idxVersion, 'C_Manejo'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Manejo']):
desempeño.append(dfAux.at[idxVersion, 'C_Manejo'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Performance']):
desempeño.append(dfAux.at[idxVersion, 'C_Performance'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Vida']):
tecnologia.append(dfAux.at[idxVersion, 'C_Vida'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Manejo']):
deportividad.append(dfAux.at[idxVersion, 'C_Manejo'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Performance']):
eficiencia.append(dfAux.at[idxVersion, 'C_Performance'])
if not pd.isnull(dfAux.at[idxVersion, 'C_Diseño']):
seguridad.append(dfAux.at[idxVersion, 'C_Diseño'])
if not | pd.isnull(dfAux.at[idxVersion, 'A_favor']) | pandas.isnull |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
| pd.DataFrame(self.numpy_input) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import os
import math
#import utm
import shapefile as shp
import seaborn as sns
from collections import OrderedDict
import geopandas as gpd
from geopy.distance import distance
import argparse
# PRIMARY DATA SOURCE
# https://s3.amazonaws.com/capitalbikeshare-data/index.html
# - - - - - - - - - - - - - - - -
# - - CHECK OUT MY DOCSTRINGS!!!
# - - NumPy/SciPy Docstring Format
# - - - - - - - - - - - - - - - -
def pd_csv_group(data_folder,num=-1):
"""Read many csv data files from a specified directory into a single data frame.
Parameters
----------
data_folder : str
path to directory containing ONLY csv data files.
num (int), optional
number of csv files to read and integrate into the primary dataframe.
Returns
-------
DataFrame()
dataframe built from csv files in given directory.
"""
if num == -1:
file_count = len(os.listdir(data_folder))
else:
file_count = num
df_list = []
#print('files to be included: ', files)
print("stacking dataframes....")
#print('(Please be patient for ~ 30 seconds)')
for file_num,file in enumerate(os.listdir(data_folder)):
f = pd.read_csv(data_folder+file)
print(f'appending df #{file_num+1}...')
df_list.append(f)
# if there is a file number limit, stop here.
if file_num == file_count-1:
data = pd.concat(df_list, axis=0, ignore_index=True, sort=False)
print(f'{len(data)/1e6:0.2}M rows of data with {len(data.columns)} features/columns derived from {file_num+1} CSV files. ')
return data
data = pd.concat(df_list, axis=0, ignore_index=True, sort=False)
print(f'{len(data)/1e6:0.2}M rows of data with {len(data.columns)} features/columns derived from {file_num+1} CSV files. ')
return data
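# Hedged usage sketch (the './data/' path is an assumption, not taken from this script):
# stacks the first two CSV files found there into one frame, but only if that folder exists.
if __name__ == '__main__':
    if os.path.isdir('./data/'):
        rides_df = pd_csv_group('./data/', num=2)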
def lifetime(duration):
    """Converts a number of seconds into a dictionary with keys 'days', 'hours', 'minutes', and 'seconds'.
Parameters
----------
duration (int):
The duration (in seconds) to be transformed into a dictionary.
Returns
-------
dict
a dictionary in the form of dict('days':int(), 'hours':int(), 'minutes':int(),'seconds':int())
"""
dct = {}
dct['days'] = duration//86400
dct['hours']= duration%86400//3600
dct['minutes'] = (duration%86400)%3600//60
dct['seconds'] = (duration%86400)%3600%60
return dct
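# Hedged usage sketch (duration chosen for illustration): 90061 seconds is one day, one hour,
# one minute and one second, so lifetime(90061) should return {'days': 1, 'hours': 1, 'minutes': 1, 'seconds': 1}.
if __name__ == '__main__':
    print(lifetime(90061))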
def freq_dict(lst):
    """Returns a dictionary whose keys are the unique values in the given list and whose values are their occurrence counts.
Parameters
----------
lst (list)
        a list of items to count occurrences for.
Returns
-------
dict
a dictionary of the format dict('unique_value_n': int(), 'unique_value_(n+1)': int(), ... )
"""
dct = dict()
for item in lst:
if item not in dct:
            dct[item]=1  # count the first occurrence as 1 so totals match the docstring
else:
dct[item]+=1
return dct
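# Hedged quick check (toy list for illustration): with the count starting at 1 above,
# freq_dict(['a', 'b', 'a', 'a']) is expected to return {'a': 3, 'b': 1}.
if __name__ == '__main__':
    print(freq_dict(['a', 'b', 'a', 'a']))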
def series_freq_dict(df, column_name):
"""Performs the function "freq_dict()" for df["column_name"]
after extracting the "datetime.hour" value from each element.
Parameters
----------
df (DataFrame)
the dataframe object
column_name (str)
name of column in dataframe. *** Must contain ONLY datetime() objects.
Returns
-------
dict()
a frequency dictionary from freq_dict()
"""
lst = [x.hour for x in df[column_name].values]
return freq_dict(lst)
class BikeReport(object):
"""Creates an instance of the BikeReport object.
Attributes
----------
bike_number (str)
bike-specific identification number. Example, "W32432".
duration (dict)
dictionary representation of the duration of bike service life as determined from the data given
Parameters
----------
df (DataFrame)
dataframe that contains the bike of interest.
bike_number (str)
bike-specific identification number. Example, "W32432".
"""
def __init__(self, df, bike_number):
self.bike_number = bike_number
self.duration = lifetime(df[df['Bike number'] ==bike_number].agg({'Duration':'sum'}).Duration )
self.trips = df[df['Bike number'] ==bike_number].count()[0]
def __repr__(self):
return f'<BikeReport.obj>\n\tBikeNumber:{self.bike_number}\n\tServiceLifetime:{self.duration}\n\tTotalTrips:{self.trips}'
    def lifetime(self):
        # self.duration already holds the days/hours/minutes/seconds breakdown computed in __init__
        return self.duration
def time_filter(df, colname, start_time, end_time):
    """Returns a filtered dataframe keeping only the rows whose values in the specified column fall between a start and end time.
Parameters
----------
df (dataframe)
dataframe object to apply filter to.
colname (str)
name of column in given dataframe to filter through.
start_time (datetime)
datetime object representing the lower bound of the filter.
end_time (datetime)
datetime object representing the upper bound of the filter.
Returns
-------
copy
a filtered copy of the given dataframe
"""
if type(start_time) != type(dt.time(0,0,0)):
print('Error: Given start time must be datetime.time() obj.')
return None
mask_low = df[colname] >= start_time
mask_hi = df[colname] <= end_time
mask = mask_low & mask_hi
return df[mask].copy()
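# Hedged usage sketch (toy frame for illustration): keeps only the rows whose 'Start time'
# falls between 08:00 and 10:00, using time_filter() defined above.
if __name__ == '__main__':
    _toy_df = pd.DataFrame({'Start time': [dt.time(7, 30), dt.time(8, 15), dt.time(9, 45)]})
    print(time_filter(_toy_df, 'Start time', dt.time(8, 0), dt.time(10, 0)))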
def station_super_dict(df,popular_stations_df):
"""Given a primary dataframe and a dataframe representing bike stations of interest, performs the
series_freq_dict function for each value in popular_stations_df to get the 'by hour' frequency of each station.
Parameters
----------
df (dataframe)
primary dataframe of all bikeshare transactions.
popular_stations_df (dataframe)
dataframe representing bike stations of interest.
Returns
-------
dict()
a dictionary with keys representing the values from the popular_stations_df,
and values are dictionaries of the output for the series_freq_dict() function for each station.
"""
station_time_hist=dict()
station_groups = df.groupby('ADDRESS')
pass
# - - - build the super dict
for station in popular_stations_df.ADDRESS.values:
try:
station_by_hour = station_groups.get_group(station)
except:
# sometimes there is a space at the end of the station name as found in the address column's values for station names.
station_by_hour = station_groups.get_group(station+' ')
# - - - The super-dict's keys are the station names, and the super-dict's values for each key are the time this station
station_time_hist[station] = series_freq_dict(station_by_hour, 'Start time')
return station_time_hist
def read_shapefile(sf):
    """Read a shape file into a pandas dataframe object.
Parameters
----------
sf (shapefile object)
a shape file
Returns
-------
dataframe
the loaded dataframe from the given shapefile.
"""
fields = [x[0] for x in sf.fields][1:]
records = sf.records()
shps = [s.points for s in sf.shapes()]
df = | pd.DataFrame(columns=fields, data=records) | pandas.DataFrame |
import os, sys
import numpy as np
from pyhdf.SD import SD, SDC
from scipy import ndimage
import glob
import pandas as pd
import xarray as xr
from joblib import Parallel, delayed
'''
# Basic parameters
lat_0 = 60
lon_0 = -180
res_x = 0.01 # 0.02 for the 2km grid
res_y = 0.01 # 0.02 for the 2km grid
tile_xdim = 600 # 300 for the 2km grid
tile_ydim = 600 # 300 for the 2km grid
# Input information
hid = 14 # 0 - 59
vid = 5 # 0 - 19
x = 0 # column/sample, 0-(tile_xdim-1)
y = 0 # row/line, 0-(tile_ydim-1)
# Output formula
lat_ulcnr = lat_0 - (vid*tile_ydim + y)*res_y # upper-left corner latitude
lon_ulcnr = lon_0 + (hid*tile_xdim + x)*res_y # upper-left corner longitude
lat_cntr = lat_ulcnr - 0.5*res_y # center latitude
lon_cntr = lon_ulcnr + 0.5*res_x # center longitude
'''
def get_tile_coords(tile, resolution_km=1.):
hid = int(tile[1:3])
vid = int(tile[4:6])
lat_0 = 60
lon_0 = -180
res_x = 0.01 * resolution_km
res_y = 0.01 * resolution_km
tile_xdim = int(600 / resolution_km)
tile_ydim = int(600 / resolution_km)
lat_ulcnr = lat_0 - vid*tile_ydim*res_y # upper-left corner latitude
lon_ulcnr = lon_0 + hid*tile_xdim*res_y # upper-left corner longitude
lat_cntr = lat_ulcnr - 0.5*res_y # center latitude
lon_cntr = lon_ulcnr + 0.5*res_x
lats = np.linspace(lat_0 - vid*tile_ydim*res_y, lat_0 - (vid+1)*tile_ydim*res_y+res_y, tile_ydim, endpoint=True)
lons = np.linspace(lon_0 + hid*tile_xdim*res_x, lon_0 + (hid+1)*tile_xdim*res_x-res_x, tile_xdim, endpoint=True)
return lats, lons
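# Hedged usage sketch (tile id 'h14v03' is an assumption, not taken from this module):
# prints the first grid-cell center and the grid shape of a 1-km GeoNEX tile.
if __name__ == '__main__':
    _demo_lats, _demo_lons = get_tile_coords('h14v03', resolution_km=1.)
    print(_demo_lats[0], _demo_lons[0], _demo_lats.shape, _demo_lons.shape)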
class L1GFile(object):
'''
Reads a single L1B file at a common resolution. Channels are bilinearly interpolated to the defined resolution.
Args:
file: Filepath to L1b
bands (optional): List of bands, default=list(range(1,17))
resolution_km (optional): Resolution in km for common grid, default=2
'''
def __init__(self, file, bands=list((range(1,17))),
resolution_km=2.):
self.file = file
self.bands = bands
self.resolution_km = resolution_km
self.resolution_size = int(600. / resolution_km)
self.reflective_bands = list(range(1,7))
self.emissive_bands = list(range(7,17))
def load(self):
fp = SD(self.file, SDC.READ)
data_array = np.zeros((self.resolution_size, self.resolution_size, len(self.bands)))
for i, b in enumerate(self.bands):
b_obj = fp.select('BAND%02i' % b)
attrs = b_obj.attributes()
scale_factor = attrs['Scale_Factor']
#fill_value = attrs['_FillValue'] ## this fill value seems to be wrong in l1g
fill_value = 32768.
arr = b_obj.get()[:].astype(np.float32)
offset = attrs['Offset_Constant']
arr[arr == fill_value] = np.nan
arr *= scale_factor
arr += offset
if arr.shape[0] != self.resolution_size:
arr = ndimage.interpolation.zoom(arr, self.resolution_size/arr.shape[0], order=1)
data_array[:,:,i] = arr
#self.data_array = data_array
return data_array
def solar(self):
fp = SD(self.file, SDC.READ)
sa = fp.select('Solar_Azimuth').get()[:]
sz = fp.select('Solar_Zenith').get()[:]
sa = ndimage.interpolation.zoom(sa, self.resolution_size/sa.shape[0], order=1)
sz = ndimage.interpolation.zoom(sz, self.resolution_size/sz.shape[0], order=1)
return sa*0.01, sz*0.01
def load_xarray(self):
data = self.load()
fp = SD(self.file, SDC.READ)
tile_path = os.path.join("/".join(self.file.split("/")[:-3]), 'constant')
tile_file = glob.glob(tile_path + '/*.hdf')[0]
sat, sensor, date, time, _, tile, _ = os.path.basename(self.file).replace(".hdf","").split("_")
lats, lons = get_tile_coords(tile, self.resolution_km)
sa, sz = self.solar()
#print(tile, 'lats', len(lats))
da = xr.DataArray(data, dims=('lat', 'lon', 'band'), coords=dict(lat=lats, lon=lons, band=self.bands))
zenith = xr.DataArray(sz, dims=('lat', 'lon'), coords=dict(lat=lats, lon=lons))
azimuth = xr.DataArray(sa, dims=('lat', 'lon'), coords=dict(lat=lats, lon=lons))
return xr.Dataset({'data': da, 'zenith': zenith, 'azimuth': azimuth})
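# Hedged usage sketch (the file pattern and band list are assumptions): reads one tile file
# onto the common 2-km grid, but only if a matching HDF file sits next to this script.
if __name__ == '__main__':
    _candidates = glob.glob('*_h14v03_*.hdf')
    if _candidates:
        print(L1GFile(_candidates[0], bands=[1, 2, 3], resolution_km=2.).load().shape)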
class GeoNEXL1G(object):
'''
Get information on L1G data directory, available tiles, years, and files
file lists are locally cached to future reading as retrieving file lists
can be time consuming.
Args:
data_directory: directory of the L1G product
sensor: (G16,G17,H8)
/nex/datapool/geonex/public/
'''
def __init__(self, data_directory, sensor):
self.data_directory = data_directory
self.sensor = sensor
self.sat = os.path.basename(os.path.dirname(os.path.dirname(data_directory)))
def tiles(self):
tile_pattern = os.path.join(self.data_directory, 'h*v*')
tile_folders = glob.glob(tile_pattern)
tiles = [os.path.basename(t) for t in tile_folders]
return tiles
def years(self):
tile = self.tiles()[0]
years = os.listdir(os.path.join(self.data_directory, tile))
years = [int(y) for y in years if y[0] == '2']
return years
def hours(self):
return list(range(0,24))
def files(self, tile=None, year=None, dayofyear=None, cachedir='.tmp'):
'''
Args:
tile (optional): Tile from GeoNEX grid
year (optional): Year of files to get
dayofyear (optional): Day of year
cachedir (optional): Cache filelist in directory
Returns:
pd.DataFrame of filelist with year, dayofyear, hour, minute, tile, file, h, and v
'''
if tile == None:
tile = '*'
if year == None:
year = '*'
else:
year = str(year)
if dayofyear == None:
dayofyear = '*'
else:
dayofyear = '%03i' % dayofyear
cache_file = f'{cachedir}/filelist/{self.sat}_{self.sensor}_{tile}_{year}_{dayofyear}.pkl'
if os.path.exists(cache_file):
return | pd.read_pickle(cache_file) | pandas.read_pickle |
"""DataFrame loaders from different sources for the AccountStatements init."""
import pandas as pd
import openpyxl as excel
def _prepare_df(transactions_df):
"""Cast the string columns into the right type
Parameters
----------
transactions_df : DataFrame
The DataFrame where doing the casting
Returns
---------
DataFrame
"""
    # Convert the date columns to datetime
transactions_df['Data valuta'] = pd.to_datetime(transactions_df['Data valuta'],format='%d/%m/%Y')
transactions_df['Data contabile'] = pd.to_datetime(transactions_df['Data contabile'],format='%d/%m/%Y')
    # Convert the amount column to a numeric value
importo_series = transactions_df['Importo'].str.replace('.','')
importo_series = importo_series.str.extract('([-]*\d+,\d+)')[0]
importo_series = importo_series.str.replace(',','.')
transactions_df['Importo'] = | pd.to_numeric(importo_series) | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning Engineer Nano Degree - Capstone Project
# ## Student: <NAME>
# ## January 08, 2017
#
# ## Overview
#
# This project started as a work project that I performed for my professional career. The original project was used to identify false-positive readings from web sources that were marked as external attack points against our public web site. After completing the project successfully, my thoughts were that it would be a perfect Capstone project for the Machine Learning Engineer Nanodegree program. It encompassed everything I had learned in the course and was applied in an actual environment with very accurate results. However, my employer is very wary of data (even anonymized data) being used outside the company. My solution to this was to utilize the same process, but change the data source and goal of the project. For this I turned to a Kaggle dataset.
#
# After searching the available Kaggle datasets, I decided to use one focused on lower back pain symptoms. This dataset offered me a real-world problem that needed solving, with enough information to exercise the process I used for my current job. I have always felt the medical community could benefit from either machine learning technologies or expert systems. This project offers me the chance to see how accurate a lower back diagnostic classifier I can build if all I have available are the data attributes.
#
# The medical field is comprised of individual doctors and physicians that are tasked with diagnosing patient’s illnesses based on patient symptoms and test results. By using machine learning, these healthcare professionals can be assisted in their diagnosis by using patient data to predict the causes of the patient's symptoms.
# This project is designed to provide a methodology of taking raw data and transforming it into a classification tool that can be used to diagnose patients with normal or abnormal spinal conditions. The purpose of this project is to assume raw unclassified data is available and, through the process laid out here, derive classification for each data set through data grouping and random sampling.
#
# A Kaggle dataset will be used for the feature and classification data. Although the data contains a classification attribute, this will only be used to identify clusters of data groups taken from random samples.
# The goal of this project is to generate a training data set using clustering techniques to separate records into Normal and Abnormal classes. Once the training data is created, a classification model is built and then scored against how well the classifier predicts the original classification of each record.
# My goal is simple: determine how close I can get a classifier's predictions to some of the Kaggle reports. The biggest difference is that I will not use the dataset's classification attribute directly, but will use unsupervised learning to derive it. In the end, I will use an accuracy score to compare my classifier against the actual dataset classification.
#
# For reference, here are some Kaggle entries and their scores:
#
#
# __Kaggle Reference | Score Method | Score__
# ***
# - [Discriminating Normal-vs-Abnormal Spines](https://www.kaggle.com/antonio00/d/sammy123/lower-back-pain-symptoms-dataset/discriminating-normal-vs-abnormal-spines/discussion) | Specificity | 54%
# - [Using k-NN to Predict Abnormal Spines](https://www.kaggle.com/sammy123/lower-back-pain-symptoms-dataset/discussion/24230) | Accuracy | 93%
# ## Process
#
# The process used for this project is pretty straightforward. It consists of the following steps (a small illustrative sketch follows the list):
#
# - Load and display the original dataset
# - Remove and segregate classification information
# - Remove less important features
# - Scale and Transform the feature data
# - Use Unsupervised Learning to classify the data
# - Perform PCA to display UL classification information
# - Simulate cluster sample evaluation to identify cluster types (Normal/Abnormal)
# - Generate training dataset by classifying data using cluster analysis
# - Determine a suitable classification model to be used
# - Generate model using final training dataset
# - Evaluate model classification against original classification attribute for accuracy
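#
# Below is a hedged, illustrative sketch of the cluster-then-classify idea from the list above, run on synthetic stand-in data; every name in it is an assumption for illustration only and it is not the notebook's actual pipeline.

# In[ ]:

# Illustrative sketch only: synthetic features and hypothetical names.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

_X_demo = np.random.rand(100, 6)                                                  # stand-in for a reduced feature set
_X_scaled = StandardScaler().fit_transform(_X_demo)                               # scale/transform before clustering
_demo_labels = KMeans(n_clusters=2, random_state=0).fit_predict(_X_scaled)        # unsupervised "Normal"/"Abnormal" groups
_demo_clf = RandomForestClassifier(random_state=0).fit(_X_scaled, _demo_labels)   # classifier trained on derived labels
print(_demo_clf.score(_X_scaled, _demo_labels))                                   # in the real project, compared to the original labels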
# ## Data source
#
# The data for this project has been downloaded from Kaggle and is named the "Lower Back Pain Symptoms Dataset" (https://www.kaggle.com/sammy123/lower-back-pain-symptoms-dataset)
#
# The information is layed out with 12 different featurs, labeld Col1-Col12 (Attribute1-12) and a classification attribute of "Abnormal" or "Normal". The descriptions for each attribute are as follows:
#
# - Attribute1 = pelvic_incidence (numeric)
# - Attribute2 = pelvic_tilt (numeric)
# - Attribute3 = lumbar_lordosis_angle (numeric)
# - Attribute4 = sacral_slope (numeric)
# - Attribute5 = pelvic_radius (numeric)
# - Attribute6 = degree_spondylolisthesis (numeric)
# - Attribute7 = pelvic_slope(numeric)
# - Attribute8 = direct_tilt(numeric)
# - Attribute9 = thoracic_slope(numeric)
# - Attribute10 = cervical_tilt(numeric)
# - Attribute11 = sacrum_angle(numeric)
# - Attribute12 = scoliosis_slope(numeric)
#
# Although not necessary, for this project the feature names will be replaced with their descriptions to allow for easier feature analysis.
#
# The classification attribute will be used to classify clusters or groups of data items with similar features and will not be directly used in constructing a classifier for the dataset. The original classification attribute will be used to evaluate the final classification model.
# ## Data Gathering
# ### Data Import and Display
# In[ ]:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Show matplotlib plots inline (nicely formatted in the notebook)
input_file = "../../../input/sammy123_lower-back-pain-symptoms-dataset/Dataset_spine.csv"
input_data = | pd.read_csv(input_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = | DataFrame(data_timedelta64) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from .._utils import color_digits, color_background
from ..data import Data, DataSamples
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc
#rom scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
import gc
#import weakref
import copy
import itertools
import calendar
#from ..cross import DecisionTree, Crosses
import networkx as nx
from operator import itemgetter
import matplotlib.ticker as mtick
try:
import fastcluster
except Exception:
print('For fullness analysis using hierarchical clustering please install fastcluster package.')
from scipy.cluster.hierarchy import fcluster
try:
import hdbscan
except Exception:
print('For fullness analysis using HDBSCAN clustering please install hdbscan package.')
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
from os import system
from IPython.display import Image as Display_Image
#from joblib import Parallel, delayed
# Created by <NAME> and <NAME>
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class Processor(metaclass = ABCMeta):
"""
Base class for processing objects of Data class
"""
@abstractmethod
def __init__(self):
'''
self.stats is a DataFrame with statistics about self.work()
'''
self.stats = pd.DataFrame()
@abstractmethod
def work(self, data, parameters):
pass
def param_dict_to_stats(self, data, d):
'''
TECH
Transforms a dict of parameters to self.stats
Parameters
-----------
data: Data object being processed
d: dictionary {action : list_of_features} where action is a string with action description and list_of_features is a list of features' names to apply the action to
'''
col1 = []
col2 = []
for (how, features) in d.items():
col1 = col1 + [how + ' (' + str(round(data.dataframe[features[i]].mean(), 3)) + ')' if how == 'mean' else how for i in range(len(features))]
col2 = col2 + features
self.stats = pd.DataFrame({'action' : col1, 'features': col2})
#---------------------------------------------------------------
class MissingProcessor(Processor):
'''
Class for missing values processing
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, parameters, quantiles=100, precision=4):
'''
Deals with missing values
Parameters:
-----------
data: an object of Data type that should be processed
inplace: whether to change the data or to create a new Data object
parameters: {how_to_process : features_to_process}
how_to_process takes:
'delete' - to delete samples where the value of any feature from features_to_process is missing
'mean' - for each feature from features_to_process to fill missings with the mean value
'distribution' - for each feature from features_to_process to fill missings according to non-missing distribution
a value - for each feature from features_to_process to fill missings with this value
features_to_process takes list of features from data
quantiles: number of quantiles for 'distribution' type of missing process - all values are divided into quantiles,
                then missing values are filled with the average value of each quantile. If the number of unique values is less than the number of quantiles
or field type is not int, float, etc, then no quantiles are calculated - missings are filled with existing values according
to their frequency
precision: precision for quantile edges and average quantile values
Returns:
----------
A copy of data with missings processed for features mentioned in parameters
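        Example:
        ----------
        A hypothetical call (assuming data is a Data object with 'age' and 'income' features):
            MissingProcessor().work(data, parameters={'mean': ['age'], 'distribution': ['income']})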
'''
for how in parameters:
if isinstance(parameters[how], str):
parameters[how] = [parameters[how]]
result = data.dataframe.copy()
for how in parameters:
if how == 'delete':
for feature in parameters[how]:
result = result[result[feature].isnull() == False]
if data.features != None and feature in data.features:
data.features.remove(feature)
elif how == 'mean':
for feature in parameters[how]:
result[feature].fillna(result[feature].mean(), inplace = True)
elif how == 'distribution':
for feature in parameters[how]:
if data.dataframe[feature].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[feature].unique().shape[0]<quantiles:
summarized=data.dataframe[[feature]].dropna().groupby(feature).size()
summarized=summarized.reset_index().rename({feature:'mean', 0:'size'}, axis=1)
else:
summarized=data.dataframe[[feature]].rename({feature:'value'}, axis=1).join(pd.qcut(data.dataframe[feature].dropna(), q=quantiles, precision=4, duplicates='drop')).groupby(feature).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index(drop=True)
#summarized=summarized.reset_index()
summarized['p']=summarized['size']/summarized['size'].sum()
result[feature]=result[feature].apply(lambda x: np.random.choice(summarized['mean'].round(precision), p=summarized['p']) if pd.isnull(x) else x)
else:
result[parameters[how]] = result[parameters[how]].fillna(how)
# statistics added on Dec-04-2018
self.param_dict_to_stats(data, parameters)
return Data(result, data.target, data.features, data.weights, data.name)
#---------------------------------------------------------------
class StabilityAnalyzer(Processor):
'''
For stability analysis
'''
def __init__(self):
self.stats = pd.DataFrame({'sample_name' : [], 'parameter' : [], 'meaning': []})
def work(self, data, time_column, sample_name = None, psi = None, event_rate=None, normalized=True, date_format = "%d.%m.%Y", time_func = (lambda x: 100*x.year + x.month),
yellow_zone = 0.1, red_zone = 0.25, figsize = None, out = True, out_images = 'StabilityAnalyzer/', sep=';', base_period_index=0):
'''
        Calculates the dynamics of feature (or value group) changes over time, so it should be used only for discrete or WOE-transformed
features. There are 2 types of analysis:
PSI. Represents a heatmap (Stability Table) of features stability that contains 3 main zones: green (the feature is
stable), yellow (the feature is not very stable) and red (the feature is unstable). StabilityIndex (PSI) is calculated for each
time period relatively to the first period.
Stability index algorithm:
For each feature value and time period number of samples is calculated: e.g., N[i, t] is number of samples for value i and time period t.
                StabilityIndex[t] = sum_i[ (N[i, t]/sum_i(N[i, t]) - N[i, 0]/sum_i(N[i, 0])) * log( (N[i, t]/sum_i(N[i, t])) / (N[i, 0]/sum_i(N[i, 0])) ) ]
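                Worked example (hypothetical counts): if value i holds 20% of the observations in the base period
                and 25% in period t, its contribution is (0.25 - 0.20) * log(0.25 / 0.20) ≈ 0.011; summing such
                contributions over all values of the feature gives StabilityIndex[t].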
ER (event rate). Calculates average event rate and number of observations for each feature's value over time.
After calculation displays the Stability Table (a heatmap with stability indexes for each feature value and time period)
and Event rate graphs
Parameters:
-----------
data: data to analyze (type Data)
time_column: name of a column with time values to calculate time periods
sample_name: name of sample for report
psi: list of features for PSI analysis (if None then all features from the input Data object will be used)
event_rate: list of features for event rate and distribution in time analysis (if None then all features from the input Data object will be used)
date_format: format of time values in time_column. Codes for format:
%a Weekday as locale’s abbreviated name. Sun, Mon, …, Sat (en_US)
%A Weekday as locale’s full name. Sunday, Monday, …, Saturday (en_US)
%w Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 0, 1, …, 6
%d Day of the month as a zero-padded decimal number. 01, 02, …, 31
%b Month as locale’s abbreviated name. Jan, Feb, …, Dec (en_US)
%B Month as locale’s full name. January, February, …, December (en_US)
%m Month as a zero-padded decimal number. 01, 02, …, 12
%y Year without century as a zero-padded decimal number. 00, 01, …, 99
%Y Year with century as a decimal number. 1970, 1988, 2001, 2013
%H Hour (24-hour clock) as a zero-padded decimal number. 00, 01, …, 23
%I Hour (12-hour clock) as a zero-padded decimal number. 01, 02, …, 12
%p Locale’s equivalent of either AM or PM. AM, PM (en_US)
%M Minute as a zero-padded decimal number. 00, 01, …, 59
%S Second as a zero-padded decimal number. 00, 01, …, 59
%f Microsecond as a decimal number, zero-padded on the left. 000000, 000001, …, 999999
                %z UTC offset in the form +HHMM or -HHMM (empty string if the
object is naive). (empty), +0000, -0400, +1030
%Z Time zone name (empty string if the object is naive). (empty), UTC, EST, CST
%j Day of the year as a zero-padded decimal number. 001, 002, …, 366
%U Week number of the year (Sunday as the first day of the week)
as a zero padded decimal number. All days in a new year preceding
the first Sunday are considered to be in week 0. 00, 01, …, 53 (6)
%W Week number of the year (Monday as the first day of the week) as
a decimal number. All days in a new year preceding the first
Monday are considered to be in week 0. 00, 01, …, 53 (6)
%c Locale’s appropriate date and time representation. Tue Aug 16 21:30:00 1988 (en_US)
%x Locale’s appropriate date representation. 08/16/88 (None); 08/16/1988 (en_US)
%X Locale’s appropriate time representation. 21:30:00 (en_US)
time_func: function for time_column parsing (changes date to some value, representing time period) or
a period type for dt.to_period() function. Codes for available periods:
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
                BQ business quarter end frequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
yellow_zone: the lower border for the yellow stability zone ('not very stable') in percents of derivation
red_zone: the lower border for the red stability zone ('unstable') in percents of derivation
figsize: matplotlib figsize of the Stability Table
out: a boolean for image output or a path for xlsx output file to export the Stability Tables
out_images: a path for image output (default - StabilityAnalyzer/)
sep: the separator to be used in case of csv export
base_period_index: index of period (starting from 0) for other periods to compare with (0 for the first, -1 for the last)
'''
print('Warning: only for discrete features!!!')
if sample_name is None:
if pd.isnull(data.name):
sample_name = 'sample'
else:
sample_name = data.name
out_images = out_images + sample_name + '/'
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out'], 'meaning' : [out]}))
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out_images'], 'meaning' : [out_images]}))
psi = data.features.copy() if psi is None else [x for x in psi if x in data.features]
event_rate = data.features.copy() if event_rate is None else [x for x in event_rate if x in data.features]
all_features=list(set(psi+event_rate))
if figsize is None:
figsize=(12, max(1,round(len(psi)/2,0)))
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
writer = pd.ExcelWriter(out, engine='openpyxl')
tmp_dataset = data.dataframe[all_features + [time_column, data.target] + ([] if data.weights is None else [data.weights])].copy()
tmp_dataset[time_column] = pd.to_datetime(tmp_dataset[time_column], format=date_format, errors='coerce')
if callable(time_func):
tmp_dataset['tt'] = tmp_dataset[time_column].map(time_func)
elif isinstance(time_func, str):
try:
tmp_dataset['tt'] = tmp_dataset[time_column].dt.to_period(time_func).astype(str)
except Exception:
print('No function or correct period code was provided. Return None.')
return None
c = 0
for feature in sorted(all_features):
print (feature)
if data.weights is not None:
feature_stats=tmp_dataset[[feature, 'tt', data.target, data.weights]]
feature_stats['---weight---']=feature_stats[data.weights]
else:
feature_stats=tmp_dataset[[feature, 'tt', data.target]]
feature_stats['---weight---']=1
feature_stats[data.target]=feature_stats[data.target]*feature_stats['---weight---']
feature_stats=feature_stats[[feature, 'tt', data.target, '---weight---']].groupby([feature, 'tt'], as_index=False).\
agg({'---weight---':'size', data.target:'mean'}).rename({feature:'value', '---weight---':'size', data.target:'mean'}, axis=1)
feature_stats['feature']=feature
if c == 0:
all_stats = feature_stats
c = c+1
else:
all_stats = all_stats.append(feature_stats, ignore_index=True)
all_stats['size']=all_stats['size'].astype(float)
all_stats['mean']=all_stats['mean'].astype(float)
if len(psi)>0:
stability1=all_stats[all_stats.feature.isin(psi)][['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
stability1.columns.name=None
#display(stability1)
dates = stability1.drop(['feature', 'value'], 1).columns.copy()
stability2 = stability1[['feature', 'value']].copy()
for date in dates:
stability2[date] = list(stability1[date]/list(stability1.drop(['value'], 1).groupby(by = 'feature').sum()[date][:1])[0])
#display(stability2)
start_date = dates[base_period_index]
stability3 = stability2[['feature', 'value']]
for date in dates:
                # replace +/-inf (log of a zero share) before rounding - the trailing ".replace([])"
                # in the original looks like a truncated infinity guard
                stability3[date] = round(((stability2[date]-stability2[start_date])*np.log(stability2[date]/stability2[start_date])).replace([np.inf, -np.inf], 0).fillna(0), 2)
#display(stability3)
stability4 = stability3.drop(['value'], 1).groupby(by = 'feature').sum()
#display(stability4)
fig, ax = plt.subplots(figsize = figsize)
ax.set_facecolor("red")
sns.heatmap(stability4, ax=ax, yticklabels=stability4.index, annot = True, cmap = 'RdYlGn_r', center = yellow_zone, vmax = red_zone, linewidths = .05, xticklabels = True)
if out==True or isinstance(out, str):
plt.savefig(out_images+"stability.png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
stability4.style.apply(color_background,
mn=0, mx=red_zone, cntr=yellow_zone).to_excel(writer, engine='openpyxl', sheet_name='PSI')
worksheet = writer.sheets['PSI']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['B2']
else:
                    print('Unknown or unacceptable format for exporting several tables. Use .xlsx. Skipping export.')
if len(event_rate)>0:
for_event_rate=all_stats[all_stats['feature'].isin(event_rate)]
date_base=pd.DataFrame(all_stats['tt'].unique(), columns=['tt']).sort_values('tt')
for feature in sorted(for_event_rate['feature'].unique()):
cur_feature_data=for_event_rate[for_event_rate['feature']==feature].copy()
#display(cur_feature_data)
if normalized:
for tt in sorted(cur_feature_data['tt'].unique(), reverse=True):
cur_feature_data.loc[cur_feature_data['tt']==tt, 'percent']=cur_feature_data[cur_feature_data['tt']==tt]['size']/cur_feature_data[cur_feature_data['tt']==tt]['size'].sum()
#display(cur_feature_data)
fig, ax = plt.subplots(1,1, figsize=(15, 5))
ax2 = ax.twinx()
ax.grid(False)
ax2.grid(False)
sorted_values=sorted(cur_feature_data['value'].unique(), reverse=True)
for value in sorted_values:
to_visualize='percent' if normalized else 'size'
value_filter = (cur_feature_data['value']==value)
er=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')['mean']
height=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')[to_visualize].fillna(0)
bottom=date_base.merge(cur_feature_data[['tt',to_visualize]][cur_feature_data['value']>value].groupby('tt', as_index=False).sum(), on='tt', how='left')[to_visualize].fillna(0)
ax.bar(range(date_base.shape[0]), height, bottom=bottom if value!=sorted_values[0] else None, edgecolor='white', alpha=0.3)
ax2.plot(range(date_base.shape[0]), er, label=str(round(value,3)), linewidth=2)
plt.xticks(range(date_base.shape[0]), date_base['tt'])
fig.autofmt_xdate()
ax2.set_ylabel('Event Rate')
ax2.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
ax2.annotate('Obs:', xy=(0, 1), xycoords=('axes fraction', 'axes fraction'), xytext=(-25, 5), textcoords='offset pixels', color='blue', size=11)
for i in range(date_base.shape[0]):
ax2.annotate(str(int(cur_feature_data[cur_feature_data['tt']==date_base['tt'][i]]['size'].sum())),
xy=(i, 1),
xycoords=('data', 'axes fraction'),
xytext=(0, 5),
textcoords='offset pixels',
#rotation=60,
ha='center',
#va='bottom',
color='blue',
size=11)
ax.set_ylabel('Total obs')
plt.xlabel(time_column)
plt.suptitle(feature + ' event rate in time' if callable(time_func) else feature + ' event rate in time, period = '+time_func)
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles[::-1], labels[::-1], loc=0, fancybox=True, framealpha=0.3)
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
event_rate_df=all_stats[['feature', 'value', 'tt', 'mean']].pivot_table(values='mean', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
event_rate_df.columns.name=None
event_rate_df.style.apply(color_background,
mn=0, mx=all_stats['mean'].mean()+2*all_stats['mean'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn_r, subset=pd.IndexSlice[:, [x for x in event_rate_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Event Rate', index=False)
worksheet = writer.sheets['Event Rate']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
if x[0].column!='B':
for cell in worksheet[x[0].column]:
if cell.row!=1:
cell.number_format = '0.000%'
worksheet.freeze_panes = worksheet['C2']
size_df=all_stats[['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
size_df.columns.name=None
size_df.style.apply(color_background,
mn=0, mx=all_stats['size'].mean()+2*all_stats['size'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn, subset=pd.IndexSlice[:, [x for x in size_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Observations', index=False)
worksheet = writer.sheets['Observations']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['C2']
else:
                    print('Unknown or unacceptable format for exporting several tables. Use .xlsx. Skipping export.')
if isinstance(out, str):
writer.close()
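# A hypothetical usage sketch (sample data, file names and the time column are assumptions):
#
#   sa = StabilityAnalyzer()
#   sa.work(woe_transformed_data, time_column='report_date', sample_name='train',
#           date_format='%d.%m.%Y', time_func='M',          # monthly periods
#           yellow_zone=0.1, red_zone=0.25,
#           out='stability.xlsx', out_images='StabilityAnalyzer/')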
#---------------------------------------------------------------
class DataVisualizer(Processor):
'''
Supports different types of data visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, distribution = True, factorplot = True, factorplot_separate = False, pairplot = None,
out=False, out_images='DataVisualizer/', plot_cells=20, categorical=None):
'''
Produces distribution plot, factorplot, pairplot
Parameters:
-----------
data: data to visualize
distribution: parameter for a distribution plot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use distribution plot
factorplot: parameter for a factorplot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use factorplot
factorplot_separate: if True then separate plots for each target value
pairplot: list of features to make a pairplot for
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - DataVisualizer/)
plot_cells: how many cells would plots get in output excel
categorical: a list of features to be treated as categorical (countplots will be produced instead of distplots)
'''
if pairplot is None:
pairplot=[]
if categorical is None:
categorical=[]
dataframe_t = data.dataframe[data.features + [data.target]].copy()
data = Data(dataframe_t, features = data.features, target = data.target)
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Data Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_plot_number=0
if distribution:
print ('Distributions of features: ')
            if isinstance(distribution, list):
                features = distribution
            else:
                if data.features is None:
                    print ('No features claimed. Please set data.features = ')
                    return None
                features = data.features
for feature in features:
current_plot_number=current_plot_number+1
if data.dataframe[feature].dtype==object or feature in categorical:
f, axes = plt.subplots()
sns.countplot(data.dataframe[feature].dropna())
f.autofmt_xdate()
else:
sns.distplot(data.dataframe[feature].dropna())
if data.dataframe[feature].isnull().any():
plt.title(feature+' (miss = ' + str(round(data.dataframe[feature].isnull().value_counts()[True]/data.dataframe.shape[0],3))+')')
else:
plt.title(feature+' (miss = 0)')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_d.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_d.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Distribution plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_d.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if factorplot:
print ('Factorplot: ')
            if isinstance(factorplot, list):
                features = factorplot
            else:
                if data.features is None:
                    print ('No features claimed. Please set data.features = ')
                    return None
                features = data.features
if factorplot_separate:
for feature in features:
current_plot_number=current_plot_number+1
# edited 21-Jun-2018 by <NAME>
f, axes = plt.subplots(data.dataframe[data.target].drop_duplicates().shape[0], 1, figsize=(4, 4), sharex=True)
f.autofmt_xdate()
#for target_v in data.dataframe[data.target].drop_duplicates():
targets = list(data.dataframe[data.target].drop_duplicates())
for target_i in range(len(targets)):
if data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().any():
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = ' + str(round(data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().value_counts()[True]/data.dataframe[data.dataframe[data.target]==targets[target_i]].shape[0],3))
else:
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = 0'
if data.dataframe[feature].dtype==object or feature in categorical:
ax=sns.countplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i], color = 'm')
ax.set(xlabel=x_label)
else:
sns.distplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i],
axlabel=x_label, color = 'm')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
else:
for feature in features:
current_plot_number=current_plot_number+1
sns.factorplot(x=feature, hue = data.target, data = data.dataframe, kind='count', palette = 'Set1')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if pairplot != []:
current_plot_number=current_plot_number+1
print ('Pairplot')
sns.pairplot(data.dataframe[pairplot].dropna())
if out==True or isinstance(out, str):
plt.savefig(out_images+"pairplot.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Pair plot for '+str(pairplot)+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+"pairplot.png")
plt.show()
if isinstance(out, str):
workbook.close()
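# A hypothetical usage sketch (feature names and the output file are assumptions):
#
#   dv = DataVisualizer()
#   dv.work(data, distribution=True, factorplot=True, factorplot_separate=True,
#           pairplot=['feature_a', 'feature_b'], categorical=['feature_c'],
#           out='visualization.xlsx', out_images='DataVisualizer/')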
#---------------------------------------------------------------
class TargetTrendVisualizer(Processor):
'''
Supports target trend visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, features=None, quantiles=100, magnify_trend=False, magnify_std_number=2, hide_every_even_tick_from=50,
min_size=10, out=False, out_images='TargetTrendVisualizer/', plot_cells=20):
'''
Calculates specified quantiles/takes categories, calculates target rates and sizes, then draws target trends
Parameters:
-----------
data: an object of Data type
features: the list of features to visualize, can be omitted
quantiles: number of quantiles to cut feature values on
magnify_trend: if True, then axis scale for target rate will be corrected to exclude outliers
magnify_std_number: how many standard deviations should be included in magnified scale
hide_every_even_tick_from: if there is too many quantiles then every second tick on x axis will be hidden
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - TargetTrendVisualizer/)
plot_cells: how many cells would plots get in output excel
'''
if features is None:
cycle_features=data.features.copy()
else:
cycle_features=features.copy()
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Target Trend Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_feature_number=0
for f in cycle_features:
if f not in data.dataframe:
print('Feature', f, 'not in input dataframe. Skipping..')
else:
print('Processing', f,'..')
current_feature_number=current_feature_number+1
if data.dataframe[f].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[f].unique().shape[0]<quantiles:
summarized=data.dataframe[[f, data.target]].groupby([f]).agg(['mean', 'size'])
else:
if data.dataframe[f].dropna().shape[0]<min_size*quantiles:
current_quantiles=int(data.dataframe[f].dropna().shape[0]/min_size)
if current_quantiles==0:
                            print('The number of non-missing observations is less than', min_size,'. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
                                worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'The number of non-missing observations is less than '+str(min_size)+'. No trend to visualize.')
continue
else:
print('Too few non-missing observations for', quantiles, 'quantiles. Calculating', current_quantiles, 'quantiles..')
else:
current_quantiles=quantiles
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
small_quantiles=summarized[data.target][summarized[data.target]['size']<min_size]['size']
#display(small_quantiles)
if small_quantiles.shape[0]>0:
current_quantiles=int(small_quantiles.sum()/min_size)+summarized[data.target][summarized[data.target]['size']>=min_size].shape[0]
                        print('There are quantiles with size less than', min_size,'. Attempting', current_quantiles, 'quantiles..')
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index()
if pd.isnull(data.dataframe[f]).any():
with_na=data.dataframe[[f,data.target]][pd.isnull(data.dataframe[f])]
summarized.loc[-1]=[np.nan, with_na[data.target].mean(), with_na.shape[0]]
summarized=summarized.sort_index().reset_index(drop=True)
if summarized.shape[0]==1:
print('Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
continue
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
ax.set_ylabel('Observations')
# blue is for the distribution
if summarized.shape[0]>hide_every_even_tick_from:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=60, ha="right")
xticks = ax.xaxis.get_major_ticks()
for i in range(len(xticks)):
if i%2==0:
xticks[i].label1.set_visible(False)
else:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=45, ha="right")
ax.bar(range(summarized.shape[0]), summarized['size'], zorder=0, alpha=0.3)
ax.grid(False)
ax.grid(axis='y', zorder=1, alpha=0.6)
ax2 = ax.twinx()
ax2.set_ylabel('Target Rate')
ax2.grid(False)
#display(summarized)
if magnify_trend:
ax2.set_ylim([0, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))])
for i in range(len(summarized['mean'])):
if summarized['mean'][i]>np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size'])):
ax2.annotate(str(round(summarized['mean'][i],4)),
xy=(i, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
xytext=(i, np.average(summarized['mean'], weights=summarized['size'])+(magnify_std_number+0.05)*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
rotation=60,
ha='left',
va='bottom',
color='red',
size=8.5
)
# red is for the target rate values
ax2.plot(range(summarized.shape[0]), summarized['mean'], 'ro-', linewidth=2.0, zorder=4)
if out==True or isinstance(out, str):
plt.savefig(out_images+f+".png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+f+".png").size[1]
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.insert_image((current_feature_number-1)*(plot_cells+1)+1, 0, out_images+f+".png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
if isinstance(out, str):
workbook.close()
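# A hypothetical usage sketch (quantile count and output file are assumptions):
#
#   ttv = TargetTrendVisualizer()
#   ttv.work(data, quantiles=50, magnify_trend=True, min_size=10,
#            out='target_trends.xlsx', out_images='TargetTrendVisualizer/')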
class CorrelationAnalyzer(Processor):
'''
Produces correlation analysis
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = True, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
drop_with_most_correlations=True, verbose=False, out_before=None, out_after=None, sep=';', cdict = None):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features.
For each highly correlated pair the algorithm chooses the less significant feature and adds it to the delete list.
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples, train sample will be checked)
drop_features: permission to delete correlated features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
drop_with_most_correlations: should the features with the highest number of correlations be excluded first (otherwise just with any number of correlations and the lowest gini)
verbose: flag for detailed output
out_before: file name for export of correlation table before feature exclusion (.csv and .xlsx types are supported)
out_after: file name for export of correlation table after feature exclusion (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
--------
Resulting Data or DataSamples object and the correlation table
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'method' : [method], 'out_before' : out_before, 'out_after' : out_after})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features == [] or features is None:
candidates = sample.features.copy()
else:
candidates = features.copy()
features_to_drop = []
correlations = sample.dataframe[candidates].corr(method = method)
cor_out=correlations.copy()
if cdict is None:
cdict = {'red' : ((0.0, 0.9, 0.9),
(0.5, 0.05, 0.05),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.8, 0.8),
(1.0, 0.0, 0.0)),
'blue' : ((0.0, 0.1, 0.1),
(0.5, 0.1, 0.1),
(1.0, 0.1, 0.1))}
#edited 21.08.2018 by <NAME> - added verbose variant, optimized feature dropping
# edited on Dec-06-18 by <NAME>: added png
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
if out_before is not None:
out_before_png = 'corr_before.png'
if out_before[-4:]=='.csv':
draw_corr.round(2).to_csv(out_before, sep = sep)
out_before_png = out_before[:-4] + '.png'
elif out_before[-5:]=='.xlsx' or out_before[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_before, engine='openpyxl', sheet_name='Correlation (before)')
out_before_png = out_before[:-5] + '.png' if out_before[-5:]=='.xlsx' else out_before[:-4] + '.png'
elif out_before[-4:]=='.png':
out_before_png = out_before
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
fig_before = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_before.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_before.savefig(out_before_png, bbox_inches='tight')
plt.close()
self.stats['out_before'] = out_before_png
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
to_check_correlation=True
while to_check_correlation:
to_check_correlation=False
corr_number={}
significantly_correlated={}
for var in correlations:
var_corr=correlations[var].apply(lambda x: abs(x))
var_corr=var_corr[(var_corr.index!=var) & (var_corr>threshold)].sort_values(ascending=False).copy()
corr_number[var]=var_corr.shape[0]
significantly_correlated[var]=str(var_corr.index.tolist())
if drop_with_most_correlations:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]==max({x:corr_number[x] for x in corr_number if x not in features_to_leave}.values()) and corr_number[x]>0 and x not in features_to_leave}
else:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]>0 and x not in features_to_leave}
if len(with_correlation)>0:
feature_to_drop=min(with_correlation, key=with_correlation.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high correlation with features: %(f)s (Gini=%(g)0.2f)' % {'v':feature_to_drop, 'f':significantly_correlated[feature_to_drop], 'g':with_correlation[feature_to_drop]})
correlations=correlations.drop(feature_to_drop,axis=1).drop(feature_to_drop,axis=0).copy()
to_check_correlation=True
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
out_after_png = 'corr_after.png'
if out_after is not None:
if out_after[-4:]=='.csv':
draw_corr.round(2).to_csv(out_after, sep = sep)
out_after_png = out_after[:-4] + '.png'
elif out_after[-5:]=='.xlsx' or out_after[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_after, engine='openpyxl', sheet_name='Correlation (after)')
out_after_png = out_after[:-5] + '.png' if out_after[-5:]=='.xlsx' else out_after[:-4] + '.png'
elif out_after[-4:]=='.png':
out_after_png = out_after
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
#sns.heatmap(draw_corr.round(2), annot = True, cmap = 'RdBu_r', cbar = False, center = 0).figure.savefig(out_after_png, bbox_inches='tight')
fig_after = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_after.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_after.savefig(out_after_png, bbox_inches='tight')
plt.close()
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
self.stats['out_after'] = out_after_png
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
if verbose:
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, cor_out
def find_correlated_groups(self, data, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
verbose=False, figsize=(12,12), corr_graph_type='connected'):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features and
returns groups of significantly correlated features
Parameters
-----------
        data: a Data or DataSamples object to check (in case of DataSamples, its train sample will be checked)
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be included in analysis
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
verbose: flag for detailed output
figsize: the size of correlation connections graph (printed if verbose)
        corr_graph_type: type of connectivity to pursue in finding groups of correlated features
            'connected' - groups are formed from features directly or indirectly connected by significant correlation
'complete' - groups are formed from features that are directly connected to each other by significant
correlation (each pair of features from a group will have a significant connection)
Returns
--------
a list of lists representing correlated group
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if features == [] or features is None:
candidates = [x for x in sample.features if x not in features_to_leave]
else:
candidates = [x for x in features if x not in features_to_leave]
correlations = sample.dataframe[candidates].corr(method = method)
if verbose:
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
display(draw_corr.round(2).style.applymap(color_digits,threshold_red=threshold))
G=nx.Graph()
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if correlations.loc[correlations.columns[i], correlations.columns[j]]>threshold:
G.add_nodes_from([correlations.columns[i], correlations.columns[j]])
G.add_edge(correlations.columns[i], correlations.columns[j], label=str(round(correlations.loc[correlations.columns[i], correlations.columns[j]],3)))
if verbose:
plt.figure(figsize=(figsize[0]*1.2, figsize[1]))
pos = nx.spring_layout(G, k=100)
edge_labels = nx.get_edge_attributes(G,'label')
nx.draw(G, pos, with_labels=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labels)
plt.margins(x=0.2)
plt.show()
correlated_groups=[]
if corr_graph_type=='connected':
for x in nx.connected_components(G):
correlated_groups.append(sorted(list(x)))
elif corr_graph_type=='complete':
for x in nx.find_cliques(G):
correlated_groups.append(sorted(x))
else:
print('Unknown correlation graph type. Please use "connected" or "complete". Return None.')
return None
return correlated_groups
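# A hypothetical usage sketch (threshold and method values are assumptions). Note that
# work() expects ginis to be pre-calculated on the train sample (see the check at its top):
#
#   ca = CorrelationAnalyzer()
#   reduced, corr_table = ca.work(datasamples, threshold=0.6, method='spearman',
#                                 drop_with_most_correlations=True, verbose=True)
#   groups = ca.find_correlated_groups(datasamples, threshold=0.6,
#                                      corr_graph_type='connected')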
#---------------------------------------------------------------
class VIF(Processor):
'''
Calculates variance inflation factor for each feature
'''
def __init__(self):
self.stats = | pd.DataFrame() | pandas.DataFrame |
"""
database.py
Routines for managing a spectral line database.
TODO - set up routines for a persistent database
"""
import os
import warnings
try:
import tables
from tables import IsDescription, open_file
from tables import StringCol, Int64Col, Float64Col
except ImportError:
warnings.warn(f"PyTables is not installed correctly!")
import tinydb
from tinydb.middlewares import CachingMiddleware
from tinydb.storages import JSONStorage
import pandas as pd
from pyspectools import parsers
from pyspectools import spectra
class SpectralCatalog(tinydb.TinyDB):
"""
Grand unified experimental catalog. Stores assignment and uline information
across the board.
"""
def __init__(self, dbpath=None):
if dbpath is None:
dbpath = os.path.expanduser("~/.pyspectools/pyspec_experiment.db")
super().__init__(
dbpath,
sort_keys=True,
indent=4,
separators=(",", ": "),
storage=CachingMiddleware(JSONStorage),
)
def __exit__(self, exc_type, exc_value, traceback):
"""
        Context-manager exit hook: ensures the cached database is flushed to disk
        when the catalog is used in a `with` block.
"""
self.close()
def add_entry(self, assignment, dup_check=True):
"""
        This function adds a Transition object to an existing database. The method will
check for duplicates before adding.
Parameters
----------
assignment - Transition object
            Reference to a Transition object
dup_check - bool, optional
If True (default), will check to make sure the Transition object doesn't already exist in
the database.
"""
add = False
if type(assignment) != dict:
new_entry = assignment.__dict__
else:
new_entry = assignment
if dup_check is True:
if any([new_entry == entry for entry in self.all()]) is False:
add = True
else:
warnings.warn("Entry already exists in database.")
else:
add = True
if add is True:
self.insert(new_entry)
def add_catalog(self, catalog_path, name, formula, **kwargs):
"""
Load a SPCAT catalog file into the database. Creates independent Transition objects
from each line of the catalog file. Kwargs are passed into the Transition object,
which will allow additional settings for the Transition object to be accessed.
:param catalog_path:
:param name:
:param formula:
:param kwargs:
:return:
"""
        # check if the name and formula exist already
exist_df = self.search_molecule(name)
cat_df = parsers.parse_cat(catalog_path)
if exist_df is not None:
# drop all of the entries that are already in the catalog
exist_freqs = exist_df["frequency"].values
cat_df = cat_df.loc[~cat_df["Frequency"].isin(list(exist_freqs)),]
assign_dict = {"name": name, "formula": formula}
assign_dict.update(**kwargs)
# slice out only the relevant information from the dataframe
select_df = cat_df[["Frequency", "Intensity", "Lower state energy"]]
select_df.columns = ["catalog_frequency", "catalog_intensity", "ustate_energy"]
select_dict = select_df.to_dict(orient="records")
# update each line with the common data entries
assignments = [
spectra.assignment.Transition(**line, **assign_dict).__dict__
for line in select_dict
]
# Insert all of the documents en masse
self.insert_multiple(assignments)
def search_frequency(self, frequency, freq_prox=0.1, freq_abs=True, dataframe=True):
"""\
:param frequency: float, center frequency to search for in the database
:param freq_prox: float, search range tolerance. If freq_abs is True, the absolute value is used (in MHz).
Otherwise, freq_prox is a decimal percentage of the frequency.
:param freq_abs: bool, dictates whether the absolute value of freq_prox is used.
:return:
"""
frequency = float(frequency)
if freq_abs is True:
min_freq = frequency - freq_prox
max_freq = frequency + freq_prox
else:
min_freq = frequency * (1 - freq_prox)
max_freq = frequency * (1 + freq_prox)
Entry = tinydb.Query()
matches = self.search(
(Entry["frequency"] <= max_freq) & (min_freq <= Entry["frequency"])
| (Entry["catalog_frequency"] <= max_freq)
& (min_freq <= Entry["catalog_frequency"])
)
if len(matches) != 0:
if dataframe is True:
return | pd.DataFrame(matches) | pandas.DataFrame |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
import pandas as pd
import argparse
import glob
from scipy.stats import ttest_ind
# %%
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='MILP instance type to process.',
choices=['setcover', 'cauctions', 'facilities', 'indset'],
default="facilities"
)
parser.add_argument(
'--samplingStrategies',
        # note: no `choices` restriction here - the value is eval'ed into a list below,
        # so it must be passed as a python list literal rather than a single strategy name
        help="List of sampling strategies as a python list literal, e.g. \"['uniform5', 'depthK']\"",
        default="['uniform5', 'depthK']"
)
args = parser.parse_args()
# %%
resultDir = 'results'
problem = args.problem
targets = eval(args.samplingStrategies)
metric_columns = ['acc@1','acc@3','acc@5','acc@10']
# %%
samplingStragety1 = targets[0]
samplingStragety2 = targets[1]
targetfiles_strategy1 = glob.glob(f'{resultDir}/{problem}_{samplingStragety1}_*')
targetfile1 = targetfiles_strategy1[-1]
target_df1 = pd.read_csv(targetfile1)
mean1 = target_df1[metric_columns].mean()
std1 = target_df1[metric_columns].std()
targetfiles_strategy2 = glob.glob(f'{resultDir}/{problem}_{samplingStragety2}_*')
targetfile2 = targetfiles_strategy2[-1]
target_df2 = | pd.read_csv(targetfile2) | pandas.read_csv |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# for time measurement
from datetime import datetime
import re
import os
import pickle
import urllib.request
import xml.etree.ElementTree as ET
# OAI-PMH client library
from sickle import Sickle
# data science imports, the usual suspects
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# general configuration
# enables verbose output during processing
verbose = True
# override locally stored temporary files, re-download files etc.; should be True during first run
forceOverride = True
# static URL pattern for Stabi's digitized collection downloads
metaDataDownloadURLPrefix = "https://content.staatsbibliothek-berlin.de/dc/"
# Berlin State Library internal setting
runningFromWithinStabi = False
# error log file name
errorLogFileName = "oai-analyzer_error.log"
# analysis path prefix
analysisPrefix = "analysis/"
# temporary downloads prefix
tempDownloadPrefix = "oai-analyzer_downloads/"
# file where all retrieved PPNs will be saved to
ppnFileName = analysisPrefix + "ppn_list.log"
# file where all retrieved *ambiguous* PPNs will be saved to
ambiguousPPNFileName = analysisPrefix + "ppn_ambiguous_list.csv"
# True if downloaded METS/MODS documents have to be kept after processing
keepMETSMODS=False
# file path for metadata record pickle
metadataRecordPicklePath = "save_120k_dc_all.pickle"
# path to the DB file
sqlDBPath=analysisPrefix+"oai-analyzer.db"
# do not change the following values
# XML namespace of MODS
modsNamespace = "{http://www.loc.gov/mods/v3}"
def printLog(text):
now = str(datetime.now())
print("[" + now + "]\t" + text)
# forces to output the result of the print command immediately, see: http://stackoverflow.com/questions/230751/how-to-flush-output-of-python-print
sys.stdout.flush()
def isValidPPN(ppn):
rePattern = "^PPN\d+[0-9X]?"
p = re.compile(rePattern, re.IGNORECASE)
if p.match(ppn):
return True
else:
return False
def downloadMETSMODS(currentPPN):
"""
Tries to download a METS/MODS file associated with a given PPN.
    ATTENTION! Calls should be wrapped in a try-except block as this function does not handle network errors etc.
:param currentPPN: The PPN for which the METS/MODS file shall be retrieved.
:return: The path to the downloaded file.
"""
# download the METS/MODS file first in order to find the associated documents
currentDownloadURL = metaDataDownloadURLPrefix + currentPPN + ".mets.xml"
metsModsPath = tempDownloadPrefix + currentPPN + ".xml"
if runningFromWithinStabi:
proxy = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)
urllib.request.urlretrieve(currentDownloadURL, metsModsPath)
return metsModsPath
def parseOriginInfo(child):
"""
Parses an originInfo node and its children
:param child: The originInfo child in the element tree.
:return: A dict with the parsed information or None if the originInfo is invalid.
"""
discardNode = True
result = dict()
result["publisher"] = ""
# check if we can directly process the node
if "eventType" in child.attrib:
if child.attrib["eventType"] == "publication":
discardNode = False
else:
        # we have to check if the originInfo contains an edition node with "[Electronic ed.]" to discard the node
        children = list(child)  # Element.getchildren() was removed in Python 3.9
hasEdition = False
for c in children:
if c.tag == modsNamespace + "edition":
hasEdition = True
if c.text == "[Electronic ed.]":
discardNode = True
else:
discardNode = False
if not hasEdition:
discardNode = False
if discardNode:
return None
else:
        for c in child:
cleanedTag = c.tag.replace(modsNamespace, "")
if cleanedTag == "place":
result["place"] = c.find("{http://www.loc.gov/mods/v3}placeTerm").text.strip()
if cleanedTag == "publisher":
result["publisher"] = c.text.strip()
# check for the most important date (see https://www.loc.gov/standards/mods/userguide/origininfo.html)
if "keyDate" in c.attrib:
result["date"] = c.text.strip()
return result
def parseTitleInfo(child):
result = dict()
result["title"]=""
result["subTitle"]=""
    for c in child:
cleanedTag = c.tag.replace(modsNamespace, "")
result[cleanedTag]=c.text.strip()
return result
def parseLanguage(child):
result = dict()
result["language"]=""
    for c in child:
cleanedTag = c.tag.replace(modsNamespace, "")
if cleanedTag=="languageTerm":
result["language"]=c.text.strip()
return result
def parseName(child):
result=dict()
role=""
name=""
    for c in child:
cleanedTag = c.tag.replace(modsNamespace, "")
if cleanedTag=="role":
            for c2 in c:
ct=c2.tag.replace(modsNamespace, "")
if ct=="roleTerm":
role=c2.text.strip()
elif cleanedTag=="displayForm":
name=c.text.strip()
result[role]=name
return result
def parseAccessCondition(child):
result = dict()
result["access"]=child.text.strip()
return result
def processMETSMODS(currentPPN, metsModsPath):
"""
Processes a given METS/MODS file.
:param currentPPN: the current PPN
:param metsModsPath: path to the METS/MODS file
:return: A dataframe with the parsing results.
"""
# parse the METS/MODS file
tree = ET.parse(metsModsPath)
root = tree.getroot()
# only process possibly interesting nodes, i.e.,
nodesOfInterest = ["originInfo", "titleInfo", "language", "name", "accessCondition"]
# stores result dicts created by various parsing function (see below)
resultDicts=[]
# master dictionary, later used for the creation of a dataframe
masterDict={'publisher':"",'place':"",'date':"",'title':"",'subTitle':"",'language':"",'aut':"",'rcp':"",'fnd':"",'access':"",'altoPaths':""}
# find all mods:mods nodes
for modsNode in root.iter(modsNamespace + 'mods'):
for child in modsNode:
# strip the namespace
cleanedTag = child.tag.replace(modsNamespace, "")
#print(cleanedTag)
#print(child)
if cleanedTag in nodesOfInterest:
if cleanedTag == "originInfo":
r = parseOriginInfo(child)
if r:
resultDicts.append(r)
elif cleanedTag=="titleInfo":
r = parseTitleInfo(child)
if r:
resultDicts.append(r)
elif cleanedTag=="language":
r = parseLanguage(child)
if r:
resultDicts.append(r)
elif cleanedTag=="name":
r = parseName(child)
if r:
resultDicts.append(r)
elif cleanedTag=="accessCondition":
r = parseAccessCondition(child)
if r:
resultDicts.append(r)
# we are only interested in the first occuring mods:mods node
break
# get all ALTO file references
altoHrefs=[]
for fileSec in root.iter('{http://www.loc.gov/METS/}fileSec'):
for child in fileSec.iter('{http://www.loc.gov/METS/}fileGrp'):
currentUse=child.attrib['USE']
for fileNode in child.iter('{http://www.loc.gov/METS/}file'):
if currentUse == 'FULLTEXT':
for fLocat in fileNode.iter('{http://www.loc.gov/METS/}FLocat'):
if (fLocat.attrib['LOCTYPE'] == 'URL'):
href = fLocat.attrib['{http://www.w3.org/1999/xlink}href']
altoHrefs.append(href)
r["altoPaths"]=";".join(altoHrefs)
resultDicts.append(r)
# copy results to the master dictionary
for result in resultDicts:
for key in result:
masterDict[key]=[result[key]]
masterDict["ppn"]=[currentPPN]
return pd.DataFrame(data=masterDict)
def convertSickleRecordsToDataFrame(sickleRecords):
availableKeys = dict()
# check for all keys present in the previously downloaded dataset
for i, r in enumerate(sickleRecords):
for k in r.keys():
if not k in availableKeys:
availableKeys[k] = 1
else:
availableKeys[k] = availableKeys[k] + 1
# print(availableKeys)
# create a dictionary for the records
values = dict()
# take the keys as they have found within the downloaded OAI records
keys = availableKeys.keys()
# for every metadata field, create an empty array as the content of the dictionary filed under the key 'k'
for k in keys:
values[k] = []
# in addition, store the PPN (the SBB's unique identifier for digitized content)
values["PPN"] = []
# under circumstances the identifier field of the DC records might be ambiguous, these records are listed here
ambiguousPPNRecords = []
# iterate over all saved records
for record in sickleRecords:
        # we cannot iterate over the keys of record.metadata directly because not all records contain the same fields,...
for k in keys:
# thus we check if the metadata field 'k' has been created above
if k in values:
# append the metadata fields to the dictionary created above
# if the metadata field 'k' is not available input "None" instead
if k in record:
value = record.get(k)[0]
if value:
if value.isdigit():
value = int(value)
else:
# p27 value=value.encode('ISO-8859-1')
# value = value.encode('ISO-8859-1').decode("utf-8", "backslashreplace")
pass
values[k].append(value)
# get the PPN and fix issues with it
if k == "identifier":
if len(record["identifier"]) > 1:
# sometimes there is more than one identifier provided
# check if it is a valid PPN
candidates = [str(record.get(k)[0]), str(record.get(k)[1])]
candidateIndex = 0
candidateCount = 0
i = 0
for c in candidates:
if c.startswith("PPN"):
candidateIndex = i
candidateCount += 1
else:
i += 1
ppn = str(record.get(k)[1])
if candidateCount >= 1:
# print("\tCANDIDATE CONFLICT SOLVED AS: " + candidates[candidateIndex])
# print("\t\t" + str(record.get(k)[0]))
# print("\t\t" + str(record.get(k)[1]))
ambiguousPPNRecords.append(candidates)
ppn = candidates[0]
else:
ppn = str(record.get(k)[0])
values["PPN"].append(ppn)
else:
values[k].append(np.nan)
# create a data frame
df = pd.DataFrame(values)
df['date'] = pd.to_numeric(df['date'], errors='ignore', downcast='integer')
return (df, ambiguousPPNRecords)
def createSupplementaryDirectories():
if not os.path.exists(analysisPrefix):
if verbose:
print("Creating " + analysisPrefix)
os.mkdir(analysisPrefix)
if not os.path.exists(tempDownloadPrefix):
if verbose:
print("Creating " + tempDownloadPrefix)
os.mkdir(tempDownloadPrefix)
if __name__ == "__main__":
# connect to a metadata repository
sickle = Sickle('https://oai.sbb.berlin/oai')
records = sickle.ListRecords(metadataPrefix='oai_dc', set='all')
createSupplementaryDirectories()
errorFile = open(errorLogFileName, "w")
savedRecords = []
# maximum number of downloaded records
# 2:15 h for 100k
    maxDocs = 1000  # 1000 is a small value for testing; increase it for more interesting results. ATTENTION! this will also take more time for downloading and reading data.
if forceOverride:
printLog("Starting OAI record download...")
# initialize some variables for counting and saving the metadata records
savedDocs = 0
# save the records locally as we don't want to have to rely on a connection to the OAI-PMH server all the time
# iterate over all records until maxDocs is reached
# ATTENTION! if you re-run this cell, the contents of the savedRecords array will be altered!
try:
for record in records:
# check if we reach the maximum document value
if savedDocs < maxDocs:
savedDocs = savedDocs + 1
# save the current record to the "savedRecords" array
savedRecords.append(record.metadata)
if savedDocs % 1000 == 0:
printLog("Downloaded %d of %d records." % (savedDocs, maxDocs))
# if so, end the processing of the for-loop
else:
break # break ends the processing of the loop
except Exception as ex:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(ex).__name__, ex.args)
errorFile.write(message + "\n")
printLog("Finished OAI download of " + str(len(savedRecords)) + " records.")
pickle.dump(savedRecords, open(metadataRecordPicklePath, "wb"))
# if savedRecords is empty, we have to load the data from the file system
if not savedRecords:
if os.path.exists(metadataRecordPicklePath):
printLog("Restoring metadata records from " + metadataRecordPicklePath)
savedRecords = pickle.load(open(metadataRecordPicklePath, "rb"))
printLog("Done.")
else:
printLog("Could not depickle metadata records. Re-run with forceOverride option.")
results = convertSickleRecordsToDataFrame(savedRecords)
df = results[0]
ambiguousPPNs = results[1]
# save PPN list
df["PPN"].to_csv(ppnFileName, sep=';', index=False)
# test ambiguous PPNs and save results to a separate file
printLog("Testing ambiguous PPNs.")
ambigPPNFile = open(ambiguousPPNFileName, "w")
ambigPPNFile.write("PPN_1;RESULT_1;PPN_2;RESULT_2;COMMENTS\n")
for testPPNs in ambiguousPPNs:
line = ""
for ppn in testPPNs:
# could it be a PPN?
# if ppn.startswith("PPN"):
# line+=ppn+";"+"OK;"
# else:
# line += ppn + ";" + "NO!;"
line += ppn + ";" + str(isValidPPN(ppn)) + ";"
line += "\n"
ambigPPNFile.write(line)
ambigPPNFile.close()
# process all retrieved PPNs
ppns = df["PPN"].values.tolist()
#debug
#ppns = df["PPN"].values.tolist()[0:1000]
forceOverridePossible=False
if os.path.exists(analysisPrefix + "analyticaldf.xlsx"):
forceOverridePossible=True
if forceOverride:#and forceOverridePossible:
#if True:
printLog("Processing METS/MODS documents.")
resultDFs=[]
processedDocs=0
maxDocs=len(ppns)
for ppn in ppns:
currentMETSMODS = None
processedDocs+=1
if processedDocs % 1000 == 0:
printLog("\tProcessed %d of %d METS/MODS documents." % (processedDocs, maxDocs))
# debug
#tempDF=pd.concat(resultDFs, sort=False)
#tempDF.to_excel(analysisPrefix + "analyticaldf_TEMP.xlsx", index=False)
try:
# debug
#ppn="PPN74616453X"
currentMETSMODS = downloadMETSMODS(ppn)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(ex).__name__, ex.args)
errorFile.write(ppn + "\t" + message + "\n")
if currentMETSMODS:
currentDF=processMETSMODS(ppn, currentMETSMODS)
#debug
#currentDF.to_csv(analysisPrefix + "debug.csv",sep=';',index=False)
resultDFs.append(currentDF)
#raise (SystemExit)
if not keepMETSMODS:
os.remove(currentMETSMODS)
analyticalDF=pd.concat(resultDFs,sort=False)
# store the results permanently
analyticalDF.to_csv(analysisPrefix + "analyticaldf.csv",sep=';',index=False)
analyticalDF.to_excel(analysisPrefix + "analyticaldf.xlsx", index=False)
else:
printLog("Read METS/MODS analysis table from: "+analysisPrefix + "analyticaldf.xlsx")
analyticalDF=pd.read_excel(analysisPrefix + "analyticaldf.xlsx")
print(analyticalDF.columns)
ocrPPNs=[]
# read in OCR'ed PPNs
with open('../ppn_lists/media_with_ocr.csv') as f:
lines = f.readlines()
for line in lines:
line_split = line.split(' ')
ppn_cleaned = line_split[len(line_split) - 1].rstrip()
ocrPPNs.append(ppn_cleaned)
f.close()
# create a dataframe from the OCR PPN list
ocrDF=pd.DataFrame({"ppn":ocrPPNs})
# join the two dataframes to discover all documents that got OCR'ed
joinedDF= | pd.merge(analyticalDF,ocrDF,on='ppn') | pandas.merge |
import pull_mdsplus as pull
import pandas as pd
import numpy as np
import meas_locations as geo
import MDSplus as mds
import itertools
from scipy import interpolate
def load_gfile_mds(shot, time, tree="EFIT01", exact=False, connection=None, tunnel=True):
"""
This is scavenged from the load_gfile_d3d script on the EFIT repository,
except updated to run on python3.
shot: Shot to get gfile for.
time: Time of the shot to load gfile for, in ms.
tree: One of the EFIT trees to get the data from.
exact: If True will raise error if time does not exactly match any gfile
times. False will grab the closest time.
connection: An MDSplus connection to atlas.
tunnel: Set to True if accessing outside DIII-D network.
returns: The requested gfile as a dictionary.
"""
# Connect to server, open tree and go to g-file
if connection is None:
if tunnel is True:
connection = mds.Connection("localhost")
else:
connection = mds.Connection('atlas.gat.com')
connection.openTree(tree, shot)
base = 'RESULTS:GEQDSK:'
# get time slice
print("\nLoading gfile:")
print(" Shot: " + str(shot))
print(" Tree: " + tree)
print(" Time: " + str(time))
signal = 'GTIME'
k = np.argmin(np.abs(connection.get(base + signal).data() - time))
time0 = int(connection.get(base + signal).data()[k])
if (time != time0):
if exact:
raise RuntimeError(tree + ' does not exactly contain time %.2f' %time + ' -> Abort')
else:
print('Warning: ' + tree + ' does not exactly contain time %.2f' %time + ' the closest time is ' + str(time0))
print('Fetching time slice ' + str(time0))
time = time0
# store data in dictionary
g = {'shot': shot, 'time': time}
# get header line
header = connection.get(base + 'ECASE').data()[k]
# get all signals, use same names as in read_g_file
translate = {'MW': 'NR', 'MH': 'NZ', 'XDIM': 'Xdim', 'ZDIM': 'Zdim', 'RZERO': 'R0',
'RMAXIS': 'RmAxis', 'ZMAXIS': 'ZmAxis', 'SSIMAG': 'psiAxis', 'SSIBRY': 'psiSep',
'BCENTR': 'Bt0', 'CPASMA': 'Ip', 'FPOL': 'Fpol', 'PRES': 'Pres',
'FFPRIM': 'FFprime', 'PPRIME': 'Pprime', 'PSIRZ': 'psiRZ', 'QPSI': 'qpsi',
'NBBBS': 'Nlcfs', 'LIMITR': 'Nwall'}
for signal in translate:
g[translate[signal]] = connection.get(base + signal).data()[k]
g['R1'] = connection.get(base + 'RGRID').data()[0]
g['Zmid'] = 0.0
RLIM = connection.get(base + 'LIM').data()[:, 0]
ZLIM = connection.get(base + 'LIM').data()[:, 1]
g['wall'] = np.vstack((RLIM, ZLIM)).T
RBBBS = connection.get(base + 'RBBBS').data()[k][:int(g['Nlcfs'])]
ZBBBS = connection.get(base + 'ZBBBS').data()[k][:int(g['Nlcfs'])]
g['lcfs'] = np.vstack((RBBBS, ZBBBS)).T
KVTOR = 0
RVTOR = 1.7
NMASS = 0
RHOVN = connection.get(base + 'RHOVN').data()[k]
# convert floats to integers
for item in ['NR', 'NZ', 'Nlcfs', 'Nwall']:
g[item] = int(g[item])
# convert single (float32) to double (float64) and round
for item in ['Xdim', 'Zdim', 'R0', 'R1', 'RmAxis', 'ZmAxis', 'psiAxis', 'psiSep', 'Bt0', 'Ip']:
g[item] = np.round(np.float64(g[item]), 7)
# convert single arrays (float32) to double arrays (float64)
for item in ['Fpol', 'Pres', 'FFprime', 'Pprime', 'psiRZ', 'qpsi', 'lcfs', 'wall']:
g[item] = np.array(g[item], dtype=np.float64)
# Construct (R,Z) grid for psiRZ
g['dR'] = g['Xdim']/(g['NR'] - 1)
g['R'] = g['R1'] + np.arange(g['NR'])*g['dR']
g['dZ'] = g['Zdim']/(g['NZ'] - 1)
NZ2 = int(np.floor(0.5*g['NZ']))
g['Z'] = g['Zmid'] + np.arange(-NZ2, NZ2+1)*g['dZ']
# normalize psiRZ
g['psiRZn'] = (g['psiRZ'] - g['psiAxis']) / (g['psiSep'] - g['psiAxis'])
return g
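# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Shows the intended call pattern for load_gfile_mds(). The shot number and time below
# are hypothetical placeholders; tunnel=True assumes an ssh tunnel to atlas on localhost.
def _example_load_gfile():
    g = load_gfile_mds(shot=167196, time=3000, tree="EFIT01", tunnel=True)
    print("Magnetic axis: R = %.3f m, Z = %.3f m" % (g['RmAxis'], g['ZmAxis']))
    print("psi on axis = %.4f, psi at separatrix = %.4f" % (g['psiAxis'], g['psiSep']))
    return g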
def rbs_into_df(number, probe, conn, start=2500, end=5000, step=500, verbal=False):
"""
Pulls RBS data from the MDSplus tree 'dp_probes' and puts it into a
    DataFrame ready for analysis. Requires ssh to r2d2 if remote.
number: Probe number.
probe: One of A, B or C.
conn: An MDSplus Connection returned via the pull.thin_connect function.
start: Start of time that will be analyzed (i.e. the first gfile loaded).
end: End of time for analysis (i.e. the last gfile loaded).
step: Time step for the above.
    returns: Two DataFrames (one each for the U and D sides), formatted and ready to be
    filled with data (R-Rsep, R-Rsep_omp, etc.), along with rprobe.
"""
# Create array of times to be sampled.
times = np.arange(start, end, step)
# Get shots probe was in for and Rprobe. Same for U and D sides, obviously.
shots = pull.pull_shots(conn, probe + 'U', verbal=verbal)
rprobe = pull.pull_rprobe(conn, probe + 'U', probe_corr=True, verbal=verbal)
print("Shots to be analyzed: " + str(shots))
# Then pull the RBS data.
print('\nLoading ' + probe + 'U' + str(number) + ' data...')
rbs_dict_U = pull.pull_all_rbs(conn, number, probe + 'U', verbal=verbal)
print('\nLoading ' + probe + 'D' + str(number) + ' data...')
rbs_dict_D = pull.pull_all_rbs(conn, number, probe + 'D', verbal=verbal)
# Now prepare the DataFrame. Will have set of data at each time, at each
# shot. So essentially len(times)*len(shots) DataFrames stacked together.
rbs_df_U = pd.DataFrame(rbs_dict_U)
rbs_df_D = pd.DataFrame(rbs_dict_D)
# Want 'locs' as an index.
rbs_df_U.set_index('locs', inplace=True)
rbs_df_D.set_index('locs', inplace=True)
# Create set of DataFrames, len(times) of them, to be 'stacked' on top of each other.
rbs_df_U = pd.concat(list(itertools.repeat(rbs_df_U, len(times))), keys=times, names=['times'])
rbs_df_D = pd.concat(list(itertools.repeat(rbs_df_D, len(times))), keys=times, names=['times'])
# Now do it again, except with shots.
rbs_df_U = pd.concat(list(itertools.repeat(rbs_df_U, len(shots))), keys=shots, names=['shots'])
rbs_df_D = pd.concat(list(itertools.repeat(rbs_df_D, len(shots))), keys=shots, names=['shots'])
return rbs_df_U, rbs_df_D, rprobe
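# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Intended call pattern for rbs_into_df(). `conn` must be a thin r2d2 connection from
# pull.thin_connect (see pull_mdsplus); the probe number/letter here are hypothetical.
def _example_rbs_into_df(conn):
    rbs_df_U, rbs_df_D, rprobe = rbs_into_df(2, 'A', conn, start=2500, end=5000, step=500)
    print(rbs_df_U.head())
    return rbs_df_U, rbs_df_D, rprobe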
def fill_in_rbs_df(rbs_df_U, rbs_df_D, probe, rprobe, conn, verbal=False):
"""
    Takes the rbs_df from above and fills it in with R-Rsep, R-Rsep_omp, etc. It
    returns all of it, so that it may then be averaged and the std. dev. computed
    after all the data collection has taken place. Requires ssh to atlas if remote.
rbs_df_U: The DataFrame returned from rbs_into_df. Likewise for D.
probe: One of A, B or C.
rprobe: Radial position of probe tip returned from rbs_into_df.
conn: An MDSplus Connection object from the mds.Connection function (different
procedure compared to connecting to r2d2).
returns: Filled in rbs_df.
"""
if verbal:
print("Analyzing atlas relevant data...")
# Get the shots, times and locs from the rbs_df index. np.unique will sort
# the locs (don't want), so returning the indices and reordering will fix this.
shots = np.unique(rbs_df_U.index.get_level_values('shots').values)
times = np.unique(rbs_df_U.index.get_level_values('times').values)
locs_U, order_U = np.unique(rbs_df_U.index.get_level_values('locs').values, return_index=True)
locs_D, order_D = np.unique(rbs_df_D.index.get_level_values('locs').values, return_index=True)
locs_U = locs_U[order_U]
locs_D = locs_D[order_D]
# Extra columns to be filled out.
rbs_df_U['R-Rsep (cm)'] = pd.Series(); rbs_df_D['R-Rsep (cm)'] = | pd.Series() | pandas.Series |
# cvworkflow/kkcalcfunctions.py
import kkcalc
from kkcalc import data
from kkcalc import kk
import numpy as np
import pandas as pd
import matplotlib
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def kkcalc_convert(file_path, *, chemical_formula, density, min_ev, max_ev, load_options=None,input_data_type='Beta',add_background=False,fix_distortions=False,curve_tolerance=None,curve_recursion=50):
"""
This file is part of the Kramers-Kronig Calculator software package.
Copyright (c) 2013 <NAME>, <NAME>
The software is licensed under the terms of the zlib/libpng license.
For details see LICENSE.txt
<NAME> "Calculation of the Kramers-Kronig transform of X-ray spectra by a piecewise Laurent polynomial method" Opt. Express 22, (2014) 23628-23639. DOI:10.1364/OE.22.023628
Pip page for kkcalc: https://pypi.org/project/kkcalc/
<NAME> github: https://github.com/benajamin/kkcalc
Cloned repository from <NAME>ick with an easy to follow along example: https://github.com/dschick/kkcalc
Parameters
----------
file_path : pathlib.WindowsPath
File path to NEXAFS beta spreadsheet, csv or txt. 2 columns: energy and intensity.
chemical_formula : string
The chemical formula of the component, i.e. 'C8H8' for polystyrene.
density : float
Density of the component in grams/cc. Typically around 1 grams/cc.
Returns
-------
delta : numpy.ndarray
Real (dispersive) components of the complex index of refraction. Two columns, energy and delta values.
beta : numpy.ndarray
Imaginary (absorptive) components of the complex index of refraction. Two columns, energy and beta values.
Examples
--------
    Calculate the complex index of refraction of polystyrene (PS) from the NEXAFS of PS given in a txt file.
>>> kkcalc_convert(file_path, *, chemical_formula= 'C8H8', density = 1.05, min_ev=270, max_ev=325)
"""
merge_points=[min_ev, max_ev]
output = kk.kk_calculate_real(file_path,
chemical_formula,
load_options,
input_data_type,
merge_points,
add_background,
fix_distortions,
curve_tolerance,
curve_recursion)
stoichiometry = kk.data.ParseChemicalFormula(chemical_formula)
formula_mass = data.calculate_FormulaMass(stoichiometry)
ASF_E, ASF_Data = kk.data.calculate_asf(stoichiometry)
ASF_Data2 = kk.data.coeffs_to_ASF(ASF_E, np.vstack((ASF_Data, ASF_Data[-1])))
delta = data.convert_data(output[:,[0,1]],'ASF','refractive_index', Density=density, Formula_Mass=formula_mass)
beta = data.convert_data(output[:,[0,2]],'ASF','refractive_index', Density=density, Formula_Mass=formula_mass)
return delta, beta
def kkcalc_plot(delta, beta, *, label, min_ev, max_ev, delta_ylim=[-0.006,0.004], beta_ylim=[0,0.008]):
plt.figure()
plt.plot(delta[:, 0], delta[:, 1], label=label, color = 'r')
plt.legend()
plt.xlim(min_ev, max_ev)
plt.ylim(delta_ylim)
plt.title('{:d} eV - {:d} eV'.format(min_ev, max_ev),fontsize=16)
plt.xlabel('Energy [eV]',fontsize=16)
plt.ylabel(r'$\delta$',fontsize=16)
plt.show()
plt.figure()
plt.plot(beta[:, 0], beta[:, 1], label=label, color = 'b')
plt.legend()
plt.xlim(min_ev, max_ev)
plt.ylim(beta_ylim)
plt.title('{:d} eV - {:d} eV'.format(min_ev, max_ev),fontsize=16)
plt.xlabel('Energy [eV]',fontsize=16)
plt.ylabel(r'$\beta$',fontsize=16)
plt.show()
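# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Chains kkcalc_convert() and kkcalc_plot() for one material. The file path and the
# polystyrene-like parameters are hypothetical placeholders.
def _example_kkcalc_workflow():
    nexafs_file = 'ps_nexafs.txt'  # hypothetical 2-column (energy, intensity) file
    delta, beta = kkcalc_convert(nexafs_file, chemical_formula='C8H8', density=1.05,
                                 min_ev=270, max_ev=325)
    kkcalc_plot(delta, beta, label='PS', min_ev=270, max_ev=325)
    return delta, beta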
def component_df(delta, beta, new_q_index, label):
delta_df = pd.DataFrame(delta[:, 1], columns=['delta_'+label])
delta_df = delta_df.set_axis(delta[:, 0], axis=0)
delta_df_new = (delta_df.reindex(delta_df.index.union(new_q_index)).interpolate(method='linear').reindex(new_q_index))
beta_df = pd.DataFrame(beta[:, 1], columns=['beta_'+label])
beta_df = beta_df.set_axis(beta[:, 0], axis=0)
beta_df_new = (beta_df.reindex(beta_df.index.union(new_q_index)).interpolate(method='linear').reindex(new_q_index))
return delta_df_new, beta_df_new
def make_contrast_M_3(delta1, beta1, label1, delta2, beta2, label2, delta3, beta3, label3, new_q_index):
delta_label1 = 'delta_'+label1
delta_label2 = 'delta_'+label2
delta_label3 = 'delta_'+label3
beta_label1 = 'beta_'+label1
beta_label2 = 'beta_'+label2
beta_label3 = 'beta_'+label3
delta1_df, beta1_df = component_df(delta1, beta1, new_q_index, label1)
delta2_df, beta2_df = component_df(delta2, beta2, new_q_index, label2)
delta3_df, beta3_df = component_df(delta3, beta3, new_q_index, label3)
index_df = | pd.DataFrame(delta1_df, columns=[delta_label1]) | pandas.DataFrame |
from __future__ import print_function
import collections
import json
import logging
import os
import pickle
import random
import sys
import numpy as np
import pandas as pd
import keras
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
from sklearn.model_selection import ShuffleSplit, KFold
import file_utils
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
# import candle
import file_utils
global_cache = {}
SEED = 2018
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
logger = logging.getLogger(__name__)
def set_up_logger(verbose=False):
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
def set_seed(seed=SEED):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
def get_file(url):
fname = os.path.basename(url)
return file_utils.get_file(fname, origin=url, cache_subdir='Pilot1')
def impute_and_scale(df, scaling='std', imputing='mean', dropna='all'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
        type of scaling to apply
    imputing : 'mean', 'median', 'most_frequent', or None, optional (default 'mean')
    dropna : 'all', 'any' or None, optional (default 'all')
    """
if dropna:
df = df.dropna(axis=1, how=dropna)
else:
empty_cols = df.columns[df.notnull().sum() == 0]
df[empty_cols] = 0
if imputing is None or imputing.lower() == 'none':
mat = df.values
else:
imputer = Imputer(strategy=imputing, axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
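# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal demonstration of impute_and_scale() on a made-up frame with one missing value.
def _example_impute_and_scale():
    toy = pd.DataFrame({'a': [1.0, 2.0, np.nan], 'b': [10.0, 20.0, 30.0]})
    scaled = impute_and_scale(toy, scaling='minmax', imputing='mean')
    print(scaled)  # NaN replaced by the column mean, then both columns rescaled to [0, 1]
    return scaled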
def discretize(df, col, bins=2, cutoffs=None):
y = df[col]
thresholds = cutoffs
if thresholds is None:
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
df[col] = classes
return df
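# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows discretize() turning a continuous response column into class labels; the data
# below are made up purely for illustration.
def _example_discretize():
    toy = pd.DataFrame({'AUC': [0.1, 0.4, 0.55, 0.9]})
    binned = discretize(toy, 'AUC', bins=2)
    print(binned['AUC'].tolist())  # [0, 0, 1, 1]: split at the median (0.475)
    return binned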
def save_combined_dose_response():
df1 = load_single_dose_response(combo_format=True, fraction=False)
df2 = load_combo_dose_response(fraction=False)
df = pd.concat([df1, df2])
df.to_csv('combined_drug_growth', index=False, sep='\t')
def load_combined_dose_response(rename=True):
df1 = load_single_dose_response(combo_format=True)
logger.info('Loaded {} single drug dose response measurements'.format(df1.shape[0]))
df2 = load_combo_dose_response()
logger.info('Loaded {} drug pair dose response measurements'.format(df2.shape[0]))
df = pd.concat([df1, df2])
logger.info('Combined dose response data contains sources: {}'.format(df['SOURCE'].unique()))
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2',
'DOSE1': 'Dose1', 'DOSE2': 'Dose2',
'GROWTH': 'Growth', 'STUDY': 'Study'})
return df
def load_single_dose_response(combo_format=False, fraction=True):
# path = get_file(DATA_URL + 'combined_single_drug_growth')
path = get_file(DATA_URL + 'rescaled_combined_single_drug_growth')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
# nrows=10,
dtype={'SOURCE': str, 'DRUG_ID': str,
'CELLNAME': str, 'CONCUNIT': str,
'LOG_CONCENTRATION': np.float32,
'EXPID': str, 'GROWTH': np.float32})
global_cache[path] = df
df['DOSE'] = -df['LOG_CONCENTRATION']
df = df.rename(columns={'CELLNAME': 'CELL', 'DRUG_ID': 'DRUG', 'EXPID': 'STUDY'})
df = df[['SOURCE', 'CELL', 'DRUG', 'DOSE', 'GROWTH', 'STUDY']]
if fraction:
df['GROWTH'] /= 100
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1', 'DOSE': 'DOSE1'})
df['DRUG2'] = np.nan
df['DOSE2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df['DOSE2'] = df['DOSE2'].astype(np.float32)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_combo_dose_response(fraction=True):
path = get_file(DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2',
'PERCENTGROWTH', 'VALID', 'SCREENER', 'STUDY'],
# nrows=10000,
dtype={'CELLNAME': str, 'NSC1': str, 'NSC2': str,
'CONC1': np.float32, 'CONC2': np.float32,
'PERCENTGROWTH': np.float32, 'VALID': str,
'SCREENER': str, 'STUDY': str},
error_bad_lines=False, warn_bad_lines=True)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df['SOURCE'] = 'ALMANAC.' + df['SCREENER']
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['NCI60.ID']].to_dict()['NCI60.ID']
df['CELL'] = df['CELLNAME'].map(lambda x: cellmap[x])
df['DOSE1'] = -np.log10(df['CONC1'])
df['DOSE2'] = -np.log10(df['CONC2'])
df['DRUG1'] = 'NSC.' + df['NSC1']
df['DRUG2'] = 'NSC.' + df['NSC2']
if fraction:
df['GROWTH'] = df['PERCENTGROWTH'] / 100
else:
df['GROWTH'] = df['PERCENTGROWTH']
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_aggregated_single_response(target='AUC', min_r2_fit=0.3, max_ec50_se=3, combo_format=False, rename=True):
path = get_file(DATA_URL + 'combined_single_response_agg')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, engine='c', sep='\t',
dtype={'SOURCE': str, 'CELL': str, 'DRUG': str, 'STUDY': str,
'AUC': np.float32, 'IC50': np.float32,
'EC50': np.float32, 'EC50se': np.float32,
'R2fit': np.float32, 'Einf': np.float32,
'HS': np.float32, 'AAC1': np.float32,
'AUC1': np.float32, 'DSS1': np.float32})
global_cache[path] = df
total = len(df)
df = df[(df['R2fit'] >= min_r2_fit) & (df['EC50se'] <= max_ec50_se)]
df = df[['SOURCE', 'CELL', 'DRUG', target, 'STUDY']]
df = df[~df[target].isnull()]
    logger.info('Loaded %d dose independent response samples (filtered by EC50se <= %f & R2fit >=%f from a total of %d).', len(df), max_ec50_se, min_r2_fit, total)
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1'})
df['DRUG2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DRUG2', target, 'STUDY']]
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2', 'STUDY': 'Study'})
else:
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG': 'Drug', 'STUDY': 'Study'})
return df
def load_drug_data(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = pd.DataFrame(df_desc.loc[:, 'Drug'])
df2 = df_desc.drop('Drug', 1)
df2 = impute_and_scale(df2, scaling=scaling, imputing=imputing, dropna=dropna)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df_desc = pd.concat([df1, df2], axis=1)
df_fp = pd.concat([df_fp, df_fp2]).reset_index(drop=True)
df1 = pd.DataFrame(df_fp.loc[:, 'Drug'])
df2 = df_fp.drop('Drug', 1)
df2 = impute_and_scale(df2, scaling=None, imputing=imputing, dropna=dropna)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df_fp = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug descriptors: %s', df_desc.shape)
logger.info('Loaded combined dragon7 drug fingerprints: %s', df_fp.shape)
return df_desc, df_fp
def load_drug_descriptors(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True, feature_subset=None):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = pd.DataFrame(df_desc.loc[:, 'Drug'])
df2 = df_desc.drop('Drug', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
if feature_subset:
df2 = df2[[x for x in df2.columns if x in feature_subset]]
df2 = impute_and_scale(df2, scaling=scaling, imputing=imputing, dropna=dropna)
df_desc = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug descriptors: %s', df_desc.shape)
return df_desc
def load_drug_fingerprints(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True, feature_subset=None):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_fp = | pd.concat([df_fp, df_fp2]) | pandas.concat |
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import itertools
import operator
import sys
import warnings
from numpy import nan, ndarray
import numpy as np
from pandas.core.common import (isnull, notnull, _ensure_index,
_is_bool_indexer, _default_index)
from pandas.core.daterange import DateRange
from pandas.core.generic import PandasObject
from pandas.core.index import Index, MultiIndex
from pandas.core.indexing import _SeriesIndexer, _maybe_droplevels
import pandas.core.datetools as datetools
import pandas._tseries as _tseries
__all__ = ['Series', 'TimeSeries']
def _numpy_lt_151():
return np.__version__ < '1.5.1'
#-------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
from pandas.core.frame import DataFrame
if isinstance(other, Series):
if self.index.equals(other.index):
return Series(op(self.values, other.values), index=self.index)
new_index = self.index + other.index
this_reindexed = self.reindex(new_index)
other_reindexed = other.reindex(new_index)
arr = op(this_reindexed.values, other_reindexed.values)
return Series(arr, index=new_index)
elif isinstance(other, DataFrame):
return NotImplemented
else:
# scalars
return Series(op(self.values, other), index=self.index)
return wrapper
def _flex_method(op, name):
def f(self, other, fill_value=None):
return self._binop(other, op, fill_value=fill_value)
f.__doc__ = """
Binary operator %s with support to substitute a fill_value for missing data
in one of the inputs
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
Returns
-------
result : Series
""" % name
f.__name__ = name
return f
#-------------------------------------------------------------------------------
# Series class
class Series(np.ndarray, PandasObject):
"""
Generic indexed (labeled) vector, including time series
Contains values in a numpy-ndarray with an optional bound index
(also an array of dates, strings, or whatever you want the 'row
names' of your series to be)
Rows can be retrieved by index value (date, string, etc.) or
relative position in the underlying array.
Operations between Series (+, -, /, *, **) align values based on
their associated index values-- they need not be the same length.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like
Index object (or other iterable of same length as data)
Must be input if first argument is not a dict. If both a dict
and index sequence are used, the index will override the keys
found in the dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
Notes
-----
If you combine two series, all values for an index position must
be present or the value for that index position will be nan. The
new index is the sorted union of the two Series indices.
Data is *not* copied from input arrays by default
"""
_AXIS_NUMBERS = {
'index' : 0
}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
def __new__(cls, data, index=None, dtype=None, name=None, copy=False):
if isinstance(data, Series):
if index is None:
index = data.index
elif isinstance(data, dict):
if index is None:
index = Index(sorted(data.keys()))
data = [data[idx] for idx in index]
# Create array, do *not* copy data by default, infer type
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except ValueError:
if dtype:
raise
subarr = np.array(data, dtype=object)
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
if isinstance(value, basestring) and dtype is None:
dtype = np.object_
if dtype is None:
subarr = np.empty(len(index), dtype=type(value))
else:
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
else:
return subarr.item()
elif subarr.ndim > 1:
raise Exception('Data must be 1-dimensional')
if index is None:
index = _default_index(len(subarr))
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, basestring):
subarr = np.array(data, dtype=object, copy=copy)
# Change the class of the array to be the subclass type.
subarr = subarr.view(cls)
subarr.index = index
subarr.name = name
if subarr.index.is_all_dates():
subarr = subarr.view(TimeSeries)
return subarr
def __init__(self, *args, **kwargs):
pass
def __hash__(self):
raise TypeError('unhashable type')
_index = None
def _get_index(self):
return self._index
def _set_index(self, index):
indexTypes = ndarray, Index, list, tuple
if not isinstance(index, indexTypes):
raise TypeError("Expected index to be in %s; was %s."
% (indexTypes, type(index)))
if len(self) != len(index):
raise AssertionError('Lengths of index and values did not match!')
self._index = _ensure_index(index)
index = property(fget=_get_index, fset=_set_index)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self._index = getattr(obj, '_index', None)
def toDict(self):
return dict(self.iteritems())
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind, fill_value=fill_value)
def __contains__(self, key):
return key in self.index
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = (self.index, )
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
index, = own_state
self.index = index
def __getitem__(self, key):
"""
Returns item(s) for requested index/sequence, overrides default behavior
for series[key].
Logic is as follows:
- If key is in the index, return the value corresponding
to that index
- Otherwise, use key (presumably one integer or a sequence
of integers) to obtain values from the series. In the case
of a sequence, a 'slice' of the series (with corresponding dates)
will be returned, otherwise a single value.
"""
try:
if isinstance(self.index, MultiIndex):
return self._multilevel_index(key)
else:
values = self.values
try:
return values[self.index.get_loc(key)]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise
except TypeError:
pass
def _index_with(indexer):
return Series(self.values[indexer],
index=self.index[indexer])
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if _is_bool_indexer(key):
self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return _index_with(key)
# TODO: [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
try:
return _index_with(key)
except Exception:
key = np.asarray(key)
return _index_with(key)
def _multilevel_index(self, key):
values = self.values
try:
loc = self.index.get_loc(key)
if isinstance(loc, slice):
# TODO: what if a level contains tuples??
new_index = self.index[loc]
new_index = _maybe_droplevels(new_index, key)
return Series(values[loc], index=new_index)
else:
return values[loc]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise Exception('Requested index not in this series!')
def get(self, key, default=None):
"""
Returns value occupying requested index, default to specified
missing value if not present
Parameters
----------
key : object
Index value looking for
default : object, optional
Value to return if key not in index
Returns
-------
y : scalar
"""
if key in self.index:
return self._get_val_at(self.index.get_loc(key))
else:
return default
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def __getslice__(self, i, j):
"""
Returns a slice of the Series.
Note that the underlying values are COPIES.
The reason that the getslice returns copies is that otherwise you
will have a reference to the original series which could be
inadvertently changed
"""
return Series(self.values[i:j].copy(), index=self.index[i:j])
def __setitem__(self, key, value):
values = self.values
try:
loc = self.index.get_loc(key)
values[loc] = value
return
except KeyError:
if isinstance(key, (int, np.integer)):
values[key] = value
return
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item
pass
self._check_bool_indexer(key)
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if isinstance(key, np.ndarray) and key.dtype == np.object_:
mask = isnull(key)
if mask.any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
if set([True, False]).issubset(set(key)):
key = np.asarray(key, dtype=bool)
values[key] = value
return
values[key] = value
def _check_bool_indexer(self, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
if isinstance(key, Series) and key.dtype == np.bool_:
if not key.index.equals(self.index):
raise Exception('can only boolean index with like-indexed '
'Series or raw ndarrays')
def __setslice__(self, i, j, value):
"""Set slice equal to given value(s)"""
ndarray.__setslice__(self, i, j, value)
def __repr__(self):
"""Clean string representation of a Series"""
if len(self.index) > 500:
return self._make_repr(50)
elif len(self.index) > 0:
return _seriesRepr(self.index, self.values)
else:
return '%s' % ndarray.__repr__(self)
def _make_repr(self, max_vals=50):
vals = self.values
index = self.index
num = max_vals // 2
head = _seriesRepr(index[:num], vals[:num])
tail = _seriesRepr(index[-(max_vals - num):], vals[-(max_vals - num):])
return head + '\n...\n' + tail + '\nlength: %d' % len(vals)
def toString(self, buffer=sys.stdout, nanRep='NaN'):
print >> buffer, _seriesRepr(self.index, self.values,
nanRep=nanRep)
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self.values)
def copy(self):
return Series(self.values.copy(), index=self.index)
#-------------------------------------------------------------------------------
# Arithmetic operators
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__mul__ = _arith_method(operator.mul, '__mul__')
__div__ = _arith_method(operator.div, '__div__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__pow__ = _arith_method(operator.pow, '__pow__')
__radd__ = _arith_method(operator.add, '__add__')
__rmul__ = _arith_method(operator.mul, '__mul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__sub__')
__rdiv__ = _arith_method(lambda x, y: y / x, '__div__')
__rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__')
__rpow__ = _arith_method(lambda x, y: y ** x, '__pow__')
# Inplace operators
__iadd__ = __add__
__isub__ = __sub__
__imul__ = __mul__
__idiv__ = __div__
__ipow__ = __pow__
#-------------------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self):
"""
Return number of observations of Series.
Returns
-------
nobs : int
"""
return notnull(self.values).sum()
def sum(self, axis=None, dtype=None, out=None):
"""
Sum of non-null values
"""
return self._ndarray_statistic('sum')
def mean(self, axis=None, dtype=None, out=None):
"""
Mean of non-null values
"""
return self._ndarray_statistic('mean')
def _ndarray_statistic(self, funcname):
arr = self.values
retVal = getattr(arr, funcname)()
if isnull(retVal):
arr = remove_na(arr)
if len(arr) == 0:
return np.nan
retVal = getattr(arr, funcname)()
return retVal
def quantile(self, q=0.5):
"""
Return value at the given quantile
Parameters
----------
q : quantile
0 <= q <= 1
Returns
-------
q : float
"""
from scipy.stats import scoreatpercentile
return scoreatpercentile(self.valid().values, q * 100)
def describe(self):
"""
Generate various summary statistics of columns, excluding NaN values
Returns
-------
DataFrame
"""
names = ['count', 'mean', 'std', 'min',
'10%', '50%', '90%', 'max']
data = [self.count(), self.mean(), self.std(), self.min(),
self.quantile(.1), self.median(), self.quantile(.9),
self.max()]
return Series(data, index=names)
def min(self, axis=None, out=None):
"""
Minimum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = np.inf
return arr.min()
def max(self, axis=None, out=None):
"""
Maximum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = -np.inf
return arr.max()
def std(self, axis=None, dtype=None, out=None, ddof=1):
"""
Unbiased standard deviation of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return nan
return ndarray.std(nona, axis, dtype, out, ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1):
"""
Unbiased variance of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return nan
return ndarray.var(nona, axis, dtype, out, ddof)
def skew(self):
"""
Unbiased skewness of the non-null values
Returns
-------
skew : float
"""
y = np.array(self.values)
mask = notnull(y)
count = mask.sum()
np.putmask(y, -mask, 0)
A = y.sum() / count
B = (y**2).sum() / count - A**2
C = (y**3).sum() / count - A**3 - 3*A*B
return (np.sqrt((count**2-count))*C) / ((count-2)*np.sqrt(B)**3)
def cumsum(self, axis=0, dtype=None, out=None):
"""
Cumulative sum of values. Preserves NaN values
Extra parameters are to preserve ndarray interface.
        Returns
        -------
        cumsum : Series
        """
arr = self.values.copy()
do_mask = not issubclass(self.dtype.type, np.int_)
if do_mask:
mask = isnull(arr)
np.putmask(arr, mask, 0.)
result = arr.cumsum()
if do_mask:
np.putmask(result, mask, np.nan)
return Series(result, index=self.index)
def cumprod(self, axis=0, dtype=None, out=None):
"""
Overriding numpy's built-in cumprod functionality
"""
arr = self.values.copy()
do_mask = not issubclass(self.dtype.type, np.int_)
if do_mask:
mask = isnull(arr)
np.putmask(arr, mask, 1.)
result = arr.cumprod()
if do_mask:
np.putmask(result, mask, np.nan)
return Series(result, index=self.index)
def median(self):
"""
Compute median value of non-null values
"""
arr = self.values
if arr.dtype != np.float_:
arr = arr.astype(float)
arr = arr[notnull(arr)]
return _tseries.median(arr)
def corr(self, other):
"""
Compute correlation two Series, excluding missing values
Parameters
----------
other : Series object
Returns
-------
correlation : float
"""
commonIdx = self.valid().index.intersection(other.valid().index)
if len(commonIdx) == 0:
return nan
this = self.reindex(commonIdx)
that = other.reindex(commonIdx)
return np.corrcoef(this, that)[0, 1]
def diff(self):
"""
1st discrete difference of object
Returns
-------
TimeSeries
"""
return (self - self.shift(1))
def autocorr(self):
"""
Lag-1 autocorrelation
Returns
-------
TimeSeries
"""
return self.corr(self.shift(1))
def clip(self, upper=None, lower=None):
"""
Trim values at input threshold(s)
Parameters
----------
lower : float, default None
upper : float, default None
Returns
-------
y : Series
"""
result = self
if lower is not None:
result = result.clip_lower(lower)
if upper is not None:
result = result.clip_upper(upper)
return result
def clip_upper(self, threshold):
"""Return copy of series with values above given value truncated"""
return np.where(self > threshold, threshold, self)
def clip_lower(self, threshold):
"""Return copy of series with values below given value truncated"""
return np.where(self < threshold, threshold, self)
#-------------------------------------------------------------------------------
# Iteration
def keys(self):
"Alias for Series index"
return self.index
@property
def values(self):
"""
Return Series as ndarray
Returns
-------
arr : numpy.ndarray
"""
return self.view(ndarray)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return itertools.izip(iter(self.index), iter(self))
#-------------------------------------------------------------------------------
# Combination
def append(self, other):
"""
Concatenate two Series. The indices should not overlap
Parameters
----------
other : Series
Returns
-------
y : Series
"""
new_index = np.concatenate((self.index, other.index))
new_index = Index(new_index)
new_index._verify_integrity()
new_values = np.concatenate((self, other))
return Series(new_values, index=new_index)
def _binop(self, other, func, fill_value=None):
"""
        Perform a binary operation between two Series, aligning on the union of
        their indices and optionally substituting a fill value for missing data

        Parameters
        ----------
        other : Series
        func : binary operator (e.g. operator.add)
        fill_value : scalar value, default None
            Value substituted when only one of the two Series is missing at a
            label; if both are missing, the result is NaN

        Returns
        -------
        combined : Series
        """
assert(isinstance(other, Series))
new_index = self.index
this = self
if not self.index.equals(other.index):
new_index = self.index + other.index
this = self.reindex(new_index)
other = other.reindex(new_index)
this_vals = this.values
other_vals = other.values
if fill_value is not None:
this_mask = isnull(this_vals)
other_mask = isnull(other_vals)
this_vals = this_vals.copy()
other_vals = other_vals.copy()
# one but not both
mask = this_mask ^ other_mask
this_vals[this_mask & mask] = fill_value
other_vals[other_mask & mask] = fill_value
result = func(this_vals, other_vals)
return Series(result, index=new_index)
add = _flex_method(operator.add, 'add')
sub = _flex_method(operator.sub, 'subtract')
mul = _flex_method(operator.mul, 'multiply')
div = _flex_method(operator.div, 'divide')
def combine(self, other, func, fill_value=nan):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or the
other
Parameters
----------
other : Series or scalar value
func : function
fill_value : scalar value
Returns
-------
result : Series
"""
if isinstance(other, Series):
new_index = self.index + other.index
new_values = np.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
new_values[i] = func(self.get(idx, fill_value),
other.get(idx, fill_value))
else:
new_index = self.index
new_values = func(self.values, other)
return Series(new_values, index=new_index)
def combineFirst(self, other):
"""
Combine Series values, choosing calling Series's values first.
Parameters
----------
other : Series
Returns
-------
y : Series
formed as union of two Series
"""
if self.index.equals(other.index):
new_index = self.index
# save ourselves the copying in this case
this = self
else:
new_index = self.index + other.index
this = self.reindex(new_index)
other = other.reindex(new_index)
result = Series(np.where(isnull(this), other, this), index=new_index)
return result
#----------------------------------------------------------------------
# Reindexing, sorting
def sort(self, axis=0, kind='quicksort', order=None):
"""
Overridden NumPy sort, taking care with missing values
"""
sortedSeries = self.order(na_last=True)
self[:] = sortedSeries
self.index = sortedSeries.index
def argsort(self, axis=0, kind='quicksort', order=None):
"""
        Overriding numpy's built-in argsort functionality, taking care with
        missing values (NaN positions are preserved in the result)
"""
values = self.values
mask = isnull(values)
if mask.any():
result = values.copy()
notmask = -mask
result[notmask] = np.argsort(values[notmask])
return Series(result, index=self.index)
else:
return Series(np.argsort(values), index=self.index)
def order(self, na_last=True, ascending=True, **kwds):
"""
Sorts Series object, by value, maintaining index-value object
Parameters
----------
na_last : boolean (optional, default=True)
Put NaN's at beginning or end
Returns
-------
y : Series
"""
def _try_mergesort(arr):
# easier to ask forgiveness than permission
try:
return arr.argsort(kind='mergesort')
except TypeError:
# stable sort not available for object dtype
return arr.argsort()
if 'missingAtEnd' in kwds: # pragma: no cover
warnings.warn("missingAtEnd is deprecated, use na_last",
FutureWarning)
na_last = kwds['missingAtEnd']
arr = self.values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isnull(arr)
good = -bad
idx = np.arange(len(self))
argsorted = _try_mergesort(arr[good])
if not ascending:
argsorted = argsorted[::-1]
if na_last:
n = sum(good)
sortedIdx[:n] = idx[good][argsorted]
sortedIdx[n:] = idx[bad]
else:
n = sum(bad)
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
return Series(arr[sortedIdx], index=self.index[sortedIdx])
def sortlevel(self, level=0, ascending=True):
"""
Sort multilevel index by chosen level. Data will be lexicographically
sorted by the chosen level followed by the other levels (in order)
Parameters
----------
level : int
ascending : bool, default True
Returns
-------
sorted : Series
"""
if not isinstance(self.index, MultiIndex):
raise Exception('can only sort by level with a hierarchical index')
new_index, indexer = self.index.sortlevel(level, ascending=ascending)
new_values = self.values.take(indexer)
return Series(new_values, index=new_index)
def unstack(self, level=-1):
"""
"Unstack" Series with multi-level index to produce DataFrame
Parameters
----------
level : int, default last level
Level to "unstack"
Examples
--------
>>> s
one a 1.
one b 2.
two a 3.
two b 4.
>>> s.unstack(level=-1)
a b
one 1. 2.
two 3. 4.
>>> s.unstack(level=0)
one two
        a  1.   3.
        b  2.   4.
Returns
-------
unstacked : DataFrame
"""
unstacker = _Unstacker(self.values, self.index, level=level)
return unstacker.get_result()
#----------------------------------------------------------------------
# function application
def map(self, arg):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function).
Parameters
----------
arg : function, dict, or Series
Returns
-------
y : Series
same index as caller
"""
if isinstance(arg, (dict, Series)):
if isinstance(arg, dict):
arg = Series(arg)
indexer, mask = _tseries.getMergeVec(self, arg.index.indexMap)
notmask = -mask
new_values = arg.view(np.ndarray).take(indexer)
if notmask.any():
if issubclass(new_values.dtype.type, np.integer):
new_values = new_values.astype(float)
np.putmask(new_values, notmask, np.nan)
newSer = Series(new_values, index=self.index)
return newSer
else:
return Series([arg(x) for x in self], index=self.index)
merge = map
def apply(self, func):
"""
Call function on elements on array. Can be ufunc or Python function
expecting only single values
Parameters
----------
func : function
Returns
-------
y : Series
"""
try:
return func(self)
except Exception:
return Series([func(x) for x in self], index=self.index)
applymap = apply
def reindex(self, index=None, method=None):
"""Conform Series to new Index
Parameters
----------
index : array-like
Preferably an Index object (to avoid duplicating data)
method : {'backfill', 'bfill', 'pad', 'ffill', None}
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Returns
-------
reindexed : Series
"""
if self.index.equals(index):
return self.copy()
index = | _ensure_index(index) | pandas.core.common._ensure_index |
#!/usr/bin/env python
#----------------------------------------------------------------------#
'''
A module to analyze token trends on the BSC blockchain.
This is very much a work in progress.
'''
#----------------------------------------------------------------------#
# System Module Imports
import os
import sys
import datetime
import configparser
# Additional Module Imports
import tqdm
import pandas as pd
import requests
# Local Imports
#----------------------------------------------------------------------#
# Read in my API keys from a config file
config = configparser.ConfigParser()
config.read(os.path.join(os.getenv('HOME'), '.config', 'api_keys.ini'))
#----------------------------------------------------------------------#
# BITQUERY API
#----------------------------------------------------------------------#
url_bitquery = 'https://graphql.bitquery.io'
#----------------------------------------------------------------------#
def run_query(query): # A simple function to use requests.post to make the API call.
headers = {'X-API-KEY': config['bitquery']['key']}
request = requests.post(url_bitquery,
json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed and return code is {}. {}'.format(request.status_code, query))
#----------------------------------------------------------------------#
def q_pancake_recent_daily(start):
return '''{
ethereum(network: bsc) {
dexTrades(
options: {limit: 10000, desc: "trades"}
date: {since: "%s"}
exchangeName: {in: ["Pancake", "Pancake v2"]}
quoteCurrency: {is: "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c"}
) {
timeInterval {
day(count: 1)
}
baseCurrency {
symbol
address
}
baseAmount
quoteCurrency {
symbol
address
}
quoteAmount
trades: count
quotePrice
open_price: minimum(of: block, get: quote_price)
high_price: quotePrice(calculate: maximum)
low_price: quotePrice(calculate: minimum)
close_price: maximum(of: block, get: quote_price)
}
}
}
''' % (start,)
#----------------------------------------------------------------------#
def q_ohlc_periods(
address,
start,
period= 'minute',
periods_per_candle= 1,
limit_candles= None,
quote_address= '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'):
'Construct a query to obtain OHLC data for a given address.'
# Apply the limit if one was given
    limit = f'options: {{limit: {limit_candles}, asc: "timeInterval.{period}"}}' if limit_candles is not None else ''
# Now construct and return the query
return '''{
ethereum(network: bsc) {
dexTrades(%s
date: {since: "%s"}
exchangeName: {in: ["Pancake", "Pancake v2"]}
baseCurrency: {is: "%s"}
quoteCurrency: {is: "%s"}
) {
timeInterval {
%s(count: %s)
}
baseCurrency {
symbol
address
}
trades: count
open_price: minimum(of: block, get: quote_price)
high_price: quotePrice(calculate: maximum)
low_price: quotePrice(calculate: minimum)
close_price: maximum(of: block, get: quote_price)
}
}
}
''' % (limit, start, address, quote_address, period, periods_per_candle)
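#----------------------------------------------------------------------#
# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Builds a 5-minute candle query with q_ohlc_periods() and sends it through run_query().
# The base-token address below is a hypothetical placeholder, and a valid bitquery API
# key must be present in the config file read at the top of this module.
def _example_ohlc_query():
    query = q_ohlc_periods(
        address='0x0000000000000000000000000000000000000000',  # hypothetical token
        start='2021-06-01',
        period='minute',
        periods_per_candle=5,
        limit_candles=288,
    )
    result = run_query(query)
    return result['data']['ethereum']['dexTrades']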
#----------------------------------------------------------------------#
def q_tokens_created(start_time, end_time):
return '''{
ethereum(network: bsc) {
smartContractCalls(
options: {asc: "block.height", limit: 2147483647}
smartContractMethod: {is: "Contract Creation"}
smartContractType: {is: Token}
time: {after: "%s", before: "%s"}
) {
transaction {
hash
}
block {
height
timestamp {
iso8601
}
}
smartContract {
contractType
address {
address
annotation
}
currency {
name
symbol
decimals
tokenType
}
}
caller {
address
}
}
}
}
''' % (start_time, end_time)
#----------------------------------------------------------------------#
def get_recent_tokens(from_days_ago= 5, to_days_ago= 4):
'Find all tokens registered within a given time period.'
# Construct the query
now = datetime.datetime.now()
start = now - datetime.timedelta(days=from_days_ago)
end = now - datetime.timedelta(days= to_days_ago)
query = q_tokens_created(start.isoformat(), end.isoformat())
# Now run the query
result = run_query(query)
# Basic error handling
if 'errors' in result:
raise RuntimeError(f'ERROR: New tokens query failed with {result["errors"]}')
# Collect info on each new token
new_tokens = [
{
'created' : datetime.datetime.fromisoformat(record['block']['timestamp']['iso8601'].rstrip('Z')),
'owner' : record['caller']['address'],
'address' : record['smartContract']['address']['address'],
'decimals' : record['smartContract']['currency']['decimals'],
'name' : record['smartContract']['currency']['name'],
'symbol' : record['smartContract']['currency']['symbol'],
'tokenType' : record['smartContract']['currency']['tokenType'],
}
for record in result['data']['ethereum']['smartContractCalls']
]
return new_tokens
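#----------------------------------------------------------------------#
# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Tabulates the tokens created between 5 and 4 days ago using get_recent_tokens().
def _example_recent_tokens():
    new_tokens = get_recent_tokens(from_days_ago=5, to_days_ago=4)
    summary = pd.DataFrame(new_tokens)
    print(summary[['created', 'symbol', 'name', 'address']].head())
    return summary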
#----------------------------------------------------------------------#
def float_nan(value):
if value is None:
return float('nan')
return float(value)
#----------------------------------------------------------------------#
def get_ohlc(address, start_time, period= 'minute', periods_per_candle= 1, limit_candles= 24*60, quote_address= '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'):
'Obtain OHLC data on an address.'
# Construct and run a query to get OHLC data
query = q_ohlc_periods(address, start_time, period, periods_per_candle, limit_candles, quote_address)
result = run_query(query)
# Basic error handling
if 'errors' in result:
raise RuntimeError(f'ERROR: OHLC query ({address}, {start_time}, {period}, {periods_per_candle}, {limit_candles}) failed with {result["errors"]}')
trades = result['data']['ethereum']['dexTrades']
times = [ | pd.Timestamp(trade['timeInterval']['minute']) | pandas.Timestamp |
#!/usr/bin/env python
'''
Tools for generating SOWFA MMC inputs
'''
__author__ = "<NAME>"
__date__ = "May 16, 2019"
import numpy as np
import pandas as pd
import os
import gzip as gz
boundaryDataHeader = """/*--------------------------------*- C++ -*----------------------------------*\\
========= |
\\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\\\ / O peration | Website: https://openfoam.org
\\\\ / A nd | Version: 6
\\\\/ M anipulation |
\\*---------------------------------------------------------------------------*/
// generated by mmctools.coupling.sowfa.BoundaryCoupling
// https://github.com/a2e-mmc/mmctools/tree/dev
{N:d}
("""
class InternalCoupling(object):
"""
Class for writing data to SOWFA-readable input files for internal coupling
"""
def __init__(self,
dpath,
df,
dateref=None,
datefrom=None,
dateto=None):
"""
Initialize SOWFA input object
Usage
=====
dpath : str
Folder to write files to
df : pandas.DataFrame
Data (index should be called datetime)
dateref : str, optional
Reference datetime, used to construct a pd.DateTimeIndex
with SOWFA time 0 corresponding to dateref; if not
specified, then the time index will be the simulation time
as a pd.TimedeltaIndex
datefrom : str, optional
Start date of the period that will be written out, if None
start from the first timestamp in df; only used if dateref
is specified
dateto : str, optional
End date of the period that will be written out, if None end
with the last timestamp in df; only used if dateref is
specified
"""
self.dpath = dpath
# Create folder dpath if needed
if not os.path.isdir(dpath):
os.mkdir(dpath)
# Handle input with multiindex
if isinstance(df.index, pd.MultiIndex):
assert df.index.names[0] == 'datetime', 'first multiindex level is not "datetime"'
assert df.index.names[1] == 'height', 'second multiindex level is not "height"'
df = df.reset_index(level=1)
# Use dataframe between datefrom and dateto
if datefrom is None:
datefrom = df.index[0]
if dateto is None:
dateto = df.index[-1]
# Make copy to avoid SettingwithcopyWarning
self.df = df.loc[(df.index>=datefrom) & (df.index<=dateto)].copy()
assert(len(self.df.index.unique())>0), 'No data for requested period of time'
# Store start date for ICs
self.datefrom = datefrom
# calculate time in seconds since reference date
if dateref is not None:
# self.df['datetime'] exists and is a DateTimeIndex
dateref = pd.to_datetime(dateref)
tdelta = pd.Timedelta(1,unit='s')
self.df.reset_index(inplace=True)
self.df['t_index'] = (self.df['datetime'] - dateref) / tdelta
self.df.set_index('datetime',inplace=True)
elif isinstance(df.index, pd.TimedeltaIndex):
# self.df['t'] exists and is a TimedeltaIndex
self.df['t_index'] = self.df.index.total_seconds()
else:
self.df['t_index'] = self.df.index
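    # --- Illustrative usage sketch (added for clarity; not part of the original class) ---
    # A typical driver, assuming `df` is indexed by datetime (or by a (datetime, height)
    # MultiIndex) and already holds the fields to be written. The paths, dates and the
    # 'qwall' column name below are hypothetical placeholders:
    #
    #   ic = InternalCoupling('boundaryData', df,
    #                         dateref='2013-11-08 00:00',
    #                         datefrom='2013-11-08 12:00',
    #                         dateto='2013-11-09 12:00')
    #   ic.write_BCs('qwall', fieldname='qwall', fact=-1.0)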
def write_BCs(self,
fname,
fieldname,
fact=1.0
):
"""
Write surface boundary conditions to SOWFA-readable input file for
solver (to be included in $startTime/qwall)
Usage
=====
fname : str
Filename
fieldname : str or list-like
Name of the scalar field (or a list of names of vector field
components) to be written out; 0 may be substituted to
indicate an array of zeroes
fact : float
Scale factor for the field, e.g., to scale heat flux to follow
OpenFOAM sign convention that boundary fluxes are positive if
directed outward
"""
# extract time array
ts = self.df.t_index.values
nt = ts.size
# check if scalar or vector
if isinstance(fieldname, (list,tuple)):
assert len(fieldname) == 3, 'expected 3 vector components'
fieldnames = fieldname
fmt = [' (%g', '(%.12g', '%.12g', '%.12g))',]
else:
fieldnames = [fieldname]
fmt = [' (%g', '%.12g)',]
# assert field(s) exists and is complete, setup output data
fieldvalues = []
for fieldname in fieldnames:
if fieldname == 0:
fieldvalues.append(np.zeros_like(ts))
else:
assert(fieldname in self.df.columns), \
'Field '+fieldname+' not in df'
assert(~ | pd.isna(self.df[fieldname]) | pandas.isna |
#!/usr/bin/python3
import sys
import json
import pandas as pd
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
def autentica(client_id, client_secret):
client_credentials_manager = SpotifyClientCredentials(client_id, client_secret)
return spotipy.Spotify(client_credentials_manager = client_credentials_manager)
def nome_artista(sp, uri_artista):
    artist = sp.artist(uri_artista)
return artist['name'].replace(' ','_')
def features_top_tracks(sp, uri_artista):
    top_tracks = sp.artist_top_tracks(uri_artista)
uri_tracks = []
for track in top_tracks['tracks']:
uri_tracks.append(track['uri'])
return sp.audio_features(uri_tracks)
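# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Wires the helpers above together; the client credentials and artist URI are supplied
# by the caller, so nothing here is hard-coded.
def _example_spotify_features(client_id, client_secret, uri_artista):
    sp = autentica(client_id, client_secret)
    artist = nome_artista(sp, uri_artista)
    features = features_top_tracks(sp, uri_artista)
    converte_json(features, artist)
    return features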
def converte_json(features, artist):
with open('features_' + artist.lower() + '.json','w') as fp:
json.dump(features, fp)
    print('Features converted to JSON.')
def converte_csv(features, artist):
df = | pd.DataFrame(features) | pandas.DataFrame |
"""
Given a software, find similar software using source code
Currently based on software name that exist in the dataset
TODO: find similar software using source code that is not
in the existing pool
"""
from LASCAD.LDA.Clustering import Clustering
import pandas as pd
import numpy as np
from scipy.spatial.distance import cosine
from heapq import heappop, heappush
from scipy.stats import entropy
import multiprocessing
import os
from ast import literal_eval as make_tuple
class SimilarSoftwareEngine:
def __init__(self, NUM_TOPICS=50, max_df=0.5, min_df=0.1, n_clusters=20, maxTopSimilar=100,
dataset='showcase1', verbose=True, normalize=True, loadSaved=False):
self.clustering = Clustering(NUM_TOPICS, max_df, min_df, dataset, verbose, normalize)
self.projects = self.clustering.proj_topic.index.values
self.n_clutsers = n_clusters
self.maxTopSimilar = maxTopSimilar
similarAppsFilename = '../results/similarApps/' + 'similarApps_' + self.clustering.suffix + '.csv'
if loadSaved:
self.projectsMap = pd.read_csv(similarAppsFilename)
self.projectsMap.drop('QueryProject', axis=1, inplace=True)
self.projectsMap = self.projectsMap.as_matrix()
for i, row in enumerate(self.projectsMap):
self.projectsMap[i] = [make_tuple(j) for j in row]
else:
self.createDistanceMatrix()
df = | pd.DataFrame(self.projectsMap) | pandas.DataFrame |
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import korbinian
import sys
from multiprocessing import Pool
##########parameters#############
list_number = 2
data_dir = r"/Volumes/Musik/Databases"
data_dir = r"D:\Databases"
repeat_randomisation = False
seq_len = 2000
max_num_positions_mutated = 1600
n_mutations_array = np.arange(0, max_num_positions_mutated)
real_perc_aa_subst_array = n_mutations_array / seq_len
real_perc_aa_ident_array = 1 - real_perc_aa_subst_array
########################################################################################
# #
# Setup paths for input and output files #
# #
########################################################################################
List_rand_TM = os.path.normpath(os.path.join(data_dir, "summaries/{ln:02d}/List{ln:02d}_rand/List{ln:02d}_rand_TM.csv".format(ln=list_number)))
List_rand_nonTM = os.path.normpath(os.path.join(data_dir, "summaries/{ln:02d}/List{ln:02d}_rand/List{ln:02d}_rand_nonTM.csv".format(ln=list_number)))
pickle_with_artificial_AAIMONs = os.path.normpath(os.path.join(data_dir, "summaries/{ln:02d}/List{ln:02d}_rand/List{ln:02d}_rand_AAIMONs.pickle".format(ln=list_number)))
fig_AAIMON_vs_perc_aa_sub_png = os.path.normpath(os.path.join(data_dir, "summaries/{ln:02d}/List{ln:02d}_rand/List{ln:02d}_rand_AAIMON_vs_aa_sub.png".format(ln=list_number)))
fig_AAIMON_vs_perc_aa_sub_pdf = os.path.normpath(os.path.join(data_dir, "summaries/{ln:02d}/List{ln:02d}_rand/List{ln:02d}_rand_AAIMON_vs_aa_sub.pdf".format(ln=list_number)))
########################################################################################
# #
# Get the AA propensity series for TM and nonTM #
# #
########################################################################################
aa_prop_TM = | pd.Series.from_csv(List_rand_TM, sep="\t") | pandas.Series.from_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 17:03:06 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import seaborn as sns
plt.style.use('seaborn')
#con = pdblp.BCon(debug=True, port=8194, timeout=5000)
con = pdblp.BCon(debug=False, port=8194, timeout=5000)
con.start()
index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index' ,'CDAX Index' ,
'ASX Index', 'TPX Index', 'SHCOMP Index' ,
'SZCOMP Index', 'XUTUM Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
from datetime import date
start = '2004-1-1'
today = date.today().strftime('%Y%m%d')
firstday = '19991230'
window_long = 52
window_short = 13
ohlc_tickers = ['OPEN','HIGH','LOW', 'LAST']
prices_open = con.bdh(index_tickers, 'PX_OPEN', firstday, today)
prices_open.columns = [i[0] for i in prices_open.columns]
prices_open_int = prices_open.interpolate(method='linear')[index_tickers]
prices_open_w = prices_open_int.groupby(pd.Grouper(freq='W')).first()
prices_high = con.bdh(index_tickers, 'PX_HIGH', firstday, today)
prices_high.columns = [i[0] for i in prices_high.columns]
prices_high_int = prices_high.interpolate(method='linear')[index_tickers]
prices_high_w = prices_high_int.groupby(pd.Grouper(freq='W')).max()
prices_low = con.bdh(index_tickers, 'PX_LOW', firstday, today)
prices_low.columns = [i[0] for i in prices_low.columns]
prices_low_int = prices_low.interpolate(method='linear')[index_tickers]
prices_low_w = prices_low_int.groupby(pd.Grouper(freq='W')).min()
prices_close = con.bdh(index_tickers, 'PX_LAST', firstday, today)
prices_close.columns = [i[0] for i in prices_close.columns]
prices_close_int = prices_close.interpolate(method='linear')[index_tickers]
prices_close_w = prices_close_int.groupby( | pd.Grouper(freq='W') | pandas.Grouper |
"""
Abstract Base Class for PfLine.
"""
from __future__ import annotations
# from . import single, multi #<-- moved to end of file
from ..ndframelike import NDFrameLike
from ..mixins import PfLineText, PfLinePlot, OtherOutput
from ...prices.utils import duration_bpo
from ...prices import convert
from ...tools import nits
from ...tools.types import Quantity, Value
from abc import abstractmethod
from typing import Dict, Iterable, Mapping, Union, TYPE_CHECKING
import pandas as pd
# Developer notes: we would like to be able to handle 2 cases with volume AND financial
# information. We would like to...
# ... handle the situation where, for some timestamp, the volume q == 0 but the revenue
# r != 0, because this occasionally arises for the sourced volume, e.g. after buying
# and selling the same volume at unequal price. So: we want to be able to store q and r.
# ... keep price information even if the volume q == 0, because at a later time this price
# might still be needed, e.g. if a perfect hedge becomes unperfect. So: we want to be
# able to store q and p.
# Both cases can be catered to. The first as a 'SinglePfLine', where the timeseries for
# q and r are used in the instance creation. The price is not defined at the timestamp in
# the example, but can be calculated for other timestamps, and downsampling is also still
# possble.
# The second is a bit more complex. It is possible as a 'MultiPfLine'. This has then 2
# 'SinglePfLine' instances as its children: one made from each of the timeseries for q
# and p.
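# A rough sketch of those two cases (series names are hypothetical; the dict-based
# SinglePfLine constructor is the one used further down in this module):
#   case 1, q == 0 but r != 0:   sourced = SinglePfLine({"q": q_series, "r": r_series})
#   case 2, keep p where q == 0: build a MultiPfLine-like object from
#       SinglePfLine({"q": q_series}) and SinglePfLine({"p": p_series})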
if TYPE_CHECKING:
from .single import SinglePfLine # noqa
from .multi import MultiPfLine # noqa
class PfLine(NDFrameLike, Mapping, PfLineText, PfLinePlot, OtherOutput):
"""Class to hold a related energy timeseries. This can be volume timeseries with q
[MWh] and w [MW], a price timeseries with p [Eur/MWh] or both.
"""
def __new__(cls, data):
# Catch case where user actually called PfLine().
if cls is PfLine:
subclasses = [single.SinglePfLine, multi.MultiPfLine]
# If data is instance of subclass: return copy of the object.
for subcls in subclasses:
if isinstance(data, subcls):
return data # TODO: make copy instead
# Try passing data to subclasses to see if they can handle it.
for subcls in subclasses:
try:
return subcls(data)
except (ValueError, TypeError):
pass
raise NotImplementedError(
f"None of the subclasses ({', '.join([subcls.__name__ for subcls in subclasses])}) knows how to handle this data."
)
# Otherwise, do normal thing.
return super().__new__(cls)
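    # Example of this dispatch (assuming the dict input accepted by SinglePfLine, as used
    # elsewhere in this module): PfLine({"q": q_series}) ends up returning a SinglePfLine.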
# Additional abstract methods to be implemented by descendents.
@property
@abstractmethod
def children(self) -> Dict[str, PfLine]:
"""Children of this instance, if any."""
...
@property
@abstractmethod
def w(self) -> pd.Series:
"""(Flattened) power timeseries in [MW]."""
...
@property
@abstractmethod
def q(self) -> pd.Series:
"""(Flattened) energy timeseries in [MWh]."""
...
@property
@abstractmethod
def p(self) -> pd.Series:
"""(Flattened) price timeseries in [Eur/MWh]."""
...
@property
@abstractmethod
def r(self) -> pd.Series:
"""(Flattened) revenue timeseries in [Eur]."""
...
@property
@abstractmethod
def kind(self) -> str:
"""Kind of data that is stored in the instance. Possible values:
- 'q': volume data only; properties .q [MWh] and .w [MW] are available.
- 'p': price data only; property .p [Eur/MWh] is available.
- 'all': price and volume data; properties .q [MWh], .w [MW], .p [Eur/MWh], .r [Eur] are available.
"""
...
@abstractmethod
def df(
self, cols: Iterable[str], flatten: bool = True, has_units: bool = False
) -> pd.DataFrame:
"""DataFrame for portfolio line in default units.
Parameters
----------
cols : str, optional (default: all that are available)
The columns (w, q, p, r) to include in the dataframe.
flatten : bool, optional (default: True)
- If True, include only aggregated timeseries (4 or less; 1 per dimension).
- If False, include all children and their (intermediate and final)
aggregations.
        has_units : bool, optional (default: False)
- If True, return dataframe with ``pint`` units. (The unit can be extracted
as a column level with ``.pint.dequantify()``).
- If False, return dataframe with float values.
Returns
-------
pd.DataFrame
"""
...
@abstractmethod
def __bool__(self) -> bool:
"""Return True if object (i.e., its children) contains any non-zero data."""
...
# Implemented directly here.
@property
def summable(self) -> str:
"""Which attributes/colums of this PfLine can be added to those of other PfLines
to get consistent/correct new PfLine."""
return {"p": "p", "q": "q", "all": "qr"}[self.kind]
@property
def available(self) -> str: # which time series have values
"""Attributes/columns that are available. One of {'wq', 'p', 'wqpr'}."""
return {"p": "p", "q": "wq", "all": "wqpr"}[self.kind]
def flatten(self) -> SinglePfLine:
"""Return flat instance, i.e., without children."""
return single.SinglePfLine(self)
@property
def volume(self) -> SinglePfLine:
"""Return (flattened) volume-only PfLine."""
return single.SinglePfLine({"q": self.q})
@property
def price(self) -> SinglePfLine:
"""Return (flattened) price-only PfLine."""
return single.SinglePfLine({"p": self.p})
def _set_col_val(self, col: str, val: pd.Series | Value) -> SinglePfLine:
"""Set or update a timeseries and return the modified instance."""
# Get pd.Series of other, in correct unit.
if isinstance(val, float) or isinstance(val, int):
val = pd.Series(val, self.index)
elif isinstance(val, Quantity):
val = pd.Series(val.magnitude, self.index, dtype=nits.g(val.units))
if self.kind == "all" and col == "r":
raise NotImplementedError(
"Cannot set `r`; first select `.volume` or `.price` before applying `.set_r()`."
)
# Create pd.DataFrame.
# TODO: Use InOp
data = {col: val.astype(nits.pintunit(nits.name2unit(col)))}
if col in ["w", "q", "r"] and self.kind in ["p", "all"]:
data["p"] = self["p"]
elif col in ["p", "r"] and self.kind in ["q", "all"]:
data["q"] = self["q"]
df = pd.DataFrame(data)
return single.SinglePfLine(df)
def set_w(self, w: Union[pd.Series, float, int, Quantity]) -> SinglePfLine:
"""Set or update power timeseries [MW]; returns modified (and flattened) instance."""
return self._set_col_val("w", w)
def set_q(self, q: Union[pd.Series, float, int, Quantity]) -> SinglePfLine:
"""Set or update energy timeseries [MWh]; returns modified (and flattened) instance."""
return self._set_col_val("q", q)
def set_p(self, p: Union[pd.Series, float, int, Quantity]) -> SinglePfLine:
"""Set or update price timeseries [Eur/MWh]; returns modified (and flattened) instance."""
return self._set_col_val("p", p)
def set_r(self, r: Union[pd.Series, float, int, Quantity]) -> SinglePfLine:
"""Set or update revenue timeseries [MW]; returns modified (and flattened) instance."""
return self._set_col_val("r", r)
def set_volume(self, other: PfLine) -> SinglePfLine:
"""Set or update volume information; returns modified (and flattened) instance."""
if not isinstance(other, PfLine) or other.kind != "q":
raise ValueError(
"Can only set volume from a PfLine instance with kind=='q'. Use .volume to obtain from given instance."
)
return self.set_q(other.q)
def set_price(self, other: PfLine) -> SinglePfLine:
"""Set or update price information; returns modified (and flattened) instance."""
if not isinstance(other, PfLine) or other.kind != "p":
raise ValueError(
"Can only set price from a PfLine instance with kind=='p'. Use .price to obtain from given instance."
)
return self.set_p(other.p)
def po(self: PfLine, freq: str = "MS") -> pd.DataFrame:
"""Decompose the portfolio line into peak and offpeak values. Takes simple averages
of volume [MW] and price [Eur/MWh] - does not hedge!
Parameters
----------
freq : {'MS' (months, default), 'QS' (quarters), 'AS' (years)}
Frequency of resulting dataframe.
Returns
-------
pd.DataFrame
The dataframe shows a composition into peak and offpeak values.
"""
if self.index.freq not in ["15T", "H"]:
raise ValueError(
"Only PfLines with (quarter)hourly values can be turned into peak and offpeak values."
)
if freq not in ["MS", "QS", "AS"]:
raise ValueError(
f"Value of paramater ``freq`` must be one of {'MS', 'QS', 'AS'} (got: {freq})."
)
prods = ("peak", "offpeak")
# Get values.
dfs = []
if "w" in self.available:
vals = convert.tseries2bpoframe(self.w, freq)
vals.columns = | pd.MultiIndex.from_product([vals.columns, ["w"]]) | pandas.MultiIndex.from_product |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import json
import pandas as pd
url = "https://glyconnect.expasy.org/api/glycosylations"
# In[2]:
## send the correct params to query the api
params = {'taxonomy':'Severe acute respiratory syndrome coronavirus 2 (2019-nCoV)', 'protein': 'Recombinant Spike glycoprotein (HEK293) - DRAFT DATA'}
# Severe acute respiratory syndrome coronavirus2 (2019-nCoV)&protein=Recombinant Spike glycoprotein (HEK293)
response = requests.get(url ,params=params)
# In[3]:
my_response = response.json()
df_dump = pd.DataFrame()
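# Flatten each result's nested protein.uniprots records into one row and collect them
# (the response structure is taken from the field access in the loop below).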
for r in range(len(my_response['results'])):
df_results_uniprots = | pd.DataFrame(my_response['results'][r]['protein']['uniprots'],index=[r]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# PART II: cluster consumption-behaviour feature data with the K-Means algorithm
"""
Created on Fri Dec 20 20:39:11 2019
@author: winhl
"""
import pandas as pd
inputfile = 'C:/Users/winhl/Downloads/kongtiao/喂丝间.xlsx'
#data=pd.DataFrame(columns=('时间','1#','2#','3#'))
df_tmp = []
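# Read every second column (0, 2, 4, 6) of the workbook, assumed to hold the timestamp
# and the three sensor channels, keeping the first 100000 data rows of each.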
for i in range(0,7,2):
    temp1 = pd.read_excel(inputfile, nrows=100000, skiprows=1, header=None, usecols=[i])  # read the data
#data = pd.concat([data,temp1],axis=1)
df_tmp.append(temp1.values)
# del temp1
data = | pd.DataFrame(df_tmp) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import tensorflow_decision_forests as tfdf
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd
import gradio as gr
import urllib
input_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income"
input_column_header = "income_level"
#Load data
BASE_PATH = input_path
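# Column names are parsed from the accompanying ".names" metadata file: comment lines
# starting with "|" are skipped, the text before ":" is kept with spaces replaced by
# underscores, and the first two entries (assumed not to be data columns) are dropped.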
CSV_HEADER = [ l.decode("utf-8").split(":")[0].replace(" ", "_")
for l in urllib.request.urlopen(f"{BASE_PATH}.names")
if not l.startswith(b"|")][2:]
CSV_HEADER.append(input_column_header)
train_data = | pd.read_csv(f"{BASE_PATH}.data.gz", header=None, names=CSV_HEADER) | pandas.read_csv |
# -*- coding: utf-8 -*-
# import pytest
import pandas as pd
import pandas.testing as tm
import xnd
from pandas.core.internals import ExtensionBlock
import numpy as np
import xndframes as xf
TEST_ARRAY = ["Test", "string", None]
def test_constructors():
v1 = xf.XndframesArray(TEST_ARRAY)
assert isinstance(v1.dtype, xf.XndframesDtype)
v2 = xf.XndframesArray(np.array(TEST_ARRAY))
assert isinstance(v2.dtype, xf.XndframesDtype)
v3 = xf.XndframesArray(xnd.xnd(TEST_ARRAY))
assert isinstance(v3.dtype, xf.XndframesDtype)
def test_concatenate_blocks():
v1 = xf.XndframesArray(TEST_ARRAY)
sa = pd.Series(v1)
result = pd.concat([sa, sa], ignore_index=True)
EXPECTED_ARRAY = xnd.xnd(["Test", "string", None, "Test", "string", None])
expected = pd.Series(xf.XndframesArray(EXPECTED_ARRAY))
tm.assert_series_equal(result, expected)
def test_series_constructor():
v = xf.XndframesArray(TEST_ARRAY)
result = pd.Series(v)
assert result.dtype == v.dtype
assert isinstance(result._data.blocks[0], ExtensionBlock)
def test_dataframe_constructor():
v = xf.XndframesArray(TEST_ARRAY)
df = | pd.DataFrame({"A": v}) | pandas.DataFrame |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
# ----------------------------------------------------------------------------------------------------- First-level figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind the first first-level overview figure (missing timestamps per business type)
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '问题数量占比'])
    # For each business type: [SQL counting rows with missing timestamps, SQL counting all rows]
bus_dic = {
# '患者基本信息': ['select count(distinct caseid) as num from overall where in_time is null or out_time is null','select count(distinct caseid) as num from overall'],
'入院时间': ['select count(distinct caseid) as num from overall where in_time is null ',
'select count(distinct caseid) as num from overall'],
'出院时间': ['select count(distinct caseid) as num from overall where out_time is null',
'select count(distinct caseid) as num from overall'],
'手术': ['select count(1) as num from oper2 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from oper2 '],
'给药': ['select count(1) as num from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ','select count(1) as num from ANTIBIOTICS '],
'入出转': ['select count(1) as num from DEPARTMENT where BEGINTIME is null or ENDTIME is null ','select count(1) as num from DEPARTMENT '],
'菌检出': ['select count(1) as num from BACTERIA where REQUESTTIME is null ','select count(1) as num from BACTERIA '],
'体温': ['select count(1) as num from TEMPERATURE where RECORDDATE is null ','select count(1) as num from TEMPERATURE '],
'药敏': ['select count(1) as num from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from DRUGSUSCEPTIBILITY '],
'检查': ['select count(1) as num from EXAM where EXAM_DATE is null ','select count(1) as num from EXAM '],
'生化': ['select count(1) as num from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from ROUTINE2 '],
'三管': ['select count(1) as num from TREATMENT1 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from TREATMENT1 '],
}
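    # Run both queries per business type and record [problem count, total count, problem
    # percentage]; if a query fails, -1 is recorded so the dashboard still renders.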
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图一',bus)
return res_数据时间缺失及汇总
# Update first-level figure 1
@app.callback(
Output('first_level_first_fig','figure'),
Output('general_situation_first_level_first_fig_data','data'),
Input('general_situation_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(general_situation_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
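        # Caching pattern shared by the callbacks in this module: the JSON held in dcc.Store is
        # reused unless the hospital (hosname) -- or, for the time-filtered figures below, the
        # selected window -- has changed, in which case the data is re-queried from the database.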
if general_situation_first_level_first_fig_data is None:
general_situation_first_level_first_fig_data = {}
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
general_situation_first_level_first_fig_data = json.loads(general_situation_first_level_first_fig_data)
if db_con_url['hosname'] != general_situation_first_level_first_fig_data['hosname']:
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
first_level_first_fig_data = pd.read_json(general_situation_first_level_first_fig_data['first_level_first_fig_data'], orient='split')
general_situation_first_level_first_fig_data = dash.no_update
#
fig_概览一级_时间缺失 = make_subplots(specs=[[{"secondary_y": True}]])
res_数据时间缺失及汇总 = first_level_first_fig_data.sort_values(['问题数'], ascending=False)
        # Missing count per business type -- bar chart
fig_概览一级_时间缺失.add_trace(
go.Bar(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数'], name="问题数量",
marker_color=px.colors.qualitative.Dark24, ),
secondary_y=False,
)
        # Missing-rate per business type -- line chart
fig_概览一级_时间缺失.add_trace(
go.Scatter(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数量占比'], name="问题数量占比", ),
secondary_y=True,
)
        # X-axis title
fig_概览一级_时间缺失.update_xaxes(tickangle=45,title_text="业务指标")
        # Y-axis titles
fig_概览一级_时间缺失.update_yaxes(title_text="缺失数量", secondary_y=False)
fig_概览一级_时间缺失.update_yaxes(title_text="缺失占比(%)", secondary_y=True)
        # Horizontal legend and its position
fig_概览一级_时间缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
        # Figure margins
fig_概览一级_时间缺失.update_layout(margin=dict(l=20, r=20, t=20, b=20), )
return fig_概览一级_时间缺失,general_situation_first_level_first_fig_data
# Download the detail rows behind first-level figure 1
@app.callback(
Output('first_level_first_fig_data_detail', 'data'),
Input('first_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
prevent_initial_call=True,
)
def download_first_level_first_fig_data_detail(n_clicks,db_con_url):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
engine = create_engine(db_con_url['db'])
bus_dic = {
'入院时间': 'select * from overall where in_time is null ',
'出院时间': 'select * from overall where out_time is null',
'手术': 'select * from oper2 where BEGINTIME is null or ENDTIME is null ',
'给药': 'select * from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ',
'入出转': 'select * from DEPARTMENT where BEGINTIME is null or ENDTIME is null ',
'菌检出': 'select * from BACTERIA where REQUESTTIME is null ',
'药敏': 'select * from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ',
'检查': 'select * from EXAM where EXAM_DATE is null',
'生化': 'select * from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ',
'三管': 'select * from TREATMENT1 where BEGINTIME is null or ENDTIME is null ',
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key],con=engine)
if temp.shape[0]>0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'],columns=[key])
error_df.to_excel(writer, sheet_name = key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务时间缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- First-level figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind the second first-level overview figure (missing key fields per business type)
def get_first_lev_second_fig_date(engine,btime,etime):
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '关键字缺失占比'])
bus_dic = {'用药目的': [f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (GOAL is null or replace(GOAL,' ','') is null)",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'药敏结果': [f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null",
f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'手术名称': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (OPER_NAME is null or replace(OPER_NAME,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术切口等级': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'出入院科室': [f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' and ( IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null )",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( DEPT is null or replace(DEPT,' ','') is null)",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "]
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图二', bus)
return res_数据关键字缺失及汇总
# Update first-level figure 2
@app.callback(
Output('first_level_second_fig','figure'),
Output('general_situation_first_level_second_fig_data','data'),
Input('general_situation_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_second_fig(general_situation_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_first_level_second_fig_data is None:
general_situation_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
general_situation_first_level_second_fig_data = json.loads(general_situation_first_level_second_fig_data)
if db_con_url['hosname'] != general_situation_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps( general_situation_first_level_second_fig_data)
else:
if general_situation_first_level_second_fig_data['btime'] != btime or general_situation_first_level_second_fig_data['etime'] != etime:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(general_situation_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
general_situation_first_level_second_fig_data = dash.no_update
print("一级第二张图数据:")
print(first_level_second_fig_data)
fig_概览一级_关键字缺失 = make_subplots()
res_数据关键字缺失及汇总 = first_level_second_fig_data.sort_values(['关键字缺失占比'], ascending=False)
fig_概览一级_关键字缺失.add_trace(
go.Bar(x=res_数据关键字缺失及汇总['业务类型'], y=res_数据关键字缺失及汇总['关键字缺失占比'], marker_color=px.colors.qualitative.Dark24, )
)
fig_概览一级_关键字缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig_概览一级_关键字缺失.update_yaxes(title_text="关键字缺失占比(%)")
fig_概览一级_关键字缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_关键字缺失,general_situation_first_level_second_fig_data
# Download the detail rows behind first-level figure 2
@app.callback(
Output('first_level_second_fig_data_detail', 'data'),
Input('first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_second_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'用药目的': f"select * from ANTIBIOTICS where (GOAL is null or replace(GOAL,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'药敏结果': f"select * from drugsusceptibility where (SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null) and REQUESTTIME is not null and substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}' ",
'手术名称': f"select * from oper2 where (OPER_NAME is null or replace(OPER_NAME,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}'",
'手术切口等级': f"select * from oper2 where (WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'出入院科室': f" select * from overall where (IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null) and in_time is not null and substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}' ",
'入出转科室': f"select * from department where (DEPT is null or replace(DEPT,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务关键字缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- First-level figure 3 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind the third first-level overview figure (department-code mapping problems)
def get_first_lev_third_fig_date(engine,btime,etime):
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '科室信息映射问题占比'])
bus_dic = {'入院科室': [f" select count(1) as num from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.in_dept = t2.code) and t1.in_dept is not null and (substr(t1.IN_TIME,1,7)>='{btime}' and substr(t1.IN_TIME,1,7)<='{etime}') ",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'出院科室': [
f" select count(1) as num from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.out_dept = t2.code) and t1.out_dept is not null and (substr(t1.IN_TIME,1,7)>='{btime}' and substr(t1.IN_TIME,1,7)<='{etime}') ",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [
f" select count(1) as num from department t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'抗菌药物医嘱科室': [
f" select count(1) as num from ANTIBIOTICS t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术科室': [
f" select count(1) as num from OPER2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from OPER2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'菌检出送检科室': [
f" select count(1) as num from BACTERIA t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from BACTERIA where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'药敏送检科室': [
f" select count(1) as num from DRUGSUSCEPTIBILITY t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from DRUGSUSCEPTIBILITY where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'体温科室': [
f" select count(1) as num from TEMPERATURE t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}') ",
f"select count(1) as num from TEMPERATURE where substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' "],
'治疗科室': [
f" select count(1) as num from TREATMENT1 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from TREATMENT1 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'常规科室': [
f" select count(1) as num from ROUTINE2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from ROUTINE2 where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0], con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1], con=engine)['num'][0]
res_数据科室信息缺失及汇总.loc[res_数据科室信息缺失及汇总.shape[0]] = [bus, count_时间为空, count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据科室信息缺失及汇总.loc[res_数据科室信息缺失及汇总.shape[0]] = [bus, -1, -1, -1]
return res_数据科室信息缺失及汇总
# Update first-level figure 3
@app.callback(
Output('first_level_third_fig','figure'),
Output('general_situation_first_level_third_fig_data','data'),
Input('general_situation_first_level_third_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(general_situation_first_level_third_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_first_level_third_fig_data is None:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data={}
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_third_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
general_situation_first_level_third_fig_data = json.loads(general_situation_first_level_third_fig_data)
if db_con_url['hosname'] != general_situation_first_level_third_fig_data['hosname']:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_third_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
if general_situation_first_level_third_fig_data['btime'] != btime or general_situation_first_level_third_fig_data['etime'] != etime:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
first_level_third_fig_data = pd.read_json(general_situation_first_level_third_fig_data['first_level_third_fig_data'], orient='split')
general_situation_first_level_third_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
res_数据科室信息缺失及汇总 = first_level_third_fig_data.sort_values(['科室信息映射问题占比'], ascending=False)
fig_概览一级_科室映射缺失.add_trace(
go.Bar(x=res_数据科室信息缺失及汇总['业务类型'], y=res_数据科室信息缺失及汇总['科室信息映射问题占比'], marker_color=px.colors.qualitative.Dark24 )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="科室信息映射问题占比(%)")
fig_概览一级_科室映射缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_科室映射缺失,general_situation_first_level_third_fig_data
# Download the detail rows behind first-level figure 3
@app.callback(
Output('first_level_third_fig_data_detail', 'data'),
Input('first_level_third_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'入院科室': f" select * from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.in_dept = t2.code) and t1.in_dept is not null and substr(t1.IN_TIME,1,10)>='{btime}' and substr(t1.IN_TIME,1,10)<='{etime}' ",
'出院科室': f" select * from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.out_dept = t2.code) and t1.out_dept is not null and substr(t1.IN_TIME,1,10)>='{btime}' and substr(t1.IN_TIME,1,10)<='{etime}' ",
'入出转科室': f" select * from department t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and substr(t1.BEGINTIME,1,10) >='{btime}' and substr(t1.BEGINTIME,1,10) <='{etime}' ",
'抗菌药物医嘱科室': f" select * from ANTIBIOTICS t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'手术科室': f" select * from OPER2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'菌检出送检科室': f" select * from BACTERIA t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
'药敏送检科室': f" select * from DRUGSUSCEPTIBILITY t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
'体温科室': " select * from TEMPERATURE t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.RECORDDATE,1,10)>='{btime}' and substr(t1.RECORDDATE,1,10)<='{etime}') ",
'治疗科室': f" select * from TREATMENT1 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'常规科室': f" select * from ROUTINE2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}科室映射缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Second-level figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch counts of second-level business-logic consistency problems
def get_second_level_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.DataFrame(columns=['问题数据数量', '问题'])
ques_dic = {
'出院时间小于等于入院时间' : f""" select count(1) from overall where in_time is not null and out_time is not null and in_time >= out_time and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}')""",
'存在测试患者数据' : f""" select count(1) from overall where (pname like '%测试%' or pname like '%test%') and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}') """,
'存在住院时长超四个月患者' : f""" select count(1) from overall where (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)) and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}')
""",
'存在住院天数不足一天患者' : f""" select count(1) from overall where (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 ) and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}') """,
'转科时间在出入院时间之外' : f""" select count(1) from department t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'转入时间大于等于转出时间' : f""" select count(1) from department where BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') """,
'治疗开始时间大于等于结束时间' : f""" select count(1) from TREATMENT1 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') """,
'治疗时间在出入院时间之外' : f""" select count(1) from TREATMENT1 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'医嘱开始时间大于结束时间' : f""" select count(1) from ANTIBIOTICS where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}')""",
'医嘱时间在出入院时间之外' : f""" select count(1) from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'送检时间大于等于报告时间' : f""" select count(1) from BACTERIA where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}')""",
'送检时间在出入院时间之外' : f""" select count(1) from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
'药敏送检时间大于等于报告时间' : f""" select count(1) from DRUGSUSCEPTIBILITY where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and ( substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' )""",
'药敏送检时间在出入院时间之外' : f""" select count(1) from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
'手术开始时间大于结束时间' : f""" select count(1) from OPER2 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and ( substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' )""",
'手术时间在出入院时间之外' : f""" select count(1) from OPER2 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'OPERID重复' : f""" select count(1) from oper2 where operid in (select operid from oper2 group by operid having count(operid)>1) and ( substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' ) order by operid """,
'体温值异常' : f""" select count(1) from TEMPERATURE where (VALUE > 46 or VALUE < 34 or VALUE is null) and ( substr(RECORDDATE,1,7) >='{btime}' and substr(RECORDDATE,1,7) <='{etime}') """,
'体温测量时间在出入院时间之外' : f""" select count(1) from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and ( substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
""",
'入出转入科时间重复': f""" select count(1) from department t1,
(select caseid ,begintime from department where substr(begintime,1,7)>='{btime}' and substr(begintime,1,7)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
}
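    # Each entry above counts the rows that violate one consistency rule within the selected
    # month window [btime, etime]; failed queries are recorded as -1 in the loop below.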
for ques in ques_dic:
try:
ques_df = pd.read_sql(ques_dic[ques], con=engine)
ques_df.columns = ['问题数据数量']
ques_df['问题'] = ques
res_业务逻辑问题数据汇总 = res_业务逻辑问题数据汇总.append( ques_df )
except:
res_业务逻辑问题数据汇总.loc[res_业务逻辑问题数据汇总.shape[0]] = [ -1 , ques ]
print('二级图 ' , ques)
return res_业务逻辑问题数据汇总
# def get_second_level_fig_date(engine,btime,etime):
# res_业务逻辑问题数据汇总 = pd.DataFrame(columns=['问题数据数量', '问题','month'])
# ques_dic = {
# '出院时间小于等于入院时间' : f""" select count(1) as 问题数据数量, '出院时间小于等于入院时间' as 问题, substr(in_time,1,7) as month from overall where in_time is not null and out_time is not null and in_time >= out_time group by substr(in_time,1,7) """,
# '存在测试患者数据' : f""" select count(1) as 问题数据数量, '存在测试患者数据' as 问题, substr(in_time,1,7) as month from overall where (pname like '%测试%' or pname like '%test%') group by substr(in_time,1,7) """,
# '存在住院时长超四个月患者' : f""" select count(1) as 问题数据数量, '存在住院时长超四个月患者' as 问题, substr(in_time,1,7) as month from overall where
# (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
# or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120))
# group by substr(in_time,1,7) )
# """,
# '存在住院天数不足一天患者' : f""" select count(1) as 问题数据数量, '存在住院天数不足一天患者' as 问题, substr(in_time,1,7) as month from overall where
# (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 )
# group by substr(in_time,1,7) """,
# '转科时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '转科时间在出入院时间之外' as 问题, substr(t1.BEGINTIME,1,7) as month from department t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# '转入时间大于等于转出时间' : f""" select count(1) as 问题数据数量, '转入时间大于等于转出时间' as 问题, substr(t1.BEGINTIME,1,7) as month from department where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME
# group by substr( BEGINTIME,1,7)
# """,
#
# '治疗开始时间大于等于结束时间' : f""" select count(1) as 问题数据数量, '治疗开始时间大于等于结束时间' as 问题, substr(BEGINTIME,1,7) as month from TREATMENT1 where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME
# group by substr(BEGINTIME,1,7)
# """,
# '治疗时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '治疗时间在出入院时间之外' as 问题, substr(t1.BEGINTIME,1,7) as month from TREATMENT1 t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# '医嘱开始时间大于结束时间' : f""" select count(1) as 问题数据数量, '医嘱开始时间大于结束时间' as 问题, substr(BEGINTIME,1,7) as month from ANTIBIOTICS where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME
# group by substr( BEGINTIME,1,7)
# """,
# '医嘱时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '医嘱时间在出入院时间之外' as 问题, substr(t1.BEGINTIME,1,7) as month from ANTIBIOTICS t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# '送检时间大于等于报告时间' : f""" select count(1) as 问题数据数量, '送检时间大于等于报告时间' as 问题, substr(REQUESTTIME,1,7) as month from BACTERIA where
# REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME
# group by substr( REQUESTTIME,1,7)
# """,
# '送检时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '送检时间在出入院时间之外' as 问题, substr(t1.REQUESTTIME,1,7) as month from BACTERIA t1,overall t2 where
# ( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
# group by substr(t1.REQUESTTIME,1,7)
# """,
# '药敏送检时间大于等于报告时间' : f""" select count(1) as 问题数据数量, '药敏送检时间大于等于报告时间' as 问题, substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where
# REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME
# group by substr( REQUESTTIME,1,7)
# """,
# '药敏送检时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '药敏送检时间在出入院时间之外' as 问题, substr( t1.REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY t1,overall t2 where
# ( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
# group by substr(t1.REQUESTTIME,1,7)
# """,
# '手术开始时间大于结束时间' : f""" select count(1) as 问题数据数量, '手术开始时间大于结束时间' as 问题, substr(BEGINTIME,1,7) as month from OPER2 where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME
# group by substr( BEGINTIME,1,7)
# """,
# '手术时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '手术时间在出入院时间之外' as 问题, substr( t1.BEGINTIME,1,7) as month from OPER2 t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# 'OPERID重复' : f""" select count(1) as 问题数据数量, 'OPERID重复' as 问题, substr(BEGINTIME,1,7) as month from oper2 where
# operid in (select operid from oper2 group by operid having count(operid)>1)
# group by substr( BEGINTIME,1,7)
# """,
# '体温值异常' : f""" select count(1) as 问题数据数量, '体温值异常' as 问题, substr(RECORDDATE,1,7) as month from TEMPERATURE where
# (VALUE > 46 or VALUE < 34 or VALUE is null) group by substr( RECORDDATE,1,7) """,
# '体温测量时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '体温测量时间在出入院时间之外' as 问题, substr(t1.RECORDDATE,1,7) as month from TEMPERATURE t1,overall t2 where
# ( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
# group by substr( t1.RECORDDATE,1,7)
# """,
# }
#
# for ques in ques_dic:
# try:
# # ques_df = pd.read_sql(ques_dic[ques], con=engine)
# # ques_df.columns = ['问题数据数量']
# # ques_df['问题'] = ques
# # res_业务逻辑问题数据汇总 = res_业务逻辑问题数据汇总.append( ques_df )
# res_业务逻辑问题数据汇总 = res_业务逻辑问题数据汇总.append(pd.read_sql(ques_dic[ques], con=engine) )
# except:
# res_业务逻辑问题数据汇总.loc[res_业务逻辑问题数据汇总.shape[0]] = [ -1 , ques ,]
# print('二级图 ' , ques)
# return res_业务逻辑问题数据汇总
# Fetch detail data for the second-level business-logic problems (see the download callback below)
# Update the second-level figure
@app.callback(
Output('second_level_fig','figure'),
Output('general_situation_secod_level_fig_data','data'),
Input('general_situation_secod_level_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(general_situation_secod_level_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_secod_level_fig_data is None:
general_situation_secod_level_fig_data = {}
second_level_fig_date = get_second_level_fig_date(engine, btime, etime)
general_situation_secod_level_fig_data['second_level_fig_date'] = second_level_fig_date.to_json(orient='split', date_format='iso')
general_situation_secod_level_fig_data['hosname'] = db_con_url['hosname']
general_situation_secod_level_fig_data['btime'] = btime
general_situation_secod_level_fig_data['etime'] = etime
general_situation_secod_level_fig_data = json.dumps(general_situation_secod_level_fig_data)
else:
general_situation_secod_level_fig_data = json.loads(general_situation_secod_level_fig_data)
if db_con_url['hosname'] != general_situation_secod_level_fig_data['hosname']:
second_level_fig_date = get_second_level_fig_date(engine, btime, etime)
general_situation_secod_level_fig_data['second_level_fig_date'] = second_level_fig_date.to_json(orient='split',date_format='iso')
general_situation_secod_level_fig_data['hosname'] = db_con_url['hosname']
general_situation_secod_level_fig_data['btime'] = btime
general_situation_secod_level_fig_data['etime'] = etime
general_situation_secod_level_fig_data = json.dumps(general_situation_secod_level_fig_data)
else:
if general_situation_secod_level_fig_data['btime'] != btime or general_situation_secod_level_fig_data['etime'] != etime:
second_level_fig_date = get_second_level_fig_date(engine, btime, etime)
general_situation_secod_level_fig_data['second_level_fig_date'] = second_level_fig_date.to_json(orient='split',date_format='iso')
general_situation_secod_level_fig_data['btime'] = btime
general_situation_secod_level_fig_data['etime'] = etime
general_situation_secod_level_fig_data = json.dumps(general_situation_secod_level_fig_data)
else:
second_level_fig_date = pd.read_json(general_situation_secod_level_fig_data['second_level_fig_date'], orient='split')
general_situation_secod_level_fig_data = dash.no_update
print('二级图数据:')
print(second_level_fig_date)
fig_概览二级 = second_level_fig_date
fig_概览二级_业务逻辑问题 = make_subplots()
fig_概览二级 = fig_概览二级.sort_values(['问题数据数量'],ascending=False)
fig_概览二级_业务逻辑问题.add_trace(
go.Bar(x=fig_概览二级['问题'], y=fig_概览二级['问题数据数量'], marker_color=px.colors.qualitative.Dark24, )
)
fig_概览二级_业务逻辑问题.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig_概览二级_业务逻辑问题.update_yaxes(title_text="问题数据数量", )
fig_概览二级_业务逻辑问题.update_xaxes(title_text="业务问题", )
return fig_概览二级_业务逻辑问题,general_situation_secod_level_fig_data
# Download the detail rows behind the second-level figure
@app.callback(
Output('second_level_fig_date_detail','data'),
Input('second_level_fig_data_detail_down', 'n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_second_level_fig(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
ques_dic = {
'出院时间小于等于入院时间': f""" select * from overall where in_time is not null and out_time is not null and in_time >= out_time and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}')""",
'存在测试患者数据': f""" select * from overall where (pname like '%测试%' or pname like '%test%') and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}') """,
'存在住院时长超四个月患者': f""" select * from overall where (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)) and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}')
""",
'存在住院天数不足一天患者': f""" select * from overall where (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 ) and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}') """,
'转科时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from department t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'转入时间大于等于转出时间': f""" select * from department where BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME and (substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}') """,
'治疗开始时间大于等于结束时间': f""" select * from TREATMENT1 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME and (substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}') """,
'治疗时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TREATMENT1 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'医嘱开始时间大于结束时间': f""" select * from ANTIBIOTICS where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and (substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}')""",
'医嘱时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'送检时间大于等于报告时间': f""" select * from BACTERIA where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and (substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}')""",
'送检时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}')
""",
'药敏送检时间大于等于报告时间': f""" select * from DRUGSUSCEPTIBILITY where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and ( substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}' )""",
'药敏送检时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}')
""",
'手术开始时间大于结束时间': f""" select * from OPER2 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and ( substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' )""",
'手术时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from OPER2 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'OPERID重复': f""" select * from oper2 where operid in (select operid from oper2 group by operid having count(operid)>1) and ( substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ) order by operid """,
'体温值异常': f""" select * from TEMPERATURE where (VALUE > 46 or VALUE < 34 or VALUE is null) and ( substr(RECORDDATE,1,10) >='{btime}' and substr(RECORDDATE,1,10) <='{etime}') """,
'体温测量时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and ( substr(t1.RECORDDATE,1,10)>='{btime}' and substr(t1.RECORDDATE,1,10)<='{etime}')
""",
                '入出转入科时间重复(按日)': f""" select t1.* from department t1,
(select caseid ,begintime from department where substr(begintime,1,10)>='{btime}' and substr(begintime,1,10)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
'入出转入科时间重复': f""" select t1.* from department t1,
(select caseid ,begintime from department where substr(begintime,1,7)>='{btime}' and substr(begintime,1,7)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in ques_dic.keys():
try:
temp = pd.read_sql(ques_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}全院数据逻辑问题明细.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Third-level figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get the data for the first third-level overview figure
def get_third_level_first_fig_date(engine):
res_全业务 = pd.DataFrame(columns=['num', 'month', '业务类型'])
bus_dic = {
'入院人数':"select count(distinct caseid) as num ,substr(in_time,1,7) as month,'入院人数' as 业务类型 from overall where in_time is not null group by substr(in_time,1,7) having substr(in_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(in_time,1,7) >= '1990-01' order by substr(in_time,1,7)",
'出院人数':"select count(distinct caseid) as num ,substr(out_time,1,7) as month,'出院人数' as 业务类型 from overall where in_time is not null and out_time is not null group by substr(out_time,1,7) having substr(out_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(out_time,1,7) >= '1990-01' order by substr(out_time,1,7)",
'抗菌药物医嘱数':"select count( distinct CASEID||ORDERNO||ANAME ) as num ,substr(BEGINTIME,1,7) as month ,'抗菌药物医嘱数' as 业务类型 from antibiotics where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'手术台数':"select count( distinct CASEID||OPERID ) as num ,substr(BEGINTIME,1,7) as month,'手术台数' as 业务类型 from oper2 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'菌检出结果记录数':"select count( distinct CASEID||TESTNO||BACTERIA ) as num ,substr(REQUESTTIME,1,7) as month ,'菌检出结果记录数' as 业务类型 from bacteria where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
'药敏结果记录数':"select count( distinct CASEID||TESTNO||BACTERIA||ANTIBIOTICS ) as num ,substr(REQUESTTIME,1,7) as month ,'药敏结果记录数' as 业务类型 from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
'体温测量数':"select count( distinct CASEID||RECORDDATE ) as num ,substr(RECORDDATE,1,7) as month ,'体温测量数' as 业务类型 from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) having substr(RECORDDATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(RECORDDATE,1,7) >= '1990-01' order by substr(RECORDDATE,1,7)",
'入出转记录数':"select count( distinct CASEID||BEGINTIME||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'入出转记录数' as 业务类型 from department where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'常规结果记录数':"select count( distinct CASEID||TESTNO||RINDEX ) as num ,substr(REQUESTTIME,1,7) as month ,'常规结果记录数' as 业务类型 from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
'影像检查记录数':"select count( distinct CASEID||EXAM_NO ) as num ,substr(EXAM_DATE,1,7) as month ,'影像检查记录数' as 业务类型 from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) having substr(EXAM_DATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(EXAM_DATE,1,7) >= '1990-01' order by substr(EXAM_DATE,1,7)",
'治疗记录数':"select count( distinct CASEID||TNO||TTYPE||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'治疗记录数' as 业务类型 from TREATMENT1 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'中心静脉插管记录数':"select count(1) as num ,substr(BEGINTIME,1,7) as month,'中心静脉插管记录数' as 业务类型 from treatment1 where TTYPE like '%中心%静脉%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
'呼吸机记录数':"select count(1) as num ,substr(BEGINTIME,1,7) as month,'呼吸机记录数' as 业务类型 from treatment1 where TTYPE like '%呼吸机%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
'泌尿道插管记录数':"select count(1) as num ,substr(BEGINTIME,1,7) as month,'泌尿道插管记录数' as 业务类型 from treatment1 where TTYPE like '%泌尿道%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
}
for bus in bus_dic:
res_全业务 = res_全业务.append(pd.read_sql(bus_dic[bus],con=engine))
return res_全业务
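
# A minimal alternative sketch (not the author's code): DataFrame.append is deprecated and was
# removed in pandas 2.0, so the same accumulation can collect each query result and call
# pd.concat once. `queries` stands in for the bus_dic above.
def get_third_level_first_fig_date_concat(engine, queries):
    frames = [pd.read_sql(sql, con=engine) for sql in queries.values()]
    return pd.concat(frames, ignore_index=True)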
# Get the data for the first third-level overview figure (week/month variant of the function above)
# def get_third_level_first_fig_date(engine,date_type):
# res_全业务 = pd.DataFrame(columns=['num', 'month', '业务类型'])
# if date_type == 'month':
# bus_dic = {
# '入院人数': "select count(distinct caseid) as num ,substr(in_time,1,7) as month,'入院人数' as 业务类型 from overall where in_time is not null group by substr(in_time,1,7) having substr(in_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(in_time,1,7) >= '1990-01' order by substr(in_time,1,7)",
# '出院人数': "select count(distinct caseid) as num ,substr(out_time,1,7) as month,'出院人数' as 业务类型 from overall where in_time is not null and out_time is not null group by substr(out_time,1,7) having substr(out_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(out_time,1,7) >= '1990-01' order by substr(out_time,1,7)",
# '抗菌药物医嘱数': "select count( distinct CASEID||ORDERNO||ANAME ) as num ,substr(BEGINTIME,1,7) as month ,'抗菌药物医嘱数' as 业务类型 from antibiotics where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '手术台数': "select count( distinct CASEID||OPERID ) as num ,substr(BEGINTIME,1,7) as month,'手术台数' as 业务类型 from oper2 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '菌检出结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA ) as num ,substr(REQUESTTIME,1,7) as month ,'菌检出结果记录数' as 业务类型 from bacteria where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
# '药敏结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA||ANTIBIOTICS ) as num ,substr(REQUESTTIME,1,7) as month ,'药敏结果记录数' as 业务类型 from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
# '体温测量数': "select count( distinct CASEID||RECORDDATE ) as num ,substr(RECORDDATE,1,7) as month ,'体温测量数' as 业务类型 from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) having substr(RECORDDATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(RECORDDATE,1,7) >= '1990-01' order by substr(RECORDDATE,1,7)",
# '入出转记录数': "select count( distinct CASEID||BEGINTIME||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'入出转记录数' as 业务类型 from department where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '常规结果记录数': "select count( distinct CASEID||TESTNO||RINDEX ) as num ,substr(REQUESTTIME,1,7) as month ,'常规结果记录数' as 业务类型 from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
# '影像检查记录数': "select count( distinct CASEID||EXAM_NO ) as num ,substr(EXAM_DATE,1,7) as month ,'影像检查记录数' as 业务类型 from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) having substr(EXAM_DATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(EXAM_DATE,1,7) >= '1990-01' order by substr(EXAM_DATE,1,7)",
# '治疗记录数': "select count( distinct CASEID||TNO||TTYPE||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'治疗记录数' as 业务类型 from TREATMENT1 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '中心静脉插管记录数': "select count(1) as num ,substr(BEGINTIME,1,7) as month,'中心静脉插管记录数' as 业务类型 from treatment1 where TTYPE like '%中心%静脉%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
# '呼吸机记录数': "select count(1) as num ,substr(BEGINTIME,1,7) as month,'呼吸机记录数' as 业务类型 from treatment1 where TTYPE like '%呼吸机%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
# '泌尿道插管记录数': "select count(1) as num ,substr(BEGINTIME,1,7) as month,'泌尿道插管记录数' as 业务类型 from treatment1 where TTYPE like '%泌尿道%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
# }
# for bus in bus_dic:
# temp = pd.read_sql(bus_dic[bus], con=engine)
# res_全业务 = res_全业务.append(temp)
# return res_全业务
# else:
# bus_dic = {
# '入院人数': "select count(distinct caseid) as num ,to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'入院人数' as 业务类型 from overall where in_time is not null group by to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '出院人数': "select count(distinct caseid) as num ,to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'出院人数' as 业务类型 from overall where in_time is not null and out_time is not null group by to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '抗菌药物医嘱数': "select count( distinct CASEID||ORDERNO||ANAME ) as num , to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'抗菌药物医嘱数' as 业务类型 from antibiotics where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '手术台数': "select count( distinct CASEID||OPERID ) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'手术台数' as 业务类型 from oper2 where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '菌检出结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA ) as num , to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'菌检出结果记录数' as 业务类型 from bacteria where REQUESTTIME is not null group by to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '药敏结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA||ANTIBIOTICS ) as num ,to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'药敏结果记录数' as 业务类型 from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '体温测量数': "select count( distinct CASEID||RECORDDATE ) as num ,to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'体温测量数' as 业务类型 from TEMPERATURE where RECORDDATE is not null group by to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '入出转记录数': "select count( distinct CASEID||BEGINTIME||DEPT ) as num , to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'入出转记录数' as 业务类型 from department where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '常规结果记录数': "select count( distinct CASEID||TESTNO||RINDEX ) as num ,to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'常规结果记录数' as 业务类型 from ROUTINE2 where REQUESTTIME is not null group by to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '影像检查记录数': "select count( distinct CASEID||EXAM_NO ) as num ,to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'影像检查记录数' as 业务类型 from EXAM where EXAM_DATE is not null group by to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '治疗记录数': "select count( distinct CASEID||TNO||TTYPE||DEPT ) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'治疗记录数' as 业务类型 from TREATMENT1 where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '中心静脉插管记录数': "select count(1) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'中心静脉插管记录数' as 业务类型 from treatment1 where TTYPE like '%中心%静脉%' and BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '呼吸机记录数': "select count(1) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'呼吸机记录数' as 业务类型 from treatment1 where TTYPE like '%呼吸机%' and BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '泌尿道插管记录数': "select count(1) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'泌尿道插管记录数' as 业务类型 from treatment1 where TTYPE like '%泌尿道%' and BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# }
#
# for bus in bus_dic:
# temp = pd.read_sql(bus_dic[bus],con=engine)
# temp['month'] = temp['month'].str.replace('-','年') +'周'
# res_全业务 = res_全业务.append(temp)
# return res_全业务
# Update the first third-level figure
@app.callback(
Output('third_level_first_fig','figure'),
Output('general_situation_third_level_first_fig_data', 'data'),
Input('general_situation_third_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
Input('third_level_first_window_choice', 'value'),
# Input('third_level_first_date_type_choice', 'value'),
# prevent_initial_call=True,
)
# def update_third_level_first_fig(general_situation_third_level_first_fig_data,db_con_url,count_time,window,date_type):
def update_third_level_first_fig(general_situation_third_level_first_fig_data,db_con_url,count_time,window):
# print(date_type)
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_third_level_first_fig_data is None:
general_situation_third_level_first_fig_data = {}
# third_level_first_fig_date = get_third_level_first_fig_date(engine, 'week')
# general_situation_third_level_first_fig_data['week'] = third_level_first_fig_date.to_json(orient='split', date_format='iso')
# third_level_first_fig_date = get_third_level_first_fig_date(engine,'month')
# general_situation_third_level_first_fig_data['month'] = third_level_first_fig_date.to_json(orient='split',date_format='iso')
third_level_first_fig_date = get_third_level_first_fig_date(engine)
general_situation_third_level_first_fig_data['third_level_first_fig_date'] = third_level_first_fig_date.to_json(orient='split',date_format='iso')
general_situation_third_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_third_level_first_fig_data = json.dumps(general_situation_third_level_first_fig_data)
else:
general_situation_third_level_first_fig_data = json.loads(general_situation_third_level_first_fig_data)
if db_con_url['hosname'] != general_situation_third_level_first_fig_data['hosname']:
# third_level_first_fig_date = get_third_level_first_fig_date(engine, 'week')
# general_situation_third_level_first_fig_data['week'] = third_level_first_fig_date.to_json( orient='split', date_format='iso')
# third_level_first_fig_date = get_third_level_first_fig_date(engine, 'month')
# general_situation_third_level_first_fig_data['month'] = third_level_first_fig_date.to_json( orient='split', date_format='iso')
third_level_first_fig_date = get_third_level_first_fig_date(engine)
general_situation_third_level_first_fig_data[ 'third_level_first_fig_date'] = third_level_first_fig_date.to_json(orient='split', date_format='iso')
general_situation_third_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_third_level_first_fig_data = json.dumps( general_situation_third_level_first_fig_data)
else:
third_level_first_fig_date = pd.read_json(general_situation_third_level_first_fig_data['third_level_first_fig_date'],orient='split')
general_situation_third_level_first_fig_data = dash.no_update
            # Subplot order for the Bollinger-band panels
# bus = ['入院人数', '入出转记录数', '抗菌药物医嘱数', '手术台数', '菌检出结果记录数', '药敏结果记录数', '体温测量数', '常规结果记录数', '影像检查记录数', '治疗记录数']
bus = [ '抗菌药物医嘱数', '手术台数', '菌检出结果记录数', '药敏结果记录数', '体温测量数', '常规结果记录数', '影像检查记录数', '治疗记录数','中心静脉插管记录数','呼吸机记录数','出院人数','泌尿道插管记录数','入出转记录数','入院人数']
# print(third_level_first_fig_date)
fig = make_subplots(rows= 7 , cols=2, shared_xaxes=True)
# btime = pd.read_sql(f"select to_char(to_date('{btime}-01','yyyy-mm-dd'),'iyyy-iw') as week from dual",con=engine)['week'][0].replace('-','年')+'周' if date_type == 'week' else btime
# etime = pd.read_sql(f"select to_char(to_date('{etime}-01','yyyy-mm-dd'),'iyyy-iw') as week from dual",con=engine)['week'][0].replace('-','年')+'周' if date_type == 'week' else etime
for i in range(1, 8):
temp1 = bus[(i - 1) * 2]
temp2 = bus[i * 2 - 1]
df1 = third_level_first_fig_date[third_level_first_fig_date['业务类型'] == temp1]
df1 = df1[ (df1['month']>=btime) & (df1['month']<=etime) ]
df1 = df1.sort_values(['month'])
df2 = third_level_first_fig_date[third_level_first_fig_date['业务类型'] == temp2]
df2 = df2[ (df2['month'] >= btime) & (df2['month'] <= etime)]
df2 = df2.sort_values(['month'])
print(df1, df2)
fig.add_trace(
go.Scatter(x=df1['month'], y=df1['num'], name=bus[(i - 1) * 2]),
row=i, col=1
)
data = df1[['month', 'num']]
            mean_data = np.array([data[i: i + window]['num'].mean() for i in range(len(data) - window + 1)])  # moving average; converted to ndarray to simplify the band arithmetic below
            std_data = np.array([data[i: i + window]['num'].std() for i in range(len(data) - window + 1)])  # moving standard deviation
            up_line = pd.DataFrame(mean_data + 2 * std_data, columns=['num'])  # upper band
down_line = | pd.DataFrame(mean_data - 2 * std_data, columns=['num']) | pandas.DataFrame |
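
# Aside (a sketch, not part of the callback above): the manual list comprehensions for the
# Bollinger bands can be written with pandas' rolling-window API. `data` is assumed to be the
# same two-column (month, num) frame built inside the loop; note that rolling() keeps NaN for
# the first window-1 rows instead of returning a shorter array.
def bollinger_bands(data, window):
    rolling = data['num'].rolling(window)
    mean = rolling.mean()
    std = rolling.std()
    return mean + 2 * std, mean - 2 * std  # upper band, lower band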
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data.head(10)
#Code starts here
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data['Better_Event'] = None
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
print(top_countries.head())
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
#print(top_10)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
set1 = set(top_10_summer)
set2 = set(top_10_winter)
set3 = set(top_10)
s1 = set1.intersection(set2)
common = list(s1.intersection(set3))
print(common)
# --------------
#Code starts here
import matplotlib.pyplot as plt
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
print(top_countries.head())
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
#print(top_10)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
plt.figure(figsize=[14,8])
plt.xlabel("Country_Summer")
plt.ylabel("No of Medals")
plt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])
plt.show()
plt.figure(figsize=[14,8])
plt.xlabel("Country_Winter")
plt.ylabel("No of Medals")
plt.bar(winter_df['Country_Name'],winter_df['Total_Winter'])
plt.show()
plt.figure(figsize=[14,8])
plt.xlabel("Country_Top_10")
plt.ylabel("No of Medals")
plt.bar(top_df['Country_Name'],top_df['Total_Medals'])
plt.show()
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = | pd.DataFrame(data) | pandas.DataFrame |
import os
import pickle
import random
from datetime import datetime
import nltk
import numpy
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from app import db, create_app
import numpy as np
from random import randint
from matplotlib import pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import pandas as pd
import seaborn as sns
from joblib import dump, load
from app.base.db_models import ModelAPIMethods
from numpy import array
from numpy import argmax
from sklearn.preprocessing import OneHotEncoder
import mylib.data_manipulation.DataCoderProcessor
from app.base.db_models.ModelProfile import ModelProfile
from mylib.core.ModelProcessor import ModelProcessor
from mylib.data_manipulation.AdjustDataFrame import remove_null_values, encode_data_frame, encode_prediction_data_frame, \
decode_predicted_values, deletemodelfiles, encode_one_hot, encode_one_hot_input_features, convert_data_to_sample
from mylib.data_manipulation.DataCoderProcessor import DataCoderProcessor
from mylib.utiles.CVSReader import getcvsheader, get_new_headers_list, reorder_csv_file
from mylib.utiles.CVSReader import get_only_file_name
from mylib.db_helper.AttributesHelper import add_features, add_labels, delete_encoded_columns, get_model_id, \
encode_testing_features_values, get_features, get_labels, get_encoded_columns, add_api_details, \
update_api_details_id
import os
import pickle
from random import randint
from flask import current_app
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn import naive_bayes
from app import db
from app.base.db_models.ModelProfile import ModelProfile
from app.base.db_models.ModelLabels import ModelLabels
from app.base.db_models.ModelEncodedColumns import ModelEncodedColumns
from app.base.db_models.ModelFeatures import ModelFeatures
from app.base.db_models.ModelAPIDetails import ModelAPIDetails
from mylib.data_manipulation.AdjustDataFrame import remove_null_values
from mylib.db_helper.AttributesHelper import add_features, add_labels, delete_encoded_columns, get_model_id, \
encode_testing_features_values, get_features
from mylib.utiles.CVSReader import get_only_file_name
from mylib.utiles.CVSReader import getcvsheader, get_new_headers_list, reorder_csv_file
class ModelController:
def __init__(self):
''' Constructor for this class. '''
# Create some member animals
self.members = ['Tiger', 'Elephant', '<NAME>']
def saveDSFile(self):
return 'file uploaded successfully'
pkls_location = 'pkls/'
scalars_location = 'scalars/'
df_location = 'app/data/'
image_location = 'app/'
root_path = '../app/'
output_docs_location = 'app/base/output_docs/'
def run_prediction_model(root_path, csv_file_location, predicted_columns, ds_source, ds_goal, demo):
if demo == 'DEMO':
return run_demo_model(root_path, csv_file_location, predicted_columns, ds_source, ds_goal)
else:
return run_prod_model(root_path, csv_file_location, predicted_columns, ds_source, ds_goal)
def run_prod_model(root_path, csv_file_location, predicted_columns, ds_source, ds_goal):
# ------------------Preparing data frame-------------------------#
cvs_header = getcvsheader(csv_file_location)
new_headers_list = get_new_headers_list(cvs_header, predicted_columns)
reordered_data = reorder_csv_file(csv_file_location, new_headers_list)
data = | pd.read_csv(csv_file_location) | pandas.read_csv |
import re
from pathlib import Path
import json
import logging
import os
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from configparser import ConfigParser
from typing import List, Dict, Any
from datetime import datetime, timedelta
import dateutil
import requests
import pandas as pd
import sqlalchemy
from ratelimit import rate_limited
from peerscout.utils.collection import parse_list
from peerscout.utils.tqdm import tqdm
from peerscout.utils.requests import configure_session_retry
from peerscout.utils.threading import lazy_thread_local
from .convertUtils import unescape_and_strip_tags_if_not_none, flatten
from .preprocessingUtils import get_data_path
from ..shared.database import connect_managed_configured_database
from ..shared.app_config import get_app_config
LOGGER = logging.getLogger(__name__)
PERSON_ID = 'person_id'
DEFAULT_MAX_WORKERS = 10
DEFAULT_RATE_LIMIT_COUNT = 50
DEFAULT_RATE_LIMIT_INTERVAL_SEC = 1
DEFAULT_MAX_RETRY = 10
DEFAULT_RETRY_ON_STATUS_CODES = [429, 500, 502, 503, 504]
Person = Dict[str, Any]
PersonList = List[Person]
ENRICH_DATA_CONFIG_SECTION = 'enrich-data'
class Columns:
FIRST_NAME = 'first_name'
LAST_NAME = 'last_name'
ORCID = 'ORCID'
def get(url, session=None):
LOGGER.debug('requesting: %s', url)
response = (session or requests).get(url)
LOGGER.debug('response received: %s (%s)', url, response.status_code)
response.raise_for_status()
return response.text
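
# Hedged sketch only: configure_session_retry (imported above) is an internal helper whose
# exact signature is not shown here, so this illustrates the same idea with plain
# requests/urllib3 -- a session whose adapter retries on the status codes listed above.
def _make_retrying_session(max_retries=DEFAULT_MAX_RETRY,
                           status_forcelist=tuple(DEFAULT_RETRY_ON_STATUS_CODES)):
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry
    session = requests.Session()
    retry = Retry(total=max_retries, backoff_factor=0.5, status_forcelist=status_forcelist)
    session.mount('http://', HTTPAdapter(max_retries=retry))
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session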
def get_current_time():
return datetime.now()
def get_file_time(file_path):
return datetime.fromtimestamp(os.path.getmtime(file_path))
def is_file_expired(file_path, expire_after_secs):
return (
expire_after_secs and
get_file_time(file_path) + timedelta(seconds=expire_after_secs) <= get_current_time()
)
def create_cache(f, cache_dir, serializer, deserializer, suffix='', expire_after_secs=0):
"""
Simple cache implementation that makes it easy to inspect the cached response,
a file with the encoded url.
"""
cache_path = Path(cache_dir)
cache_path.mkdir(exist_ok=True, parents=True)
clean_pattern = re.compile(r'[^\w]')
def clean_fn(fn):
return clean_pattern.sub('_', fn)
def cached_f(*args):
cache_file = cache_path.joinpath(
Path(clean_fn(','.join([str(x) for x in args]))).name + suffix)
LOGGER.debug("filename: %s", cache_file)
if cache_file.is_file() and not is_file_expired(cache_file, expire_after_secs):
return deserializer(cache_file.read_bytes())
result = f(*args)
if result is not None:
cache_file.write_bytes(serializer(result))
return result
return cached_f
def str_serializer(x):
if isinstance(x, str):
return x.encode('utf-8')
return x
def str_deserializer(b):
return b.decode('utf-8')
def create_str_cache(*args, **kwargs):
return create_cache(*args, **kwargs, serializer=str_serializer, deserializer=str_deserializer)
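
# Illustrative composition only (the URL and cache directory are made up, not from this module):
# how get() and create_str_cache() are meant to be combined, so repeated requests for the same
# URL are answered from a file on disk until the cached copy expires.
def _example_cached_get(url, cache_dir='.cache/enrich-data'):
    cached_get = create_str_cache(
        get, cache_dir=cache_dir, suffix='.json', expire_after_secs=24 * 60 * 60)
    return cached_get(url)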
def parse_datetime_object(datetime_obj):
if datetime_obj is None:
return None
return dateutil.parser.parse(datetime_obj.get('date-time'))
def extract_manuscript(item):
return {
'title': unescape_and_strip_tags_if_not_none(' '.join(item.get('title', []))),
'abstract': unescape_and_strip_tags_if_not_none(item.get('abstract', None)),
'doi': item.get('DOI', None),
'subject_areas': item.get('subject', []),
'created_timestamp': parse_datetime_object(item.get('created', None)),
'manuscript_type': unescape_and_strip_tags_if_not_none(item.get('type'))
}
def contains_author_with_orcid(item, orcid):
return True in [
author['ORCID'].endswith(orcid)
for author in item.get('author', [])
if 'ORCID' in author
]
def is_first_name(given_name, first_name):
return given_name == first_name or given_name.startswith(first_name + ' ')
def contains_author_with_name(item, first_name, last_name):
return len([
author
for author in item.get('author', [])
if is_first_name(author.get('given', ''), first_name) and
author.get('family', '') == last_name
]) > 0
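
# Tiny worked example (the item dict is hypothetical, shaped loosely like a Crossref "works"
# record) showing how the parsing helpers above fit together.
def _example_extract():
    item = {
        'title': ['A <i>sample</i> manuscript'],
        'DOI': '10.0000/example',
        'subject': ['Neuroscience'],
        'created': {'date-time': '2018-01-01T00:00:00Z'},
        'type': 'journal-article',
        'author': [{'given': 'Jane', 'family': 'Doe',
                    'ORCID': 'http://orcid.org/0000-0000-0000-0001'}],
    }
    manuscript = extract_manuscript(item)
    has_author = contains_author_with_name(item, 'Jane', 'Doe')
    has_orcid = contains_author_with_orcid(item, '0000-0000-0000-0001')
    return manuscript, has_author, has_orcid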
def remove_duplicates(objs):
if len(objs) < 2:
return objs
return | pd.DataFrame(objs) | pandas.DataFrame |
import os
import datetime
import numpy as np
import pandas as pd
pd.set_option('mode.chained_assignment', None)
from sortasurvey import observing
def make_data_products(survey):
"""
After target selection process is complete, information is saved to several csvs.
All information is stored as attributes to the Survey class but running this function
synthesizes all the relevant information into easier-to-read files.
Parameters
----------
survey : survey.Survey
updated Survey class object containing algorithm selections
"""
if survey.emcee:
if survey.verbose and survey.progress:
survey.pbar.update(1)
if survey.n == survey.iter:
if survey.verbose:
print(" - %d MC steps completed"%(int(survey.n)))
print(" - algorithm took %d seconds to run"%(int(survey.ranking_time)))
else:
if survey.verbose:
print(" - algorithm took %d seconds to run"%(int(survey.ranking_time)))
if survey.save:
if not survey.emcee:
survey = make_directory(survey)
else:
if survey.n == 1:
survey = make_directory(survey)
if not os.path.exists('%s/%d/'%(survey.path_save,survey.n)):
os.makedirs('%s/%d/'%(survey.path_save,survey.n))
else:
return
survey = make_final_sample(survey)
survey = make_ranking_steps(survey)
survey = assign_priorities(survey)
# survey = final_costs(survey)
survey = program_overlap(survey)
get_stats(survey)
def make_directory(survey, i=1):
"""
Makes a directory for the output files
Parameters
----------
survey : survey.Survey
updated Survey class object containing algorithm selections
Returns
-------
survey : survey.Survey
updated Survey class object with the new 'path_save' attribute
"""
now = datetime.datetime.now()
name = now.strftime("%m-%d-%y")
newdir = '%s/%s-%d'%(survey.outdir,name,i)
if not os.path.exists(newdir):
os.makedirs(newdir)
else:
while os.path.exists(newdir):
i += 1
newdir = '%s/%s-%d'%(survey.outdir,name,i)
os.makedirs(newdir)
survey.path_save = newdir
return survey
def make_final_sample(survey, special=["SC2A", "SC4", "SC2Bii"],
cols_to_drop=['select_DG','TSM','SC3_bin_rank','drop','finish','false','n_select']):
"""
Makes a directory for the output files
Parameters
----------
survey : survey.Survey
updated Survey class object containing algorithm selections
Returns
-------
survey : survey.Survey
updated Survey class object with the new 'path_save' attribute
"""
for science in special:
if science in survey.programs.index.values.tolist():
if science == 'SC2A' or science == 'SC4':
# SC2A+SC4 have different observing approaches than a majority of TKS programs
changes = survey.df.query('in_%s == 1 and in_other_programs == 1'%science)
method = survey.programs.loc[science, 'method']
for i in changes.index.values.tolist():
df_temp = changes.loc[i]
nobs_goal = int(float((method.split('-')[1]).split('=')[-1]))
survey.df.loc[i, "nobs_goal"] = nobs_goal
tottime = observing.cost_function(df_temp, method, include_archival=False)
survey.df.loc[i, "tot_time"] = round(tottime/3600.,3)
remaining_nobs = int(nobs_goal - survey.df.loc[i, "nobs"])
if remaining_nobs < 0:
remaining_nobs = 0
survey.df.loc[i, "rem_nobs"] = remaining_nobs
lefttime = observing.cost_function(df_temp, method)
survey.df.loc[i, "rem_time"] = round(lefttime/3600.,3)
elif science == 'SC2Bii':
# we need to also add in our RM targets
for target in survey.programs.loc['SC2Bii', 'high_priority']:
survey.df.loc[survey.df['toi'] == target,'in_SC2Bii'] = 1
if np.isnan(survey.df.loc[survey.df['toi'] == target, 'priority'].values.tolist()[0]):
survey.df.loc[survey.df['toi'] == target, 'priority'] = survey.df['priority'].max()+1
start = np.array([0]*len(survey.df))
for science in survey.programs.index.values.tolist():
start += survey.df['in_%s'%science].values.tolist()
survey.df['in_other_programs'] = start
else:
# feel free to add other special cases here
pass
survey.df.drop(columns=cols_to_drop, errors='ignore', inplace=True)
if survey.verbose and not survey.emcee:
query_all = survey.df.query('in_other_programs != 0')
query_star = query_all.drop_duplicates(subset = 'tic')
print(' - %d targets were selected, containing a total of %d planets'%(len(query_star),len(query_all)))
print(' - Making data products, including:')
if survey.save:
if survey.emcee:
survey.df.to_csv('%s/%d/TOIs_perfect_final.csv'%(survey.path_save, survey.n), index=False)
else:
survey.df.to_csv('%s/%s_final.csv'%(survey.path_save, (survey.path_sample.split('/')[-1]).split('.')[0]), index=False)
if survey.verbose and not survey.emcee:
print(' - a copy of the updated sample')
survey.final = survey.df.copy()
return survey
def make_ranking_steps(survey):
"""
Saves every step of the target selection process, referred to as the 'track'
(and is actually the attribute it is saved as in the Survey). This is saved
as 'ranking_steps.csv' to the current run's 'path_save' directory.
Parameters
----------
survey : survey.Survey
Survey class object containing algorithm selections
Returns
-------
survey : survey.Survey
updated Survey class object with the new 'ranking_steps' attribute
"""
reorder = get_columns('track', survey.sciences.name.values.tolist())
track = | pd.DataFrame.from_dict(survey.track[survey.n], orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
import datetime
def main():
base_path = 'data/train/'
for year in range(2015, 2022):
for month in range(1, 13):
print(year, month)
if len(str(month)) == 1:
month_str = '0' + str(month)
else:
month_str = str(month)
date_str = str(year) + month_str
if date_str == '202104':
break
final_path = base_path + 'CARD_SUBWAY_MONTH_' + date_str + '.csv'
target_path = base_path + 'formatted/CARD_SUBWAY_MONTH_' + date_str + '.csv'
df = pd.read_csv(final_path)
# Reformat data to datetime
df['date'] = df['사용일자'].apply(lambda d: datetime.datetime.strptime(str(d), '%Y%m%d').strftime('%m/%d/%Y'))
df.drop('사용일자', inplace=True, axis=1)
df = df[['date', '노선명', '역명', '승차총승객수', '하차총승객수', '등록일자']]
df.to_csv(final_path,encoding='utf-8-sig', index=False)
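
# Side note (a sketch, not part of the original script): the per-row lambda above can be
# replaced by pandas' vectorized datetime parsing, which is much faster for a full month of
# rows; 사용일자 is the raw yyyymmdd usage-date column being reformatted.
def reformat_date_column(df):
    df['date'] = pd.to_datetime(df['사용일자'].astype(str), format='%Y%m%d').dt.strftime('%m/%d/%Y')
    return df.drop('사용일자', axis=1)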
def remove_index():
base_path = 'data/train/'
for year in range(2015, 2022):
for month in range(1, 13):
print(year, month)
if len(str(month)) == 1:
month_str = '0' + str(month)
else:
month_str = str(month)
date_str = str(year) + month_str
if date_str == '202005':
break
final_path = base_path + 'CARD_SUBWAY_MONTH_' + date_str + '.csv'
target_path = base_path + 'formatted/CARD_SUBWAY_MONTH_' + date_str + '.csv'
df = pd.read_csv(final_path, index_col=0)
df.to_csv(final_path, index=False)
# Create csv for each month by averaging the on/off count for each station
def monthly_mean():
base_path = 'data/train/raw_data/'
for year in range(2015, 2022):
for month in range(1, 13):
print(year, month)
if len(str(month)) == 1:
month_str = '0' + str(month)
else:
month_str = str(month)
date_str = str(year) + month_str
if date_str == '202104':
break
final_path = base_path + 'CARD_SUBWAY_MONTH_' + date_str + '.csv'
target_path = base_path + 'monthly_mean/CARD_SUBWAY_MONTH_' + date_str + '.csv'
df = pd.read_csv(final_path)
station_names = df['역명'].unique()
mean_df = pd.DataFrame(columns = ['date','노선명','역명','승차총승객수','하차총승객수'])
# Loop through each station and calculate mean value
for station in station_names:
df_station = df[df['역명'] == station]
on_mean = df_station['승차총승객수'].mean()
off_mean =df_station['하차총승객수'].mean()
data = {
'date': datetime.datetime(year, month, 1),
'노선명': df_station['노선명'].unique()[0],
'역명': station,
'승차총승객수': on_mean,
'하차총승객수': off_mean
}
mean_df = mean_df.append(data, ignore_index=True)
mean_df.columns = ['date','노선명','역명','승차총승객수','하차총승객수']
mean_df.to_csv(target_path, encoding='utf-8-sig', index=False)
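
# Sketch of an equivalent, vectorized version of the per-station loop above using groupby.
# It groups by both line (노선명) and station (역명), so stations served by several lines stay
# separate -- slightly different from the loop, which keeps only the first line name it sees.
def monthly_mean_groupby(df, year, month):
    mean_df = (df.groupby(['노선명', '역명'], as_index=False)[['승차총승객수', '하차총승객수']]
                 .mean())
    mean_df.insert(0, 'date', datetime.datetime(year, month, 1))
    return mean_df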
# Congregate monthly data into a single csv for each year.
def congregate_yearly():
base_path = 'data/train/'
for year in range(2015, 2022):
year_df = pd.DataFrame(columns = ['date','노선명','역명','승차총승객수','하차총승객수'])
for month in range(1, 13):
print(year, month)
# Building CSV path
if len(str(month)) == 1:
month_str = '0' + str(month)
else:
month_str = str(month)
date_str = str(year) + month_str
if date_str == '202104':
break
final_path = base_path + 'monthly_mean/CARD_SUBWAY_MONTH_' + date_str + '.csv'
target_path = base_path + 'yearly_congregated/CARD_SUBWAY_MONTH_' + str(year)
month_df = | pd.read_csv(final_path) | pandas.read_csv |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
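
# Design note on archive_results() above: it snapshots both the predictions csv and the
# producing script into a timestamped directory, which keeps old submissions reproducible.
# Beware that it also os.chdir()s into that directory as a side effect, so relative paths
# used afterwards resolve there. Illustrative call only (file names are hypothetical):
# archive_results('submission_xgb.csv', submission_df, 'xgboost', 'train_xgb.py')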
###############################################################################################
def preprocess_data(train,test):
y=train['is_screener']
id_test=test['patient_id']
train=train.drop(['patient_id','is_screener'],axis=1)
test=test.drop(['patient_id'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
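
# Why preprocess_data() fits the encoder on train+test values: LabelEncoder.transform raises
# ValueError for categories never seen during fit, so fitting on the union guarantees every
# test-set category receives a code. Minimal illustration of the same pattern:
def _label_encode_shared(train_values, test_values):
    lbl = preprocessing.LabelEncoder()
    lbl.fit(list(train_values) + list(test_values))
    return lbl.transform(list(train_values)), lbl.transform(list(test_values))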
os.chdir('/home/cuoco/KC/cervical-cancer-screening/src')
trainfile=('../input/patients_train.csv.gz')
testfile=('../input/patients_test.csv.gz')
train=pd.read_csv(trainfile,low_memory=False )
test=pd.read_csv(testfile,low_memory=False )
train_ex_file=('../input/train_patients_to_exclude.csv.gz')
train_ex=pd.read_csv(train_ex_file,low_memory=False)
train=train[train.patient_id.isin(train_ex.patient_id)==False]
test_ex_file=('../input/test_patients_to_exclude.csv.gz')
test_ex=pd.read_csv(test_ex_file,low_memory=False)
test=test[test.patient_id.isin(test_ex.patient_id)==False]
print(train.shape,test.shape)
surgical=pd.read_csv('../features/surgical_pap.csv.gz')
diagnosis=pd.read_csv('../features/diagnosis_hpv.csv.gz')
procedure_cervi=pd.read_csv('../features/procedure_cervi.csv.gz')
procedure_hpv=pd.read_csv('../features/procedure_hpv.csv.gz')
procedure_vaccine=pd.read_csv('../features/procedure_vaccine.csv.gz')
procedure_vagi=pd.read_csv('../features/procedure_vagi.csv.gz')
procedure_plan_type=pd.read_csv('../features/procedure_plan_type.csv.gz')
rx_payment=pd.read_csv('../features/rx_payment.csv.gz')
train_pract_screen_ratio=pd.read_csv('../features/train_pract_screen_ratio.csv.gz')
test_pract_screen_ratio=pd.read_csv('../features/test_pract_screen_ratio.csv.gz')
visits=pd.read_csv('../features/visits.csv.gz')
diagnosis_train_counts=pd.read_csv('../features/train_diagnosis_cbsa_counts.csv.gz')
#print (diagnosis_train_counts.shape)
#print(np.unique(len(diagnosis_train_counts['patient_id'])))
diagnosis_test_counts=pd.read_csv('../features/test_diagnosis_cbsa_counts.csv.gz')
state_screen_percent=pd.read_csv('../features/state_screen_percent.csv')
days_supply_distribution=pd.read_csv('../features/days_supply_distribution.csv')
surgical_procedure_type_code_counts_train=pd.read_csv('../features/surgical_procedure_type_code_counts_train.csv.gz')
print (surgical_procedure_type_code_counts_train.shape)
print(np.unique(len(surgical_procedure_type_code_counts_train['patient_id'])))
surgical_procedure_type_code_counts_test= | pd.read_csv('../features/surgical_procedure_type_code_counts_test.csv.gz') | pandas.read_csv |
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from finmarketpy.economics.techindicator import TechParams, TechIndicator
tech_params = TechParams(fillna=True, atr_period=14, sma_period=3,
green_n=4, green_count=9, red_n=2, red_count=13)
tech_ind = TechIndicator()
dates = pd.date_range(start='1/1/2018', end='1/08/2018')
def get_cols_name(n):
return ['Asset%d.close' % x for x in range(1, n + 1)]
def test_sma():
indicator_name = 'SMA'
# Test Case 1: constant prices
cols = get_cols_name(1)
data_df = pd.DataFrame(index=dates, columns=cols, data=1)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=1)
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 2: Normal case with one single security
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(1, 9)))
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=data_df.shift().values)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 3: Normal case with multiple securities
cols = get_cols_name(10)
col_prices = np.array(range(1, 9))
data_df = pd.DataFrame(index=dates, columns=cols, data=np.tile(col_prices, (len(cols), 1)).T)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=data_df.shift().values)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 4: Decreasing price with multiple securities
cols = get_cols_name(10)
col_prices = np.array(range(8, 0, -1))
data_df = pd.DataFrame(index=dates, columns=cols, data=np.tile(col_prices, (len(cols), 1)).T)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=data_df.shift().values)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 5: With SOME missing data
cols = get_cols_name(1)
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(1, 9)))
data_df.iloc[3] = np.nan
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=[np.nan, np.nan, 2, 2.67, 3.67,
4.67, 6, 7])
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
assert_frame_equal(df.apply(lambda x: round(x, 2)), expected_df)
| assert_frame_equal(signal_df, expected_signal_df) | pandas.testing.assert_frame_equal |
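
# Aside on the SMA expectations above (a sketch, not part of the original test module): a
# simple moving average is just a rolling mean, and for consecutive integer prices the
# 3-period SMA equals the previous price, which is why expected_df is the shifted frame in
# the increasing/decreasing test cases.
def pandas_sma(data_df, period=3):
    return data_df.rolling(period).mean()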
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy
def calculate(mylist):
return | pd.DataFrame(mylist[1:],columns=mylist[0]) | pandas.DataFrame |
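
# Usage sketch for calculate() above (made-up values): the first inner list is taken as the
# header row and the remaining lists become the data rows.
def _example_calculate():
    return calculate([['name', 'score'], ['a', 1], ['b', 2]])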
import unittest
import pandas as pd
import pytest
import riptable as rt
# N.B. TL;DR We have to import the actual implementation module to override the module global
# variable "tm.N" and "tm.K".
# In pandas 1.0 they move the code from pandas/util/testing.py to pandas/_testing.py.
# The "import pandas.util.testing" still works but because it doesn't contain the actual code
# our attempt to override the "tm.N" and "tm.K" will not change the actual value for
# makeTimeDataFrame, which will produce data with different shape and make the test
# "test_accum_table" fail. Maybe we want to reconsider using the pandas internal testing utils.
try:
import pandas._testing as tm
except ImportError:
import pandas.util.testing as tm
from riptable import *
from numpy.testing import (
assert_array_equal,
assert_almost_equal,
assert_array_almost_equal,
)
from riptable.rt_numpy import arange
# To create AccumTable test data
from riptable.Utils.pandas_utils import dataset_from_pandas_df
from riptable.rt_datetime import DateTimeNano
tm.N = 3
tm.K = 5
class Accum2_Test(unittest.TestCase):
'''
TODO: add more tests for different types
'''
def test_accum2(self):
c = cut(arange(10), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)
c = cut(arange(10.0), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)
c = cut(arange(11), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3])) == 0)
c = cut(FA([2, 4, 6, 8, 10]), FA([0, 2, 4, 6, 8, 10]))
self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e'],
)
self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)
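
    # Cross-check sketch (an assumption for illustration, not asserted by the library docs):
    # riptable's cut appears to mirror pandas.cut but with 1-based bin codes, which is what
    # the expected FastArrays above encode. Not collected as a test (no `test_` prefix).
    def _pandas_cut_reference(self):
        codes = pd.cut(np.arange(10), 3, labels=False)  # -> 0,0,0,0,1,1,1,2,2,2
        return codes + 1                                # matches FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])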
def test_qcut(self):
c = qcut(arange(10), 3)
self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4])) == 0)
c = qcut(arange(11), 3)
self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4])) == 0)
c = qcut(range(5), 3, labels=["good", "medium", "bad"])
self.assertTrue(sum(c._np - FA([2, 2, 3, 4, 4])) == 0)
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e'],
)
def test_cut_errors(self):
with self.assertRaises(ValueError):
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e', 'f'],
)
def test_simple_cats(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# no filter
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 7)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i], data[i])
def test_simple_cats_filter_accum(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# filtered accum object
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i + 1], data[i])
def test_simple_cats_filter_operation(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# filtered operation
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 8)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i + 1], data[i])
def test_multikey_cats(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted no filter
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i], data[i])
# sorted no filter
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], ordered=True)
c2 = Categorical([unsorted_str, ints], ordered=True)
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i], sorted_data[i])
@pytest.mark.xfail(reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.')
def test_multikey_cats_filter_accum_sorted(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted filter accum object
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], data[i])
# sorted filter accum object
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], sort_gb=True)
c2 = Categorical([unsorted_str, ints], sort_gb=True)
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
# TODO fix this regression that was masked due to duplicate test names
# self.assertAlmostEqual(arr[i + 1], sorted_data[i])
def test_multikey_cats_filter_accum_ordered(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted filter accum object
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], data[i])
# sorted filter accum object
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], ordered=True)
c2 = Categorical([unsorted_str, ints], ordered=True)
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], sorted_data[i])
def test_dataset_accum2(self):
# test from accum2 off dataset and with a filter
ds = Dataset({'test': arange(10), 'data': arange(10) // 2})
x = ds.accum2('data', 'test').sum(ds.test, filter=ds.data == 3)
totalcol = x.summary_get_names()[0]
self.assertEqual(x[totalcol][3], 13)
def test_accum2_mean(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(200) % 5, symbols)
ac = Accum2(ds.data, ds.symbol).mean(ds.time)
totalcol = ac[ac.summary_get_names()[0]]
footer = ac.footer_get_values()['Mean']
for i in range(len(symbols)):
s_mean = ds[ds.symbol == symbols[i], :].time.mean()
self.assertEqual(footer[i + 1], s_mean)
for i in range(7):
s_mean = ds[ds.data == i, :].time.mean()
self.assertEqual(totalcol[i], s_mean)
def test_accum2_median(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(200) % 5, symbols)
ac = Accum2(ds.data, ds.symbol).median(ds.time)
totalcol = ac[ac.summary_get_names()[0]]
footer = ac.footer_get_values()['Median']
for i in range(len(symbols)):
s_median = ds[ds.symbol == symbols[i], :].time.median()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = ds[ds.data == i, :].time.median()
self.assertEqual(totalcol[i], s_median)
def test_accum2_nanmedian_with_filter(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
# N.B. make a copy here for testing
symbol_categorical = Cat(1 + arange(200) % 5, symbols)
# N.B. Categorical.copy and Categorical constructor doesn't do deep copy?!
ds.symbol = Cat(1 + arange(200) % 5, symbols)
chosen_symbols = ['AMZN', 'AAPL']
filt = symbol_categorical.isin(chosen_symbols)
ac = Accum2(ds.data, ds.symbol)
stat1 = ac.nanmedian(ds.time, filter=filt)
totalcol = stat1[stat1.summary_get_names()[0]]
footer = stat1.footer_get_values()['Median']
# Make sure we don't change the input data
self.assertTrue(not rt.any(ds.symbol._fa == 0))
for sym in chosen_symbols:
s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)
i = rt.where(symbol_categorical.category_array == sym)[0].item()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)
self.assertEqual(totalcol[i], s_median)
chosen_symbols = ['IBM', 'FB']
filt = symbol_categorical.isin(chosen_symbols)
stat2 = ac.nanmedian(ds.time, filter=filt)
totalcol = stat2[stat2.summary_get_names()[0]]
footer = stat2.footer_get_values()['Median']
# Make sure we don't change the input data
self.assertTrue(not rt.any(ds.symbol._fa == 0))
for sym in chosen_symbols:
s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)
i = rt.where(symbol_categorical.category_array == sym)[0].item()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)
self.assertEqual(totalcol[i], s_median)
def test_showfilter_label_subclass(self):
d = Date.range('20190201', '20190210')
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, Date))
self.assertTrue(result.YLabel.isnan()[0])
d = DateTimeNano.random(10)
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, DateTimeNano))
self.assertTrue(result.YLabel.isnan()[0])
d = DateSpan(arange(10, 20))
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, DateSpan))
self.assertTrue(result.YLabel.isnan()[0])
d = TimeSpan(np.random.rand(10) * 10_000_000_000)
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, TimeSpan))
self.assertTrue(result.YLabel.isnan()[0])
def test_apply(self):
arrsize = 200
numrows = 7
ds = Dataset({'time': arange(arrsize * 1.0)})
ds.data = np.random.randint(numrows, size=arrsize)
ds.data2 = np.random.randint(numrows, size=arrsize)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(arrsize) % len(symbols), symbols)
ds.accum2('symbol', 'data').sum(ds.data2)
ds.accum2('symbol', 'data').sum(ds.data2, showfilter=True)
ds.accum2('symbol', 'data').median(ds.data2, showfilter=True)
ds.accum2('symbol', 'data').median(ds.data2, showfilter=False)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=True)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=False)
f = logical(arange(200) % 2)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, filter=f)
ds.accum2('symbol', 'data').apply_reduce(
np.median, ds.data2, filter=f, showfilter=True
)
ds.accum2('symbol', 'data').median(ds.data2, filter=f, showfilter=True)
def test_apply_nonreduce(self):
arrsize = 200
numrows = 7
ds = rt.Dataset({'time': rt.arange(arrsize * 1.0)})
ds.data = arange(arrsize) % numrows
ds.data2 = (arange(arrsize) + 3) % numrows
symbols = [
'AAPL',
'AMZN',
'FB',
'GOOG',
'IBM',
'6',
'7',
'8',
'9',
'10',
'11',
'12',
'13',
'14',
'15',
'16',
'17',
'18',
]
ds.symbol = rt.Cat(1 + rt.arange(arrsize) % len(symbols), symbols)
result = ds.symbol.apply_reduce(
lambda x, y: np.sum(np.minimum(x, y)), (ds.data, ds.data)
)
ac = ds.accum2('symbol', 'data')
newds = ac.apply_nonreduce(np.cumsum)
ds2 = ac.apply_reduce(
lambda x, y: np.sum(np.maximum(x, y)), (newds.data, newds.data2)
)
x = np.maximum(newds.data, newds.data2)
y = ac.apply_nonreduce(
lambda x, y: np.maximum(x, y), (newds.data, newds.data2)
)[0]
self.assertTrue(np.all(x == y))
class AccumTable_Test(unittest.TestCase):
@pytest.mark.skip(reason="Test needs to be re-written to remove the np.random.seed usage -- it's not stable across numpy versions.")
def test_accum_table(self):
# Create the test data
def unpivot(frame):
N, K = frame.shape
data = {
'value': frame.values.ravel('F'),
'variable': np.asarray(frame.columns).repeat(N),
'date': np.tile(np.asarray(frame.index), K),
}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
'''
This file is used to extract features for gait classification. Machine learning model parameters are included.
Users will have to provide their own data and ground truths to train the model. Input data is raw accelerometer data
from a wrist-worn wearable sensor.
'''
import pandas as pd
from signal_preprocessing import preprocess
from features import signal_features as sf
import constants
def extract_gait_classification_features(window_data_df, channels, fs):
'''
Extract signal features applicable for gait classification for a given 3 second window of raw accelerometer data.
:param window_data_df: Pandas DataFrame with columns ['ts','x','y','z']
:param channels: Desired channels to run features on (Ex: ['x','y','z'])
:param fs: Sampling rate of raw accelerometer data (Float)
:return: DataFrame of calculated features on 3 second windows for given raw data
'''
features = pd.DataFrame()
# Compute signal entropy
feat_df_signal_entropy = sf.signal_entropy(window_data_df, channels)
# Compute correlation coefficient
feat_df_corr_coef = sf.correlation_coefficient(window_data_df, [['x_bp_filt_[0.25, 3.0]', 'y_bp_filt_[0.25, 3.0]'],
['x_bp_filt_[0.25, 3.0]', 'z_bp_filt_[0.25, 3.0]'],
['y_bp_filt_[0.25, 3.0]', 'z_bp_filt_[0.25, 3.0]']])
# Compute RMS
feat_df_signal_rms = sf.signal_rms(window_data_df, channels)
# Compute range
feat_df_signal_range = sf.signal_range(window_data_df, channels)
# Compute IQR of Autocovariance
feat_df_iqr_auto = sf.iqr_of_autocovariance(window_data_df, channels)
# Compute Dominant Frequency
sampling_rate = fs
frequncy_cutoff = 12.0
feat_df_dom_freq = sf.dominant_frequency(window_data_df, sampling_rate, frequncy_cutoff, channels)
# Compute mean cross rate
feat_df_mean_cross_rate = sf.mean_cross_rate(window_data_df, channels)
# Compute range count percentage
feat_df_range_count_percentage = sf.range_count_percentage(window_data_df, channels, min_value=-0.1, max_value=0.1)
features = features.join(feat_df_signal_entropy, how='outer')
features = features.join(feat_df_corr_coef, how='outer')
features = features.join(feat_df_signal_rms, how='outer')
features = features.join(feat_df_signal_range, how='outer')
features = features.join(feat_df_iqr_auto, how='outer')
features = features.join(feat_df_dom_freq, how='outer')
features = features.join(feat_df_mean_cross_rate, how='outer')
features = features.join(feat_df_range_count_percentage, how='outer')
return features
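# --- Usage sketch (illustrative only) ---
# The call below assumes a single 3 second window that has already been through the
# pre-processing step, so that the band-pass filtered channels referenced above
# (e.g. 'x_bp_filt_[0.25, 3.0]') exist; the 50 Hz sampling rate and the random data
# are assumptions made purely for illustration.
#
# import numpy as np
# demo_window = pd.DataFrame({'ts': np.arange(0, 3, 1 / 50.0)})
# for ch in ['x', 'y', 'z',
#            'x_bp_filt_[0.25, 3.0]', 'y_bp_filt_[0.25, 3.0]', 'z_bp_filt_[0.25, 3.0]']:
#     demo_window[ch] = np.random.randn(len(demo_window))
# window_features = extract_gait_classification_features(demo_window, ['x', 'y', 'z'], fs=50.0)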
def build_gait_classification_feature_set(raw_accelerometer_data_df, fs):
'''
Pre-process raw accelerometer data and compute signal based features on data.
:param raw_accelerometer_data_df: Raw accelerometer data in a Pandas DataFrame with columns = ['ts','x','y','z']
:param fs: Sampling rate of raw accelerometer data (Float)
:return: Pandas DataFrame of calculated features for given raw accelerometer data
'''
# Initialize final DataFrame
final_feature_cache = pd.DataFrame()
from nose.tools import *
from os.path import abspath, dirname, join
import numpy as np
import pandas as pd
from scipy.stats import norm, lognorm
import wntr
testdir = dirname(abspath(str(__file__)))
datadir = join(testdir,'..','..','tests','networks_for_testing')
packdir = join(testdir,'..','..','..')
FC1 = wntr.scenario.FragilityCurve()
FC1.add_state('Major', 2, {'Default': norm(loc=1, scale=2)})
FC1.add_state('Minor', 1, {'Default': norm(loc=0, scale=1)})
FC2 = wntr.scenario.FragilityCurve()
FC2.add_state('Minor', 1, {'Default': lognorm(0.25, loc=0, scale=1),
'3': lognorm(0.2, loc=0, scale=1)})
FC2.add_state('Major', 2, {'Default': lognorm(0.25, loc=1, scale=2)})
#x = np.linspace(-5,5,100)
#for name, state in FC2.states():
# dist=state.distribution['Default']
# plt.plot(x,dist.cdf(x), label=name)
#plt.ylim((0,1))
#plt.legend()
def test_get_priority_map():
priority_map = FC1.get_priority_map()
assert_dict_equal(priority_map, {None: 0, 'Minor': 1, 'Major': 2})
def test_cdf_probability():
x = pd.Series({'1': 0, '2': 1, '3': 2})
Pr = FC1.cdf_probability(x)
assert_equal(Pr.loc['1','Minor'], 0.5)
assert_less(Pr.loc['2','Minor']-0.841, 0.001)
assert_less(Pr.loc['3','Minor']-0.977, 0.001)
assert_equal(Pr.loc['2','Major'], 0.5)
def test_sample_damage_state():
x = pd.Series({'1': 0, '2': 1, '3': 2})
"""Data visualization functions"""
from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from app.ml import City, validate_city
from app.data.files.state_abbr import us_state_abbrev as abbr
router = APIRouter()
MODEL_CSV = 'https://media.githubusercontent.com/media/CityScape-Datasets/Workspace_Datasets/main/Models/nn_model/nn_model.csv'
class CityData():
"""
Locates specific city data
- Demographics
- Employment -> industry, employment
- Crime -> violent crime, property crime
- Air Quality Index
"""
def __init__(self, current_city):
self.current_city = current_city
self.dataframe = pd.read_csv(MODEL_CSV)
self.subset = self.dataframe[self.dataframe['City'] == self.current_city.city]
def demographics(self):
return ['Hispanic', 'White', 'Black', 'Native', 'Asian', 'Pacific']
def industry(self):
return ['PrivateWork', 'PublicWork', 'SelfEmployed', 'FamilyWork']
def employment(self):
return ['Professional', 'Service', 'Office', 'Construction', 'Production']
def crime(self):
return ['Violent crime', 'Property crime', 'Arson']
def violent_crime(self):
return ['Murder and nonnegligent manslaughter','Rape', 'Robbery', 'Aggravated assault']
def property_crime(self):
return ['Burglary','Larceny- theft', 'Motor vehicle theft']
def air_quality_index(self):
return ['Days with AQI', 'Good Days', 'Moderate Days','Unhealthy for Sensitive Groups Days', 'Unhealthy Days','Very Unhealthy Days', 'Hazardous Days', 'Max AQI', '90th Percentile AQI', 'Median AQI', 'Days CO', 'Days NO2', 'Days Ozone', 'Days SO2', 'Days PM2.5', 'Days PM10']
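# Usage sketch (hypothetical city; the 'city'/'state' field names are taken from how the
# validated object is accessed elsewhere in this module, and MODEL_CSV must be reachable):
#
# city_data = CityData(validate_city(City(city='Albany', state='New York')))
# demographics_df = city_data.subset[city_data.demographics()]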
@router.post("/api/demographics_graph")
async def demographics_plot(current_city:City):
"""
Visualize demographic information for city
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Demographics
city_demographics = city_data.subset[city_data.demographics()]
city_demographics['Not Specified'] = 100 - city_demographics.sum(axis=1) # Accounting for people that did not respond
melt = pd.melt(city_demographics)
melt.columns = ['demographic', 'percentage']
fig = px.pie(melt, values ='percentage', names ='demographic')
fig.update_layout(
title={
'text': f'Demographics in {city}',
'y':0.98,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'})
fig.show()
return fig.to_json()
@router.post("/api/employment_graph")
async def employment_plot(current_city:City):
"""
Visualize employment information for city
- see industry breakdown and employment type
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Industry
industry_type = city_data.subset[city_data.industry()]
industry_melt = pd.melt(industry_type)
industry_melt.columns = ['industry', 'percentage']
# Employment Type
employment_type = city_data.subset[city_data.employment()]
type_melt = pd.melt(employment_type)
type_melt.columns = ['employment type', 'percentage']
#Create subplots
fig = make_subplots(rows=1, cols=2, subplot_titles = (f'Industry in {city}', f'Employment Types in {city}'))
fig.add_trace(go.Bar(x = industry_melt['industry'], y = industry_melt['percentage'],
marker = dict(color = industry_melt['percentage'], coloraxis = "coloraxis")),
row = 1, col = 1)
fig.add_trace(go.Bar(x =type_melt['employment type'], y =type_melt['percentage'],
marker = dict(color = type_melt['percentage'], coloraxis = "coloraxis")),
row = 1, col = 2)
fig.update_layout(
coloraxis=dict(colorscale = 'Bluered_r'),
coloraxis_showscale = False,
showlegend = False)
fig.show()
return fig.to_json()
@router.post("/api/crime_graph")
async def crime_plot(current_city:City):
"""
Visualize crime information for city
- see overall crime breakdown
- visualize breakdown of violent crime and property crime
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Crime Categories
crime_type = city_data.subset[city_data.crime()]
crime_melt = pd.melt(crime_type)
crime_melt.columns = ['categories', 'total']
# Violent Crime
violent_crime_type = city_data.subset[city_data.violent_crime()]
violent_crime_type_melt = pd.melt(violent_crime_type)
violent_crime_type_melt.columns = ['violent crime type', 'total']
# Property Crime
property_crime_type = city_data.subset[city_data.property_crime()]
property_crime_melt = pd.melt(property_crime_type)
property_crime_melt.columns = ['property crime type', 'total']
#Create subplots
fig = make_subplots(
rows=2, cols=2,
subplot_titles = (f"Crime Breakdown in {city}", f"Violent Crime Breakdown in {city}", f"Property Crime Breakdown in {city}"),
specs = [[{"type":"xy", 'rowspan':2}, {"type": "pie"}],
[None, {"type": "pie"}]],
)
fig.add_trace(go.Bar(name = 'Crime Types', x = crime_melt['categories'], y = crime_melt['total']),
row = 1, col = 1)
fig.add_trace(go.Pie(values = violent_crime_type_melt['total'],
labels = violent_crime_type_melt['violent crime type']),
row = 1, col = 2)
fig.add_trace(go.Pie(values = property_crime_melt['total'],
labels = property_crime_melt['property crime type']),
row = 2, col = 2)
fig.show()
return fig.to_json()
@router.post("/api/aqi_graph")
async def air_quality_plot(current_city:City):
"""
Visualize air quality information for city
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Air Quality
air_quality_details = city_data.subset[city_data.air_quality_index()]
air_quality_melt = pd.melt(air_quality_details)
air_quality_melt.columns = ['air quality indicators', 'days']
fig = make_subplots(rows = 1, cols = 1)
fig.add_trace(go.Bar(x = air_quality_melt['days'], y = air_quality_melt['air quality indicators'],
marker = dict(color = air_quality_melt['days'], coloraxis = "coloraxis"), orientation = 'h'))
fig.update_layout(
coloraxis=dict(colorscale = 'Viridis'),
coloraxis_showscale = False,
xaxis_range = [0, 360],
title={
'text': f'Air Quality in {city}',
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'})
fig.show()
return fig.to_json()
POPULATION_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv'
FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_prediction.csv'
@router.post('/api/population_forecast_graph')
async def population_forecast_graph(city:City):
"""
Create visualization of historical and forecasted population
args:
- city: str -> The target city
returns:
Visualization of population forecast
- 10 years of historical data
- pre-computed forecast values with upper and lower bounds
"""
city = validate_city(city)
location = [city.city + ', ' + city.state]
# Historical population data
population = pd.read_csv(POPULATION_CSV)
population = population[population['City,State'].isin(location)]
population = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
population_melt = population.melt(id_vars=['City,State'], var_name='ds', value_name='y')
population_melt['ds'] = (population_melt['ds']).astype(int)
# Predictions
forecast = pd.read_csv(FORECAST_CSV)
predictions = forecast[forecast['City,State'].isin(location)][9:]
predictions['year'] = (predictions['year']).astype(int)
# Graph Data
ax = population_melt.plot(x = 'ds', y = 'y', label='Observed', figsize= (10, 8))
predictions[['year', 'yhat']].plot(ax = ax, x = 'year', y = 'yhat', label = "Forecast")
# Fill to show upper and lower bounds
# Graph predictions including the upper and lower bounds
fig = go.Figure()
fig.add_trace(go.Scatter(
name = 'Original',
x = population_melt['ds'],
y = population_melt['y'],
fill = None,
mode = 'lines',
line_color = 'black',
showlegend = True
))
fig.add_trace(go.Scatter(
name = 'Forecast',
x = predictions['year'],
y = predictions['yhat'],
fill = None,
mode = 'lines',
line_color = 'red',
showlegend = True
))
fig.add_trace(go.Scatter(
name = 'Lower Bound',
x = predictions['year'],
y = predictions['yhat_lower'],
fill = None,
mode = 'lines',
line_color = 'gray'
))
fig.add_trace(go.Scatter(
name = 'Upper Bound',
x = predictions['year'],
y = predictions['yhat_upper'],
fill='tonexty',
mode='lines',
line_color = 'gray'
))
# Edit the layout
fig.update_layout({
'autosize':True,
'title': f'{location[0]} Population Forecast',
'title_x': 0.5,
'xaxis_title': 'Year',
'yaxis_title': 'Population'
})
fig.update_yaxes(automargin = True)
fig.update_xaxes(automargin = True, nticks=20)
fig.show()
return fig.to_json()
FMR_0 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr0.csv'
FMR_0_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr0_predictions.csv'
FMR_1 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr1.csv'
FMR_1_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr1_predictions.csv'
FMR_2 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr2.csv'
FMR_2_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr2_predictions.csv'
FMR_3 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr3.csv'
FMR_3_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr3_predictions.csv'
@router.post('/api/rental_forecast_graph')
def rental_forecast_graph(city:City, bed):
"""
Create visualization of historical and forecasted \n
Fair Market Rents for studios - 3 bedrooms
args:
- city: str -> The target city
- bed: str -> number of bedrooms ('0', '1', '2', '3')
returns:
Visualization of Rental forecast
- 5 years of historical data
- 10 years of forecasted data
"""
city = validate_city(city)
location = [city.city + ', ' + city.state]
if bed == "0":
RENTAL_CSV = FMR_0
RENTAL_FORECAST_CSV = FMR_0_FORECAST_CSV
elif bed == "1":
RENTAL_CSV = FMR_1
RENTAL_FORECAST_CSV = FMR_1_FORECAST_CSV
elif bed == "2":
RENTAL_CSV = FMR_2
RENTAL_FORECAST_CSV = FMR_2_FORECAST_CSV
else:
RENTAL_CSV = FMR_3
RENTAL_FORECAST_CSV = FMR_3_FORECAST_CSV
# Historical Rental data
rental = pd.read_csv(RENTAL_CSV)
rental = rental[rental['city, state'].isin(location)]
rental = rental.drop(columns = ['metro_code', 'state_alpha', 'areaname'])
rental_melt = rental.melt(id_vars=['city, state'], var_name='ds', value_name='y')
rental_melt['ds'] = pd.to_datetime(rental_melt['ds'])
from matplotlib import pyplot as plt
import pickle
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, \
classification_report
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import seaborn as sn
"""
Help functions including functions for saving the confusion matrix/scores, data scaling etc.
"""
CIC_col_names = ['Flow Duration', 'Tot Fwd Pkts', 'Tot Bwd Pkts', 'TotLen Fwd Pkts',
'TotLen Bwd Pkts', 'Fwd Pkt Len Max', 'Fwd Pkt Len Min', 'Fwd Pkt Len Mean', 'Fwd Pkt Len Std',
'Bwd Pkt Len Max',
'Bwd Pkt Len Min', 'Bwd Pkt Len Mean', 'Bwd Pkt Len Std', 'Flow Byts/s', 'Flow Pkts/s', 'Flow IAT Mean',
'Flow IAT Std',
'Flow IAT Max', 'Flow IAT Min', 'Fwd IAT Tot', 'Fwd IAT Mean', 'Fwd IAT Std', 'Fwd IAT Max', 'Fwd IAT Min',
'Bwd IAT Tot',
'Bwd IAT Mean', 'Bwd IAT Std', 'Bwd IAT Max', 'Bwd IAT Min', 'Fwd PSH Flags', 'Bwd PSH Flags',
'Fwd URG Flags',
'Bwd URG Flags', 'Fwd Header Len', 'Bwd Header Len', 'Fwd Pkts/s', 'Bwd Pkts/s', 'Pkt Len Min',
'Pkt Len Max',
'Pkt Len Mean', 'Pkt Len Std', 'Pkt Len Var', 'FIN Flag Cnt', 'SYN Flag Cnt', 'RST Flag Cnt',
'PSH Flag Cnt',
'ACK Flag Cnt', 'URG Flag Cnt', 'CWE Flag Count', 'ECE Flag Cnt', 'Down/Up Ratio', 'Pkt Size Avg',
'Fwd Seg Size Avg',
'Bwd Seg Size Avg', 'Fwd Byts/b Avg', 'Fwd Pkts/b Avg', 'Fwd Blk Rate Avg', 'Bwd Byts/b Avg',
'Bwd Pkts/b Avg',
'Bwd Blk Rate Avg', 'Subflow Fwd Pkts', 'Subflow Fwd Byts', 'Subflow Bwd Pkts', 'Subflow Bwd Byts',
'Init Fwd Win Byts',
'Init Bwd Win Byts', 'Fwd Act Data Pkts', 'Fwd Seg Size Min', 'Active Mean', 'Active Std', 'Active Max',
'Active Min',
'Idle Mean', 'Idle Std', 'Idle Max', 'Idle Min', 'Label']
CIC_types_dict = {'Flow Duration': int, 'Tot Fwd Pkts': int, 'Tot Bwd Pkts': int, 'TotLen Fwd Pkts': int,
'TotLen Bwd Pkts': int, 'Label': 'string'}
CIC_types_dict.update({col: float for col in CIC_col_names if col not in CIC_types_dict})
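# Example load (hypothetical file name; assumes a CICFlowMeter-style export whose columns
# match CIC_col_names -- pass names= instead of usecols= if the CSV has no header row):
#
# flows_df = pd.read_csv('flows.csv', usecols=CIC_col_names, dtype=CIC_types_dict)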
def saveConfMatrix(y_true, y_pred, filepath_csv, filepath_png):
cm = confusion_matrix(y_true, y_pred)
pd.DataFrame(cm).to_csv(filepath_csv)
# cm_display = ConfusionMatrixDisplay(cm).plot()
df_cfm = pd.DataFrame(cm, index=['0', '1'], columns=['0', '1'])
# plt.figure(figsize=(15, 12))
cfm_plot = sn.heatmap(df_cfm, annot=True, fmt='.1f')
cfm_plot.figure.savefig(filepath_png)
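# Example (illustrative label vectors only):
#
# saveConfMatrix(y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
#                filepath_csv='results/cm.csv', filepath_png='results/cm.png')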
def saveScores(y_true, y_pred, filepath):
# print("Metrics")
report = classification_report(y_true, y_pred, output_dict=True)
stats = pd.DataFrame(report)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 05:32:10 2021
@author: <NAME>
The following script analyzes in vivo field data for cannula infusion experiments.
To utilize this script, simply update the filepath with the folder to be analyzed
and the savepath with the folder to save the post-processed data to, then press run.
"""
from scipy.io import loadmat
import numpy as np
import pandas as pd
frequency_bands = np.array(['2-6', '6-12', '15-30', '40-70', '70-120'])
filepath = 'C:/Users/<NAME>/Desktop/WB4101 natcom temp/analysis_bla/exported/' # path to folder containing exported .mat files from MatWAND
savepath = 'C:/Users/<NAME>/Desktop/WB4101 natcom temp/analysis_bla/' # path to folder where post-processed data will be saved
# This code will iterate through each exported bandpass filtered .mat file in the folderpath and save the post-processed data across frequency bands in the savepath
# This code accepts 60 minute long bandpass filtered data (5 second bins [2.5 second overlap] = 24 datapoints per minute) exported from MatWAND.
# Importing data of a different duration or samplerate will result in misleading outputs.
for g in frequency_bands:
if g == '2-6' :
x = loadmat(filepath + 'bla_2_6 Hz_time.mat')
elif g == '6-12' :
x = loadmat(filepath + 'bla_6_12 Hz_time.mat')
elif g == '15-30' :
x = loadmat(filepath + 'bla_15_30 Hz_time.mat')
elif g == '40-70' :
x = loadmat(filepath + 'bla_40_70 Hz_time.mat')
elif g == '70-120' :
x = loadmat(filepath + 'bla_70_120 Hz_time.mat')
# restructure data into 4 treatments
powerarea = x['power_area']
base = {} # Baseline
inj1 = {} # Saline
inj2 = {} # Saline or Antagonist (depending on the experiment being analyzed)
inj3 = {} # Drug or Drug w/ Antagonist (depending on the experiment being analyzed)
power_area = {}
for i in range(powerarea.shape[0]) :
power_area[str(i)] = powerarea[i][:]
power_area[str(i)] = np.reshape(power_area[str(i)], (4, int(len(power_area[str(i)])/4)))
base[str(i)] = power_area[str(i)][0]
inj1[str(i)] = power_area[str(i)][1]
inj2[str(i)] = power_area[str(i)][2]
inj3[str(i)] = power_area[str(i)][3]
# Normalize to baseline and restructure into 1 minute bins
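# (MatWAND exports 24 overlapping 5 s bins per minute, so reshaping each normalized trace
# to (-1, 24) and averaging along axis 1 collapses it to one value per minute.)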
base_norm = {}
for key, value in base.items():
base_norm[str(key)] = value/np.mean(value)
base_norm[str(key)] = np.mean(base_norm[str(key)].reshape(-1,24), axis = 1)
inj1_norm = {}
for (key, value), (k, v) in zip(inj1.items(), base.items()):
inj1_norm[str(key)] = value/np.mean(v)
inj1_norm[str(key)] = np.mean(inj1_norm[str(key)].reshape(-1,24), axis = 1)
inj2_norm = {}
for (key, value), (k, v) in zip(inj2.items(), base.items()):
inj2_norm[str(key)] = value/np.mean(v)
inj2_norm[str(key)] = np.mean(inj2_norm[str(key)].reshape(-1,24), axis = 1)
inj3_norm = {}
for (key, value), (k, v) in zip(inj3.items(), base.items()):
inj3_norm[str(key)] = value/np.mean(v)
inj3_norm[str(key)] = np.mean(inj3_norm[str(key)].reshape(-1,24), axis = 1)
# export normalized timeseries data to excel
base_norm = pd.DataFrame(base_norm)
base_norm.to_excel(savepath + 'base_'+g+'.xlsx')
inj1_norm = pd.DataFrame(inj1_norm)
inj1_norm.to_excel(savepath + 'inj1_'+g+'.xlsx')
inj2_norm = pd.DataFrame(inj2_norm)
inj2_norm.to_excel(savepath + 'inj2_'+g+'.xlsx')
inj3_norm = pd.DataFrame(inj3_norm)
inj3_norm.to_excel(savepath + 'inj3_'+g+'.xlsx')
# Get average first 10 mins values for Two-Way ANOVA w/ multiple comparisons * [normalized data] *
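# (each normalized trace spans 60 one-minute bins, so the first len/6 = 10 bins cover the first 10 minutes)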
base_mc = np.array([])
for key, value in base_norm.items():
base_mc = np.append(base_mc, np.mean(value[:(int(len(value)/6))]))
inj1_mc = np.array([])
for key, value in inj1_norm.items():
inj1_mc = np.append(inj1_mc, np.mean(value[:(int(len(value)/6))]))
inj2_mc = np.array([])
for key, value in inj2_norm.items():
inj2_mc = np.append(inj2_mc, np.mean(value[:(int(len(value)/6))]))
inj3_mc = np.array([])
for key, value in inj3_norm.items():
inj3_mc = np.append(inj3_mc, np.mean(value[:(int(len(value)/6))]))
if g == '2-6' :
slow_theta = np.concatenate(([base_mc], [inj1_mc], [inj2_mc], [inj3_mc]), axis=0)
elif g == '6-12' :
fast_theta = np.concatenate(([base_mc], [inj1_mc], [inj2_mc], [inj3_mc]), axis=0)
elif g == '15-30' :
beta = np.concatenate(([base_mc], [inj1_mc], [inj2_mc], [inj3_mc]), axis=0)
elif g == '40-70' :
slow_gamma = np.concatenate(([base_mc], [inj1_mc], [inj2_mc], [inj3_mc]), axis=0)
elif g == '70-120' :
fast_gamma = np.concatenate(([base_mc], [inj1_mc], [inj2_mc], [inj3_mc]), axis=0)
# Export average normalized first 10 mins values to excel
slow_theta = | pd.DataFrame(slow_theta) | pandas.DataFrame |
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import GCNConv, SAGEConv, GATConv, RGCNConv, SGConv, APPNP, ClusterGCNConv
from src.data.data_loader import GraphDataset
import warnings
import pandas as pd
import os
import argparse
import numpy as np
import pickle
import torch
from src.evaluation.network_split import NetworkSplitShchur
from src.data.create_modified_configuration_model import generate_modified_conf_model
from torch_geometric.utils import from_networkx, to_networkx
from community import best_partition
import networkx as nx
def parse_args():
parser = argparse.ArgumentParser(description="Test accuracy for GCN/SAGE/GAT/RGCN/SGC/APPNP")
parser.add_argument('--size',
type=int,
default=96,
help='Channel size. Default is 96.')
parser.add_argument('--lr',
type=float,
default=0.01,
help='Learning rate. Default is 0.01.')
parser.add_argument('--wd',
type=float,
default=0.01,
help='Regularization weight. Default is 0.01.')
parser.add_argument('--dropout',
type=float,
default=0.8,
help='Dropout probability. Default is 0.8.')
parser.add_argument('--conf',
type=bool,
default=False,
help='Is configuration model evaluation. Default is False.')
parser.add_argument('--shifting',
type=bool,
default=False,
help='Is shifting evaluation. Default is False.')
parser.add_argument('--sbm',
type=bool,
default=False,
help='Is SBM evaluation. Default is False.')
parser.add_argument('--sbm_label',
type=bool,
default=False,
help='Is SBM_label evaluation. Default is False.')
parser.add_argument('--flipped',
type=bool,
default=False,
help='Evaluating with flipped edges? Default is False.')
parser.add_argument('--removed_hubs',
type=bool,
default=False,
help='Evaluating with removed hubs? Default is False.')
parser.add_argument('--added_2hop_edges',
type=bool,
default=False,
help='Evaluating with added 2-hop edges? Default is False.')
parser.add_argument('--label_sbm',
type=bool,
default=False,
help='Evaluating with SBMs created from labels? Default is False.')
parser.add_argument('--heads',
type=int,
default=4,
help='Attention heads. Default is 4.')
parser.add_argument('--attention_dropout',
type=float,
default=0.4,
help='Attention dropout for GAT. Default is 0.4.')
parser.add_argument('--dataset',
default="cora",
help='Dataset name. Default is cora.')
parser.add_argument('--model',
default="gcn",
help='Model name. Default is GCN.')
parser.add_argument('--splits',
type=int,
default=100,
help='Number of random train/validation/test splits. Default is 100.')
parser.add_argument('--runs',
type=int,
default=20,
help='Number of random initializations of the model. Default is 20.')
parser.add_argument('--conf_inits',
type=int,
default=10,
help='Number of configuration model runs. Default is 10.')
parser.add_argument('--sbm_inits',
type=int,
default=10,
help='Number of SBM runs. Default is 10.')
parser.add_argument('--directionality',
default='undirected',
help='Directionality: undirected/directed/reversed. Default is undirected.')
parser.add_argument('--train_examples',
type=int,
default=20,
help='Number of training examples per class. Default is 20.')
parser.add_argument('--val_examples',
type=int,
default=30,
help='Number of validation examples per class. Default is 30.')
args = parser.parse_args()
return args
name2conv = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv, 'rgcn': RGCNConv, 'rgcn2':RGCN2, 'sgc':SGConv, 'appnp':APPNP, 'cgcn':ClusterGCNConv}
def eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads,attention_dropout,runs,splits,train_examples,val_examples, models=[MonoGAT],isDirected = False):
if isDirected:
models = [MonoGAT]
return eval_gnn(dataset, dataset_name, GATConv, channel_size, dropout, lr, wd, heads=heads, attention_dropout=attention_dropout,
models=models, num_runs=runs, num_splits=splits, test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_gcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoModel], isDirected=False):
if isDirected:
models = [MonoModel]
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_appnp(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoAPPNPModel]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_rgcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoRGCN]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval(model, dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, runs, splits, train_examples, val_examples, isDirected):
if model == 'gat':
return eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
elif model == 'rgcn' or model == 'rgcn2':
return eval_archs_rgcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
elif model == 'appnp':
return eval_archs_appnp(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
else:
return eval_archs_gcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
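# Dispatch sketch (hypothetical values; assumes `dataset` was loaded via GraphDataset as in
# eval_original below):
#
# scores_df = eval(model='gcn', dataset=dataset, dataset_name='cora', channel_size=96,
#                  dropout=0.8, lr=0.01, wd=0.01, heads=4, attention_dropout=0.4,
#                  runs=20, splits=100, train_examples=20, val_examples=30, isDirected=False)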
def eval_original(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_shuffled_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = dataset.x[torch.randperm(dataset.x.size()[0])]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_random_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = torch.randint(0, 2, dataset.x.shape, dtype=torch.float)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_cm_communities(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}-cm_communities-{i}', dataset_name,
f'data/graphs/cm_communities/{dataset_name}/{dataset_name}_cm_communities_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# G = to_networkx(dataset)
# G = nx.DiGraph(G)
# node_communities = best_partition(nx.to_undirected(G))
# nx.set_node_attributes(G,node_communities,'label')
# # print(dataset.edge_index)
# old_edges = dataset.edge_index
# G = generate_modified_conf_model(G)
# # dir_path = f'data/graphs/cm_communities/{dataset_name}'
# # if not os.path.exists(dir_path):
# # os.mkdir(dir_path)
# # nx.write_edgelist(G, f'{dir_path}/{dataset_name}_cm_communities_{i}.cites')
# dataset.edge_index = torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)
# print((torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)-old_edges).abs().sum())
# print(dataset.edge_index)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_random(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, random_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(random_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-random{i}', dataset_name,
f'data/graphs/random/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['random_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_erdos(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, erdos_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(erdos_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-erdos{i}', dataset_name,
f'data/graphs/erdos/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['erdos_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# print(f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites')
# print(dataset.edge_index.shape)
# print(dataset.edge_index)
# if last_edge is None:
# last_edge = dataset.edge_index
# continue
# print((1-last_edge.eq(last_edge).double()).sum())
# continue
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_degree_cat(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
e = num_edges
hubs_experiment = 'global_edges'
for i in range(inits):
for frm in range(0,100,percentile):
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_degree_cat/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_constant_nodes(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for frm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio}nodes_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_constant_nodes/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_attack_target(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for atkfrm in range(0,100,percentile):
for tgtfrm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
atkto = atkfrm + percentile
tgtto = tgtfrm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio:.3f}nodes_{i}_{hubs_experiment}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}', dataset_name,
f'data/graphs/injected_edges_attack_target/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['atkfrm'] = atkfrm
df_cur['atkto'] = atkto
df_cur['tgtfrm'] = tgtfrm
df_cur['tgtto'] = tgtto
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_sbm_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_label_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples,hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-label_sbm_{hubs_experiment}', dataset_name,
f'data/graphs/label_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_conf(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, conf_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(conf_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-confmodel{i}', dataset_name,
f'data/graphs/confmodel/{dataset_name}/{dataset_name}_confmodel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['confmodel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_shifting(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, shifting_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for change in 'CL':
for inc in [True, False]:
for r in [0.16,0.32,0.64]: #[0.02,0.04,0.08]:
for i in range(shifting_inits):
output_prefix = f'data/graphs/shifting/{dataset_name}/{dataset_name}_shifting'
output_suffix = '.cites'
graph_path = f'{output_prefix}_{change}_{"inc" if inc else "dec"}_r{r:.2f}_{i}{output_suffix}'
if not os.path.exists(graph_path):
print(f'File not found: {graph_path}')
continue
dataset = GraphDataset(f'data/tmp/{dataset_name}_shifting_{change}_{"inc" if inc else "dec"}_r{r:.2f}_{i}{output_suffix}',
dataset_name, graph_path,
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph_num'] = i
df_cur['inc'] = inc
df_cur['change'] = change
df_cur['r'] = r
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm{i}', dataset_name,
f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['sbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_sbm_label(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm_label{i}', dataset_name,
f'data/graphs/sbm_label/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['sbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_modcm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, modcm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(modcm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-modcm{i}', dataset_name,
f'data/graphs/modcm/{dataset_name}/{dataset_name}_modcm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['modcm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_modsbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, modsbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(modsbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-modsbm{i}', dataset_name,
f'data/graphs/modsbm/{dataset_name}/{dataset_name}_modsbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['modsbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_reglabel(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, reglabel_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(reglabel_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-reglabel{i}', dataset_name,
f'data/graphs/reglabel/{dataset_name}/{dataset_name}_reglabel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['reglabel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
################## Synthetic part #####################################
def load_communities(path):
with open(path, 'rb') as handle:
ret = pickle.load(handle)
return ret
def load_labels(path):
label = {}
with open(path, 'r') as handle:
for line in handle:
s = line.strip().split()
label[s[0]] = s[-1]
return label
def agg(x):
return len(x.unique())
def calc_uncertainty(df_community,dataset_name,labeled=False,seed=0):
if dataset_name == 'cora':
df_community.label = df_community.label.apply(lambda x : ''.join([c for c in x if c.isupper()]))
if labeled:
df_community = df_community[df_community[f'labeled{seed}']]
communities = df_community.community.unique()
labels = df_community.label.unique()
mtx = df_community.pivot_table(index='community', columns='label',values='node',aggfunc=agg).fillna(0) / len(df_community)
def Pmarg(c):
return len(df_community[df_community.community == c]) / len(df_community)
def Pcond(l,c):
return mtx.loc[c,l]/Pmarg(c)
H = 0
for c in communities:
h = 0
for l in labels:
if Pcond(l,c) == 0:
continue
h += Pcond(l,c) * np.log2(1./Pcond(l,c))
H += h * Pmarg(c)
def Pl(l):
return len(df_community[df_community.label == l]) / len(df_community)
Hl = 0
for l in labels:
if Pl(l) == 0:
continue
Hl += Pl(l) * np.log2(1./Pl(l))
IG = Hl-H
return IG/Hl
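# Hedged usage sketch (added for illustration, not part of the original module).
# It shows what calc_uncertainty() returns on a toy frame with the expected
# 'node' / 'community' / 'label' columns; the helper name, the dataset name
# 'toy' and all values below are assumptions.
def _calc_uncertainty_example():
    df_toy = pd.DataFrame({
        'node': ['n0', 'n1', 'n2', 'n3'],
        'community': [0, 0, 1, 1],
        'label': ['A', 'A', 'B', 'B'],
    })
    # Communities align perfectly with labels, so the conditional entropy H is 0
    # and the normalized information gain IG/Hl comes out as 1.0.
    return calc_uncertainty(df_toy, 'toy')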
def eval_sbm_swap(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits, is_sbm):
step = 10
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = | pd.DataFrame() | pandas.DataFrame |
import IPython
import base64
import cv2
import json
import numpy as np
import pandas as pd
import pravega.grpc_gateway as pravega
from matplotlib import pyplot as plt
import time
def ignore_non_events(read_events):
for read_event in read_events:
if len(read_event.event) > 0:
yield read_event
def opencv_image_to_mpl(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
class StreamBase():
def __init__(self, pravega_client, scope, stream, create=False):
self.pravega_client = pravega_client
self.scope = scope
self.stream = stream
if create:
self.create_stream()
def create_stream(self, min_num_segments=1):
return self.pravega_client.CreateStream(pravega.pb.CreateStreamRequest(
scope=self.scope,
stream=self.stream,
scaling_policy=pravega.pb.ScalingPolicy(min_num_segments=min_num_segments),
))
def get_stream_info(self):
return self.pravega_client.GetStreamInfo(pravega.pb.GetStreamInfoRequest(
scope=self.scope,
stream=self.stream,
))
class OutputStream(StreamBase):
def __init__(self, pravega_client, scope, stream, create=True):
super(OutputStream, self).__init__(pravega_client, scope, stream, create)
def write_video_from_file(self, filename, crop=None):
cap = cv2.VideoCapture(filename)
video_frames = self.opencv_video_frame_generator(cap)
cropped_video_frames = (self.cropped_video_frame(f, crop) for f in video_frames)
events_to_write = self.video_frame_write_generator(cropped_video_frames)
write_response = self.pravega_client.WriteEvents(events_to_write)
return write_response
def opencv_video_frame_generator(self, vidcap):
while True:
pos_frames = vidcap.get(cv2.CAP_PROP_POS_FRAMES)
success, image = vidcap.read()
if not success:
return
video_frame = dict(
image=image,
frameNumber=int(pos_frames),
timestamp=int(time.time() * 1000),
)
yield video_frame
def cropped_video_frame(self, video_frame, crop):
if crop:
left, top, right, bottom = crop
video_frame['image'] = video_frame['image'][top:bottom, left:right]
return video_frame
def video_frame_write_generator(self, video_frame_iter, camera=0):
for video_frame in video_frame_iter:
event_dict = video_frame.copy()
event_dict['camera'] = camera
event_dict['ssrc'] = 0
success, png_array = cv2.imencode('.png', video_frame['image'])
event_dict['data'] = base64.b64encode(png_array.tobytes()).decode(encoding='UTF-8')
del event_dict['image']
to_log_dict = event_dict.copy()
to_log_dict['data'] = '(%d bytes)' % len(event_dict['data'])
# print('video_frame_write_generator: ' + json.dumps(to_log_dict))
event_json = json.dumps(event_dict)
event_bytes = event_json.encode(encoding='UTF-8')
event_to_write = pravega.pb.WriteEventsRequest(
scope=self.scope,
stream=self.stream,
event=event_bytes,
routing_key=str(camera),
)
yield event_to_write
class UnindexedStream(StreamBase):
def __init__(self, pravega_client, scope, stream):
super(UnindexedStream, self).__init__(pravega_client, scope, stream)
def read_events(self, from_stream_cut=None, to_stream_cut=None):
read_events_request = pravega.pb.ReadEventsRequest(
scope=self.scope,
stream=self.stream,
from_stream_cut=from_stream_cut,
to_stream_cut=to_stream_cut,
)
return ignore_non_events(self.pravega_client.ReadEvents(read_events_request))
def read_event_to_video_frame(self, read_event):
event_json = read_event.event
video_frame = json.loads(event_json)
image_png = base64.b64decode(video_frame['data'])
del video_frame['data']
image_png_array = np.frombuffer(image_png, dtype=np.uint8)
image_array = cv2.imdecode(image_png_array, cv2.IMREAD_UNCHANGED)
video_frame['image_array'] = image_array
video_frame['timestamp'] = pd.to_datetime(video_frame['timestamp'], unit='ms', utc=True)
return video_frame
def read_video_frames(self, from_stream_cut=None, to_stream_cut=None):
read_events = self.read_events(from_stream_cut, to_stream_cut)
return (self.read_event_to_video_frame(read_event) for read_event in read_events)
def play_video(self, from_stream_cut=None, to_stream_cut=None, show_frame_interval=1):
read_events = self.read_video_frames(from_stream_cut, to_stream_cut)
for i, video_frame in enumerate(read_events):
if i % show_frame_interval == 0:
IPython.display.clear_output(wait=True)
plt.title('frameNumber=%d, timestamp=%s' % (video_frame['frameNumber'], video_frame['timestamp']))
plt.imshow(opencv_image_to_mpl(video_frame['image_array']));
plt.show()
class IndexedStream():
def __init__(self, pravega_client, scope, stream, from_stream_cut=None, timestamp_col='timestamp'):
self.pravega_client = pravega_client
self.scope = scope
self.stream = stream
self.from_stream_cut = from_stream_cut
self.timestamp_col = timestamp_col
self.index_df = None
def build_index(self):
stream_info = self.pravega_client.GetStreamInfo(
pravega.pb.GetStreamInfoRequest(scope=self.scope, stream=self.stream))
# print('stream_info=%s' % str(stream_info))
from_stream_cut = stream_info.head_stream_cut if self.from_stream_cut is None else self.from_stream_cut
to_stream_cut = stream_info.tail_stream_cut
read_events_request = pravega.pb.ReadEventsRequest(
scope=self.scope,
stream=self.stream,
from_stream_cut=from_stream_cut,
to_stream_cut=to_stream_cut,
)
# print(read_events_request)
read_events = ignore_non_events(self.pravega_client.ReadEvents(read_events_request))
index_list = [self.read_event_to_index(read_event) for read_event in read_events]
df = | pd.DataFrame(index_list) | pandas.DataFrame |
# coding=utf-8
import pandas as pd
from mock import MagicMock
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from nose.tools import assert_raises, assert_equals
from sparkmagic.livyclientlib.command import Command
import sparkmagic.utils.constants as constants
from sparkmagic.livyclientlib.sendpandasdftosparkcommand import (
SendPandasDfToSparkCommand,
)
def test_send_to_scala():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._scala_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARK,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._scala_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_r():
input_variable_name = "input"
input_variable_value = | pd.DataFrame({"A": [1], "B": [2]}) | pandas.DataFrame |
"""
Tests compressed data parsing functionality for all
of the parsers defined in parsers.py
"""
import os
from pathlib import Path
import zipfile
import pytest
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture(params=[True, False])
def buffer(request):
return request.param
@pytest.fixture
def parser_and_data(all_parsers, csv1):
parser = all_parsers
with open(csv1, "rb") as f:
data = f.read()
expected = parser.read_csv(csv1)
return parser, data, expected
@pytest.mark.parametrize("compression", ["zip", "infer", "zip2"])
def test_zip(parser_and_data, compression):
parser, data, expected = parser_and_data
with tm.ensure_clean("test_file.zip") as path:
with zipfile.ZipFile(path, mode="w") as tmp:
tmp.writestr("test_file", data)
if compression == "zip2":
with open(path, "rb") as f:
result = parser.read_csv(f, compression="zip")
else:
result = parser.read_csv(path, compression=compression)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("compression", ["zip", "infer"])
def test_zip_error_multiple_files(parser_and_data, compression):
parser, data, expected = parser_and_data
with tm.ensure_clean("combined_zip.zip") as path:
inner_file_names = ["test_file", "second_file"]
with zipfile.ZipFile(path, mode="w") as tmp:
for file_name in inner_file_names:
tmp.writestr(file_name, data)
with pytest.raises(ValueError, match="Multiple files"):
parser.read_csv(path, compression=compression)
def test_zip_error_no_files(parser_and_data):
parser, _, _ = parser_and_data
with tm.ensure_clean() as path:
with zipfile.ZipFile(path, mode="w"):
pass
with pytest.raises(ValueError, match="Zero files"):
parser.read_csv(path, compression="zip")
def test_zip_error_invalid_zip(parser_and_data):
parser, _, _ = parser_and_data
with | tm.ensure_clean() | pandas._testing.ensure_clean |
# coding: utf-8
# In[1]:
#first commit -Richie
import pandas as pd
import numpy as np
# In[2]:
data_message = | pd.read_csv('../../data/raw_data/AAPL_05222012_0930_1300_message.tar.gz',compression='gzip') | pandas.read_csv |
import pandas as pd
import path_utils
from Evolve import Evolve, replot_evo_dict_from_dir
import traceback as tb
import os, json, shutil
import numpy as np
import matplotlib.pyplot as plt
import itertools
from copy import deepcopy
import pprint as pp
from tabulate import tabulate
import seaborn as sns
import psutil, time
import ray
'''
This is very similar to Benchmark.py, but that one was designed (when I had a
previous setup in mind) to run a set of parameters MULTIPLE times each. I.e.,
it would create an Evolve object and do evo_obj.evolve() several times to create
a distribution. Now, there's no "time dependence", so we really just want to be
able to look at separate parameter settings, but only running them once each.
I'm also getting rid of the whole "solved" aspect for now because it's based
on numbers that are hard to explain, making it a bit pointless.
run_param_dict() is the most basic function, just doing an evolution for a passed
param_dict. Other functions basically involve calling it given various inputs.
'''
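# Hedged usage sketch (illustrative only; the parameter values are taken from the
# run_vary_params() docstring below, and base_dir is an assumption):
#
#   params = {'env_name': 'CartPole-v0', 'NN': 'FFNN_multilayer', 'N_hidden_units': 4}
#   run_param_dict(params, N_gen=100, N_trials=10, base_dir=path_utils.get_output_dir())
#
# run_vary_params() wraps the same call over the cross product of varied settings.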
################################ Statistics functions
@path_utils.timer
def run_param_dict(param_dict, N_gen, N_trials, base_dir):
'''
    Pass a single params dict, including the env_name, to run a single evolve() for.
Also pass an output_dir, or it will use the default output folder.
This only runs each setting ONCE.
'''
# deepcopy, just to be safer
params = deepcopy(param_dict)
assert 'env_name' in params.keys(), 'Must supply an env_name!'
env_name = params['env_name']
params.pop('env_name')
params['base_dir'] = base_dir
try:
# Run a single parameters setting
e = Evolve(env_name, **params)
evo_dict = e.evolve(N_gen, N_trials=N_trials, print_gen=True)
e.save_all_evo_stats(evo_dict, save_plots=True)
return evo_dict
except:
print(f'\n\nError in evolve with params: {params}. Traceback:\n')
print(tb.format_exc())
print('\n\nAttempting to continue...\n\n')
return {}
@ray.remote
def run_param_dict_wrapper(param_dict, N_gen, N_trials, base_dir):
# If a run_fname_label is provided, use that to create a more informative dir name.
# Otherwise, just use the date.
if 'run_fname_label' in param_dict.keys():
run_fname_label = param_dict['run_fname_label']
else:
run_fname_label = 'vary_params'
# Make plots for this params set
if 'run_plot_label' in param_dict.keys():
run_plot_label = param_dict['run_plot_label']
else:
run_plot_label = run_fname_label
# Run dir for this set of params
params_dir = os.path.join(base_dir, '{}_{}'.format(run_fname_label, path_utils.get_date_str()))
os.mkdir(params_dir)
# Doing this so it just saves directly to this dir, which has a more
# informative name than Evolve.__init__() would create.
param_dict['run_dir'] = params_dir
print('\n\nNow running with params:')
pp.pprint(param_dict, width=1)
print('\n\n')
stats_dict = run_param_dict(param_dict, N_gen, N_trials, base_dir)
return stats_dict
@path_utils.timer
def run_multi_envs(env_list, **kwargs):
'''
Iterates over a list of env names you give it,
running them and recording info.
'''
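    # Illustrative call (env names and numbers are assumptions, not from this module):
    #   run_multi_envs(['CartPole-v0', 'MountainCar-v0'], N_gen=500, N_trials=20)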
N_gen = kwargs.get('N_gen', 1000)
N_trials = kwargs.get('N_trials', 1000)
# Create dir for the results of this stats set.
stats_dir = os.path.join(path_utils.get_output_dir(), 'Stats_{}'.format(path_utils.get_date_str()))
os.mkdir(stats_dir)
# Dict to hold results on timing, etc.
stats_dict = {}
for env_name in env_list:
print(f'\nGetting stats for env {env_name} now...\n')
param_dict = deepcopy(kwargs)
param_dict['env_name'] = env_name
stats_dict[env_name] = run_param_dict(param_dict, N_gen, N_trials, stats_dir)
# Save distributions to file
with open(os.path.join(stats_dir, 'multi_env_stats.json'), 'w+') as f:
json.dump(stats_dict, f, indent=4)
def run_classic_control_envs(**kwargs):
'''
Loads gym_envs_info.json. This contains info about the envs we want to analyze.
It then calls run_multi_envs() for the classic control envs.
'''
with open(os.path.join(path_utils.get_src_dir(), 'gym_envs_info.json'), 'r') as f:
envs_dict = json.load(f)
env_list = [k for k,v in envs_dict.items() if v['env_type']=='classic_control']
print(f'Getting stats for: {env_list}')
run_multi_envs(env_list, **kwargs)
def run_param_dict_list(params_dict_list, **kwargs):
'''
Pass this a list of dicts, where each has the different parameters you want
to gather stats for.
It then iterates through this list, doing a run for each dict.
Note that it modifies the passed params_dict_list to add the results to it.
'''
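    # Illustrative shape of params_dict_list (values are assumptions):
    #   [{'env_name': 'CartPole-v0', 'NN': 'FFNN_multilayer', 'N_hidden_units': 2},
    #    {'env_name': 'CartPole-v0', 'NN': 'FFNN_multilayer', 'N_hidden_units': 4}]
    # Each dict gets a 'stats_dict' entry added with the results of its run.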
# Create dir for the results of this stats run if one isn't provided.
stats_dir = kwargs.get('stats_dir', None)
if stats_dir is None:
stats_dir = os.path.join(path_utils.get_output_dir(), 'Stats_{}'.format(path_utils.get_date_str()))
os.mkdir(stats_dir)
# Produce results in parallel
for d in params_dict_list:
# For non-ray use
'''d['result'] = run_param_dict_wrapper( d,
kwargs.get('N_gen', 100),
kwargs.get('N_trials', 10),
stats_dir)'''
# For use with ray
d['result_ID'] = run_param_dict_wrapper.remote( d,
kwargs.get('N_gen', 100),
kwargs.get('N_trials', 10),
stats_dir)
# Retrieve results from ID
for d in params_dict_list:
d['stats_dict'] = ray.get(d['result_ID'])
d.pop('result_ID')
#d['stats_dict'] = d['result'] # for non-ray use
#d.pop('result')
# Return passed list, which should have dicts
# modified with the results
return params_dict_list
@path_utils.timer
def run_vary_params(constant_params_dict, vary_params_dict, **kwargs):
'''
This is a convenience function to easily vary parameters for analysis.
You pass it constant_params_dict, which is a dict with the values that
you want to remain constant between runs. Then, pass it vary_params_dict,
which should have each parameter that you want to vary as a list of the values
it should take.
Example:
constant_params_dict = {
'env_name' : 'CartPole-v0',
'N_gen' : 1000,
'N_dist' : 100,
'NN' : 'FFNN_multilayer'
}
vary_params_dict = {
'N_hidden_units' : [2, 4, 8],
'act_fn' : ['tanh', 'relu']
}
This will do 3*2 = 6 runs, for each of the combinations of varying parameters.
'''
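    # Illustrative call matching the docstring example above (N_gen/N_trials values are assumptions):
    #   run_vary_params(constant_params_dict, vary_params_dict, N_gen=1000, N_trials=10)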
# Create informative dir name
vary_params = list(vary_params_dict.keys())
stats_dir = os.path.join(
path_utils.get_output_dir(),
'Stats_vary_{}_{}'.format('_'.join(vary_params), path_utils.get_date_str()))
print(f'\nSaving statistics run to {stats_dir}')
os.mkdir(stats_dir)
# Create runs dir
all_runs_dir = os.path.join(stats_dir, 'all_runs')
print(f'\nSaving all runs to {all_runs_dir}')
os.mkdir(all_runs_dir)
# Create dict of const and vary params, as separate items
all_params = {
'const_params' : constant_params_dict,
'vary_params' : vary_params_dict
}
other_run_params = ['N_gen', 'N_trials']
for p in other_run_params:
if p in kwargs.keys():
all_params[p] = kwargs.get(p, None)
# Save params to file
with open(os.path.join(stats_dir, 'all_params.json'), 'w+') as f:
json.dump(all_params, f, indent=4)
# Flatten list, pass to other function
flat_param_list = vary_params_cross_products(constant_params_dict, vary_params_dict)
flat_param_list = run_param_dict_list(flat_param_list, stats_dir=all_runs_dir, **kwargs)
# Parse results
for d in flat_param_list:
# For now I'll still keep vary_params_stats.csv, but I think it's not
# actually necessary.
# Get rid of this now
d.pop('stats_dict')
# Save results to csv for later parsing/plotting
df = pd.DataFrame(flat_param_list)
print(tabulate(df, headers=df.columns.values, tablefmt='psql'))
df_fname = os.path.join(stats_dir, 'vary_params_stats.csv')
df.to_csv(df_fname, index=False)
################################# Plotting functions
def plot_all_agg_stats(stats_dir):
'''
For plotting all the heatmaps/etc for a stats_dir.
'''
agg_stats_dir = os.path.join(stats_dir, 'agg_stats')
if os.path.exists(agg_stats_dir):
shutil.rmtree(agg_stats_dir)
print(f'\nSaving all aggregate stats to {agg_stats_dir}')
os.mkdir(agg_stats_dir)
all_params_fname = os.path.join(stats_dir, 'all_params.json')
with open(all_params_fname, 'r') as f:
all_params_dict = json.load(f)
# Import all scores
all_scores_fname = os.path.join(stats_dir, 'all_scores.csv')
df = pd.read_csv(all_scores_fname)
vary_params_dict = all_params_dict['vary_params']
const_params_dict = all_params_dict['const_params']
vary_params = list(vary_params_dict.keys())
N_vary_params = len(vary_params)
# Only need to do if more than 2 params were varied.
if N_vary_params >= 2:
# Get envs info to find out percent of runs that "solved" the env.
with open(os.path.join(path_utils.get_src_dir(), 'gym_envs_info.json'), 'r') as f:
envs_dict = json.load(f)
# Create heatmap plots dir
heatmap_dir = os.path.join(agg_stats_dir, 'heatmap_plots')
if os.path.exists(heatmap_dir):
shutil.rmtree(heatmap_dir)
print(f'\nSaving heatmap plots to {heatmap_dir}')
os.mkdir(heatmap_dir)
# Iterate over all unique pairs of vary params, plot heatmaps of them
for pair in itertools.combinations(vary_params, 2):
print(f'Making heatmaps for {pair}')
other_params_flat = [(k, v) for k,v in vary_params_dict.items() if k not in pair]
other_params = [x[0] for x in other_params_flat]
other_vals = [x[1] for x in other_params_flat]
print(f'other params: {other_params}')
# Create dir for specific pivot
pivot_name = 'vary_{}_{}'.format(*pair)
pivot_dir = os.path.join(heatmap_dir, pivot_name)
os.mkdir(pivot_dir)
# Select for each of the combos of the other params.
for other_params_set in itertools.product(*other_vals):
# This part just selects for the rows that have the correct
# params/etc.
other_sel_dict = dict(zip(other_params, other_params_set))
fname_label = path_utils.param_dict_to_fname_str(other_sel_dict)
df_sel = df.loc[(df[list(other_sel_dict)] == pd.Series(other_sel_dict)).all(axis=1)]
df_no_scores = df_sel.drop('all_scores', axis=1)
#print(df_no_scores.columns.values)
df_params_only = df_no_scores.drop_duplicates()
all_row_dfs = []
# Iterate through table, for each run label, find its corresponding dir,
# walk through it, get all its scores, create a dataframe from them,
# then concatenate all these df's into a big one, that we can plot.
for index, row in df_params_only.iterrows():
# Only get the params varied, turn them into a dict
row_dict = row[df_no_scores.columns.values].to_dict()
df_row = df_sel.loc[(df[list(row_dict)] == pd.Series(row_dict)).all(axis=1)]
row_scores = df_row['all_scores'].values
row_dict['index'] = index
row_dict['mean_score'] = np.mean(row_scores)
row_dict['best_score'] = np.max(row_scores)
solved_reward = envs_dict[row_dict['env_name']]['solved_avg_reward']
                    N_solved_scores = np.count_nonzero(row_scores >= solved_reward)  # count of scores that reach the solved threshold
row_dict['percent_solved_scores'] = N_solved_scores/len(row_scores)
# pandas has the nice perk that if you create a df from a dict where
# some of the entries are constants and one entry is a list, it duplicates
# the constant values.
row_df = pd.DataFrame(row_dict, index=[index])
all_row_dfs.append(row_df)
all_scores_df = pd.concat(all_row_dfs)
#print(tabulate(all_scores_df, headers=all_scores_df.columns.values, tablefmt='psql'))
heatmap_plot(all_scores_df, *pair, 'mean_score', pivot_dir, label=fname_label)
heatmap_plot(all_scores_df, *pair, 'best_score', pivot_dir, label=fname_label)
heatmap_plot(all_scores_df, *pair, 'percent_solved_scores', pivot_dir, label=fname_label)
#heatmap_plot(df_sel, *pair, perc_cutoff_str, pivot_dir, label=fname_label)
def heatmap_plot(df, xvar, yvar, zvar, output_dir, **kwargs):
#df = pd.read_csv(csv_fname)
    df = df.pivot(index=yvar, columns=xvar, values=zvar)
plt.close('all')
plt.figure()
ax = plt.gca()
label = kwargs.get('label', '')
sns.heatmap(df, annot=True, fmt=".1f", cmap='viridis', ax=ax)
ax.set_title(f'{zvar} for constant {label}')
plt.savefig(os.path.join(output_dir, f'vary_{xvar}_{yvar}__{zvar}_heatmap__const_{label}.png'))
if kwargs.get('show_plot', False):
plt.show()
def make_total_score_df(stats_dir):
# Load csv that holds the names of all the dirs
stats_overview_fname = os.path.join(stats_dir, 'vary_params_stats.csv')
overview_df = pd.read_csv(stats_overview_fname)
# unique fname labels in table
run_fname_labels = overview_df.run_fname_label.unique()
# Get the params that are varied
all_params_fname = os.path.join(stats_dir, 'all_params.json')
with open(all_params_fname, 'r') as f:
all_params_dict = json.load(f)
const_params = list(all_params_dict['const_params'].keys())
vary_params = list(all_params_dict['vary_params'].keys())
print(f'Params varied: {vary_params}')
all_row_dfs = []
runs_dir = os.path.join(stats_dir, 'all_runs')
# Iterate through table, for each run label, find its corresponding dir,
# walk through it, get all its scores, create a dataframe from them,
# then concatenate all these df's into a big one, that we can plot.
for index, row in overview_df.iterrows():
# Only get the params varied, turn them into a dict
#row_dict = row[vary_params].to_dict()
row_dict = row[vary_params + const_params].to_dict()
#row_dict = row.to_dict()
run_label = row['run_fname_label']
# Get the one dir that has the run_fname_label in its name
match_dirs = [x for x in os.listdir(runs_dir) if run_label in x]
assert len(match_dirs)==1, 'Must only have one dir matching label!'
vary_dir = match_dirs[0]
# Clumsy, but: walk through this dir until you find the evo_stats.json,
# then add its scores to the row_dict
for root, dirs, files in os.walk(os.path.join(runs_dir, vary_dir)):
if 'evo_stats.json' in files:
with open(os.path.join(root, 'evo_stats.json'), 'r') as f:
evo_dict = json.load(f)
row_dict['all_scores'] = evo_dict['all_scores']
# pandas has the nice perk that if you create a df from a dict where
# some of the entries are constants and one entry is a list, it duplicates
# the constant values.
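        # e.g. (illustrative) pd.DataFrame({'env_name': 'CartPole-v0', 'all_scores': [200.0, 180.0]})
        # produces two rows, each repeating env_name == 'CartPole-v0'.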
row_df = pd.DataFrame(row_dict)
all_row_dfs.append(row_df)
all_scores_df = | pd.concat(all_row_dfs) | pandas.concat |
# -*- coding: utf-8 -*-
"""
:author: <NAME>
:url: https: // github.com / LiJinfen
"""
from bleach import clean, linkify
from flask import flash
from markdown import markdown
import json
import os
import collections as ct
import pickle
from textstat.textstat import textstat
from nltk.tokenize import sent_tokenize
import pandas as pd
import time
import random
import numpy as np
import nltk
from sklearn.metrics.pairwise import cosine_similarity
def to_html(raw):
allowed_tags = ['a', 'abbr', 'b', 'br', 'blockquote', 'code',
'del', 'div', 'em', 'img', 'p', 'pre', 'strong',
'span', 'ul', 'li', 'ol','h1','h2','h3']
allowed_attributes = ['src', 'title', 'alt', 'href', 'class']
html = markdown(raw, output_format='html',
extensions=['markdown.extensions.fenced_code',
'markdown.extensions.codehilite'])
clean_html = clean(html, tags=allowed_tags, attributes=allowed_attributes)
return linkify(clean_html)
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text, error)
)
def save_messages(messages,revised_messages):
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(APP_ROOT + "/static/downloads/")
filename = "%s-%s.xlsx" % ('messages', time.strftime('%Y%m%d'))
writer = | pd.ExcelWriter(filepath + filename,engine='xlsxwriter') | pandas.ExcelWriter |
import json
import pandas as pd
import plotly
import plotly.graph_objs as go
from flask import Flask, render_template, request
app = Flask(__name__)
# Amazon DVD data
data = pd.read_csv("items.csv")
data = data.drop([0, 1, 3, 17, 18], axis=0)
data = data.sort_values(by=['product_price'])
new_data = data[0:5]
DVDs = new_data.iloc[:, 0]
Prices = new_data.iloc[:, 1]
# IMDB movie data
movies = pd.read_csv("movies.csv")
movies = movies.sort_values(by=['movie_rating'])
new_data = movies.drop([17, 9, 16, 18, 1, 15, 6, 10, 7, 14], axis=0)
movies_name = new_data.iloc[:, 0]
movies_rating = new_data.iloc[:, 1]
# Amazon Prime movie data
prime = pd.read_csv("prime.csv")
prime = prime.drop([1, 2, 3, 5, 8, 10, 12, 13], axis=0)
prime = prime.reindex([4, 15, 9, 6, 11, 0, 14, 7])
prime.index = range(8)
prime_name = prime.iloc[:, 0]
prime_reviews = prime.iloc[:, 1]
# Video-game editor data, kept in its own variable instead of reusing `prime`
editors = pd.read_csv("editors.csv")
editors = editors.drop([3, 6, 12, 15], axis=0)
df1 = editors['Editor'].value_counts()
df2 = pd.DataFrame(df1)
df2.reset_index(level=0, inplace=True)
editor_name = df2.iloc[:, 0]
editor_occurence = df2.iloc[:, 1]
@app.route('/')
def index():
feature = 'Amazon DVDs'
bar = create_plot(feature)
return render_template('index.html', plot=bar)
def create_plot(feature):
if feature == 'Amazon DVDs':
x = DVDs
y = Prices
df = pd.DataFrame({'x': x, 'y': y}) # creating a sample dataframe
data = [
go.Bar(
x=df['x'], # assign x as the dataframe column 'x'
y=df['y']
)
]
fig = go.Figure(data=[go.Bar(x=df['x'], y=df['y'],
)])
# Customize aspect
fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',
marker_line_width=3.5, opacity=0.6)
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.update_layout(title_text='DVDs sortis lors du dernier mois',
yaxis=dict(
title='Prix (dollars)',
titlefont_size=16,
tickfont_size=14,
))
fig.update_layout(
autosize=False,
width=1300,
height=700,
)
elif feature=='IMDB movies':
colors = ['lightslategray', ] * 5
x = movies_name
y = movies_rating
df = pd.DataFrame({'x': x, 'y': y}) # creating a sample dataframe
data = [
go.Bar(
x=df['x'], # assign x as the dataframe column 'x'
y=df['y']
)
]
fig = go.Figure(data=[go.Bar(x=df['x'], y=df['y'],
)])
# Customize aspect
fig.update_traces(marker_color='blue', marker_line_color='rgb(8,48,107)',
marker_line_width=3.5, opacity=0.6,),
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.update_layout(title_text='TOP 10 films de 2019',
yaxis=dict(
title='Metascore',
titlefont_size=16,
tickfont_size=14,
))
fig.update_layout(
autosize=False,
width=1300,
height=700,
)
elif feature == 'Amazon movies':
x = prime_name
y = prime_reviews
df = pd.DataFrame({'x': x, 'y': y}) # creating a sample dataframe
data = [
go.Bar(
x=df['x'], # assign x as the dataframe column 'x'
y=df['y']
)
]
fig = go.Figure(data=[go.Bar(x=df['x'], y=df['y'],
)])
# Customize aspect
fig.update_traces(marker_color='lightslategray', marker_line_color='rgb(8,48,107)',
marker_line_width=3.5, opacity=0.6)
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.update_layout(title_text='Films proposés par Amazon Prime',
yaxis=dict(
title='Nombre de reviews',
titlefont_size=16,
tickfont_size=14,
))
fig.update_layout(
autosize=False,
width=1300,
height=700,
)
elif feature == 'Amazon VGs':
x = editor_name
y = editor_occurence
df = | pd.DataFrame({'x': x, 'y': y}) | pandas.DataFrame |