prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
# -*- coding: utf-8 -*-
# Author: vkaff
# E-mail: <EMAIL>
import time
from sklearn.model_selection import train_test_split
from joblib import dump, load
import numpy as np
import pandas as pd
# We'll use this library to make the display pretty
from tabulate import tabulate
import os
from polygon_classification import param_tuning, config
from polygon_classification.features import Features
from polygon_classification.helpers import StaticValues
class StrategyEvaluator:
"""
This class implements the pipeline for various strategies.
"""
max_features_toshow = 10
def __init__(self):
pass
def hyperparamTuning(self, dataset, classifiers):
"""A complete process of distinct steps in figuring out the best ML algorithm with best hyperparameters to
polygon classification problem.
"""
pt = param_tuning.ParamTuning()
f = Features()
tot_time = time.time(); start_time = time.time()
Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)
print("Loaded train/test datasets in {} sec.".format(time.time() - start_time))
fX = f.build(Xtrain)
print("Build features from train data in {} sec.".format(time.time() - start_time))
start_time = time.time()
# 1st phase: find and fine tune the best classifier from a list of candidate ones
best_clf = pt.fineTuneClassifiers(fX, ytrain, classifiers)
estimator = best_clf['estimator']
print("Best hyperparams, {}, with score {}; {} sec.".format(
best_clf['hyperparams'], best_clf['score'], time.time() - start_time))
start_time = time.time()
# 2nd phase: train the fine tuned best classifier on the whole train dataset (no folds)
estimator = pt.trainClassifier(fX, ytrain, estimator)
print("Finished training model on dataset; {} sec.".format(time.time() - start_time))
start_time = time.time()
fX = f.build(Xtest)
print("Build features from test data in {} sec".format(time.time() - start_time))
start_time = time.time()
# 3rd phase: test the fine tuned best classifier on the test dataset
res = pt.testClassifier(fX, ytest, estimator)
self._print_stats(best_clf['clf_name'], res['metrics'], res['feature_imp'], start_time)
print("The whole process took {} sec.".format(time.time() - tot_time))
def exec_classifiers(self, dataset):
"""Train and evaluate selected ML algorithms with custom hyper-parameters on dataset.
"""
f = Features()
pt = param_tuning.ParamTuning()
start_time = time.time()
Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)
print("Loaded train/test datasets in {} sec.".format(time.time() - start_time))
fX_train = f.build(Xtrain)
fX_test = f.build(Xtest)
print("Build features from train/test data in {} sec".format(time.time() - start_time))
for clf in config.MLConf.clf_custom_params:
print('Method {}'.format(clf))
print('=======', end='')
print(len(clf) * '=')
tot_time = time.time(); start_time = time.time()
# 1st phase: train each classifier on the whole train dataset (no folds)
# estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])
estimator = pt.clf_names[clf][0](random_state=config.seed_no)
estimator.set_params(**config.MLConf.clf_custom_params[clf])
estimator = pt.trainClassifier(fX_train, ytrain, estimator)
print("Finished training model on dataset; {} sec.".format(time.time() - start_time))
start_time = time.time()
# 2nd phase: test each classifier on the test dataset
res = pt.testClassifier(fX_test, ytest, estimator)
self._print_stats(clf, res['metrics'], res['feature_imp'], start_time)
# if not os.path.exists('output'):
# os.makedirs('output')
# np.savetxt(f'output/{clf}_default_stats.csv', res['metrics']['stats'], fmt="%u")
print("The whole process took {} sec.\n".format(time.time() - tot_time))
def train(self, dataset, classifiers):
"""A complete process of distinct steps in figuring out the best ML algorithm with optimal hyperparameters that
best fits to data at hand for the polygon classification problem.
Parameters
----------
dataset : str
Name of train dataset
classifiers : str
Comma separated classifiers to tune
"""
pt = param_tuning.ParamTuning()
f = Features()
tot_time = time.time(); start_time = time.time()
data_df = pd.read_csv(dataset)
ytrain = data_df['status']
Xtrain = data_df.drop('status', axis=1)
print("Loaded train dataset in {} sec.".format(time.time() - start_time))
fX = f.build(Xtrain)
print("Build features from train data in {} sec.".format(time.time() - start_time))
start_time = time.time()
# 1st phase: find and fine tune the best classifier from a list of candidate ones
best_clf = pt.fineTuneClassifiers(fX, ytrain, classifiers)
estimator = best_clf['estimator']
print("Best hyperparams for {}, {}, with score {}; {} sec.".format(
best_clf['clf_name'], best_clf['hyperparams'], best_clf['score'], time.time() - start_time))
# estimator = pt.trainClassifier(fX, ytrain, estimator)
os.makedirs(os.path.join(os.getcwd(), 'models'), exist_ok=True)
dump(estimator, os.path.join(os.getcwd(), 'models', best_clf['clf_name'] + '_model.joblib'))
print("The whole process took {} sec.".format(time.time() - tot_time))
return best_clf['clf_name']
def evaluate(self, dataset, classifier):
"""Evaluate the best ML algorithm with optimal hyperparameters to new unseen data.
Parameters
----------
dataset : str
Name of the dataset to evaluate on
classifier : str
Classifier to train and evaluate
"""
pt = param_tuning.ParamTuning()
f = Features()
tot_time = time.time(); start_time = time.time()
# Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)
data_df =
| pd.read_csv(dataset) | pandas.read_csv |
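# Illustrative sketch (an assumption, not part of the dataset row above): a model dumped by
# StrategyEvaluator.train() could later be reloaded with joblib to score new feature vectors.
# The model file name and the feature matrix argument below are placeholders.
from joblib import load

def _example_load_and_predict(feature_matrix, model_path='models/RandomForest_model.joblib'):
    clf = load(model_path)
    return clf.predict(feature_matrix)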
print("Loading...")
import sys
import numpy as np
from numpy import genfromtxt
import tkinter as tk
from tkinter import filedialog
import os
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal as signal
from scipy import interpolate
from scipy.stats import mode
from ipfx import feature_extractor
from ipfx import subthresh_features as subt
import pyabf
print("Load finished")
root = tk.Tk()
root.withdraw()
files = filedialog.askdirectory(
title='Select dir File'
)
root_fold = files
def crop_ap(abf):
spikext = feature_extractor.SpikeFeatureExtractor(filter=0, dv_cutoff=20)
dataT, dataV, dataI = abf.sweepX, abf.sweepY, abf.sweepC
spike_in_sweep = spikext.process(dataT, dataV, dataI)
sweep_indi = np.arange(0, dataV.shape[0])
if not spike_in_sweep.empty:
ap_start_ = spike_in_sweep['threshold_index'].to_numpy()
ap_end_ = spike_in_sweep['trough_index'].to_numpy() + 300
pairs = np.vstack((ap_start_, ap_end_)).T
pair_data = []
for p in pairs:
temp = np.arange(p[0], p[1]).astype(int)
pair_data.append(temp.tolist())
pair_data = np.hstack(pair_data)
pair_data = pair_data[pair_data<dataV.shape[0]]
dataV[pair_data] = np.nan
sweep_data = dataV
else:
sweep_data = abf.sweepY
return sweep_data
def rmp_abf(abf, time=30, crop=True):
#try:
sweepsdata = []
for sweepNumber in abf.sweepList:
#f10 = int((abf.sweepLengthSec * .10) * 1000)
f10 = int((time) * 1000)
t1 = abf.dataPointsPerMs * f10
if t1 >= abf.sweepY.shape[0]:
t1 = abf.sweepY.shape[0] - 1
abf.setSweep(sweepNumber)
if crop == True:
data = crop_ap(abf)
else:
data = abf.sweepY
mean_vm = np.nanmean(data)
std_vm = np.nanstd(data)
mmode_vm = mode(data, nan_policy='omit')[0][0]
mean_vm = mmode_vm
f_vm = np.nanmean(data[:t1])
e_vm = np.nanmean(data[-t1:])
median_vm = np.nanmedian(data[:t1])
mode_vm = mode(data[:t1], nan_policy='omit')[0][0]
delta_vm = f_vm - e_vm
sweep_time = abf.sweepLengthSec
if abf.sweepLengthSec >= time:
f60 = abf.dataPointsPerMs * int((time) * 1000)
median_vm_last = np.nanmedian(abf.sweepY[-t1:])
mode_vm_last = mode(abf.sweepY[-t1:], nan_policy='omit')[0][0]
else:
mode_vm_last = mode_vm
median_vm_last= np.nanmedian(abf.sweepY)
#if mean_vm < -20 and mean_vm >-100:
sweepsdata.append(np.hstack((mean_vm, std_vm, f_vm, median_vm, mode_vm, e_vm, median_vm_last, mode_vm_last, delta_vm, sweep_time)))
sweep_full = np.vstack(sweepsdata)
df = pd.DataFrame(data=sweep_full, columns=[f'Overall Mean vm','Overall STD vm', f'first {time}s Mean Vm', f'first {time}s Median Vm',f'first {time}s Mode Vm', f'End {time}s Mean Vm', f'End {time}s median Vm', f'End {time}s mode Vm', 'Delta Vm', 'Length(s)'])
df['fold_name'] = np.full(sweep_full.shape[0], abf.abfFolderPath)
df['sweep number'] = abf.sweepList
df['cell_name'] = np.full(sweep_full.shape[0], abf.abfID)
return df
#except:
#return pd.DataFrame
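# Illustrative usage sketch (an assumption, not part of the original script): rmp_abf()
# returns one row of resting-membrane-potential statistics per sweep, so per-file results
# can be stacked into a single summary table. The folder path below is a placeholder.
def _example_rmp_summary(folder='abf_folder', time=30):
    frames = []
    for name in os.listdir(folder):
        if name.endswith('.abf'):
            abf = pyabf.ABF(os.path.join(folder, name))
            frames.append(rmp_abf(abf, time=time, crop=True))
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()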
def find_zero(realC):
#expects 1d array
zero_ind = np.where(realC == 0)[0]
##Account for time constant?
diff = np.diff(zero_ind)
if np.amax(diff) > 1:
diff_jump = np.where(diff>2)[0][0]
if diff_jump + 3000 > realC.shape[0]:
_hop = diff_jump
else:
_hop = diff_jump + 3000
zero_ind_crop = np.hstack((zero_ind[:diff_jump], zero_ind[_hop:]))
else:
zero_ind_crop = zero_ind
return zero_ind_crop
def compute_vm_drift(realY, zero_ind):
sweep_wise_mean = np.mean(realY[:,zero_ind], axis=1)
mean_drift = np.abs(np.amax(sweep_wise_mean) - np.amin(sweep_wise_mean))
abs_drift = np.abs(np.amax(realY[:,zero_ind]) - np.amin(realY[:,zero_ind]))
return mean_drift, abs_drift
def compute_rms(realY, zero_ind):
mean = np.mean(realY[:,zero_ind], axis=1)
rms = []
for x in np.arange(mean.shape[0]):
temp = np.sqrt(np.mean(np.square(realY[x,zero_ind] - mean[x])))
rms = np.hstack((rms, temp))
full_mean = np.mean(rms)
return full_mean, np.amax(rms)
def run_qc(realY, realC):
#try:
zero_ind = find_zero(realC[0,:])
mean_rms, max_rms = compute_rms(realY, zero_ind)
mean_drift, max_drift = compute_vm_drift(realY, zero_ind)
return [mean_rms, max_rms, mean_drift, max_drift]
#except:
# print("Failed to run QC on cell")
return [np.nan, np.nan, np.nan, np.nan]
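# Illustrative sketch (an assumption): run_qc() expects 2-D arrays shaped
# (n_sweeps, n_samples) for the recorded voltage and the command current, so sweeps
# from a pyabf.ABF object can be stacked before computing RMS noise and drift.
def _example_run_qc(abf):
    sweeps_v, sweeps_c = [], []
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        sweeps_v.append(abf.sweepY)
        sweeps_c.append(abf.sweepC)
    return run_qc(np.vstack(sweeps_v), np.vstack(sweeps_c))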
filter = input("Filter (recommended to be set to 0): ")
braw = False
bfeat = True
try:
filter = int(filter)
except:
filter = 0
tag = input("tag to apply output to files: ")
try:
tag = str(tag)
except:
tag = ""
full_df =
| pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import pickle
import pyranges as pr
from ATGC.model.CustomKerasModels import InputFeatures, ATGC
from ATGC.model.CustomKerasTools import BatchGenerator, Losses, histogram_equalization
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[6], True)
tf.config.experimental.set_visible_devices(physical_devices[6], 'GPU')
##your path to the files directory
path = 'ATGC/files/'
usecols = ['Hugo_Symbol', 'Chromosome', 'Start_Position', 'End_Position', 'Variant_Classification', 'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele2', 'Tumor_Sample_Barcode', 't_ref_count', 't_alt_count']
##your GENIE MAF
genie_maf = pd.read_csv('data_mutations_extended_8.3-consortium.txt', sep='\t',
usecols=usecols,
low_memory=False)
##your GENIE samples
genie_sample_table = pd.read_csv('tumor_normal.csv', sep=',', low_memory=False)
genie_sample_table.rename(columns={'sample_id': 'Tumor_Sample_Barcode'}, inplace=True)
genie_maf = genie_maf.loc[genie_maf['Tumor_Sample_Barcode'].isin(genie_sample_table['Tumor_Sample_Barcode'])]
genie_maf.reset_index(inplace=True, drop=True)
path_to_genome = path + 'chromosomes/'
chromosomes = {}
for i in list(range(1, 23))+['X', 'Y']:
with open(path_to_genome+'/'+'chr'+str(i)+'.txt') as f:
chromosomes[str(i)] = f.read()
##Use GFF3 to annotate variants
##ftp://ftp.ensembl.org/pub/grch37/current/gff3/homo_sapiens/
gff = pd.read_csv(path + 'Homo_sapiens.GRCh37.87.gff3',
sep='\t',
names=['chr', 'unknown', 'gene_part', 'start', 'end', 'unknown2', 'strand', 'unknown3', 'gene_info'],
usecols=['chr','gene_part', 'start', 'end', 'gene_info'],
low_memory=False)
gff_cds_pr = pr.PyRanges(gff.loc[(gff['gene_part'] == 'CDS') & gff['chr'].isin(chromosomes), ['chr', 'start', 'end', 'gene_info']].astype({'start': int, 'end': int}).rename(columns={'chr': 'Chromosome', 'start': 'Start', 'end': 'End'})).merge()
gff_exon_pr = pr.PyRanges(gff.loc[(gff['gene_part'] == 'exon') & gff['chr'].isin(chromosomes), ['chr', 'start', 'end', 'gene_info']].astype({'start': int, 'end': int}).rename(columns={'chr': 'Chromosome', 'start': 'Start', 'end': 'End'})).merge()
del gff
##make index column for merging
genie_maf['index'] = genie_maf.index.values
maf_pr = pr.PyRanges(genie_maf.loc[:, ['Chromosome', 'Start_Position', 'End_Position', 'index']].rename(columns={'Start_Position': 'Start', 'End_Position': 'End'}))
##used genie 8.3 panels, panel information can be obtained from https://www.synapse.org/#!Synapse:syn7844529
genie = pd.read_csv('genomic_information_8.3-consortium.txt', sep='\t', low_memory=False)
panels = genie.SEQ_ASSAY_ID.unique()
panel_df = pd.DataFrame(data=panels, columns=['Panel'])
total_sizes = []
cds_sizes = []
exon_sizes = []
panel_prs = []
for panel in panels:
if panel in genie_sample_table['seq_assay_id'].unique():
print(panel)
panel_pr = pr.PyRanges(genie.loc[(genie['SEQ_ASSAY_ID'] == panel) & genie['Chromosome'].isin(chromosomes), 'Chromosome':'End_Position'].rename(columns={'Start_Position': 'Start', 'End_Position': 'End'})).merge()
total_sizes.append(sum([i + 1 for i in panel_pr.lengths()]))
cds_sizes.append(sum([i + 1 for i in panel_pr.intersect(gff_cds_pr).lengths()]))
exon_sizes.append(sum([i + 1 for i in panel_pr.intersect(gff_exon_pr).lengths()]))
panel_prs.append(panel_pr)
grs = {k: v for k, v in zip(['CDS', 'exon'] + list(panels[np.isin(panels, genie_sample_table['seq_assay_id'].unique())]), [gff_cds_pr, gff_exon_pr] + panel_prs)}
result = pr.count_overlaps(grs, pr.concat({'maf': maf_pr}.values()))
result = result.df
genie_maf = pd.merge(genie_maf, result.iloc[:, 3:], how='left', on='index')
genie_maf = pd.merge(genie_maf, genie_sample_table, on='Tumor_Sample_Barcode')
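# Illustrative sketch (an assumption, not part of the original pipeline): the chromosome
# strings loaded above make it possible to pull flanking sequence context around a variant,
# which is what variant_features() below assembles for each MAF row. Coordinates are treated
# as 1-based, as in MAF files; the position used here is a placeholder.
def _example_flanks(chrom='1', start=1000000, flank=11):
    seq = chromosomes[chrom]
    five_p = seq[start - flank - 1:start - 1]
    three_p = seq[start:start + flank]
    return five_p, three_p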
def variant_features(maf, ref_length=6, alt_length=6, five_p_length=11, three_p_length=11):
refs = []
alts = []
five_ps = []
three_ps = []
if ref_length % 2 != 0:
ref_length += 1
print('Your ref length was not even, incrementing by 1.')
if alt_length % 2 != 0:
alt_length += 1
print('Your alt length was not even, incrementing by 1.')
for index, row in enumerate(maf.itertuples()):
Ref = row.Reference_Allele
Alt = row.Tumor_Seq_Allele2
Chr = str(row.Chromosome)
Start = row.Start_Position
End = row.End_Position
if
| pd.isna(Alt) | pandas.isna |
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def reverse_personality(nombre):
if nombre == 5:
nombre = 1
elif nombre == 4:
nombre = 2
elif nombre == 2:
nombre = 4
elif nombre == 1:
nombre = 5
return nombre
def reverse(nombre):
if nombre == 7:
nombre = 1
elif nombre == 6:
nombre = 2
elif nombre == 5:
nombre = 4
elif nombre == 4:
nombre = 3
elif nombre == 3:
nombre = 4
elif nombre == 2:
nombre = 6
elif nombre == 1:
nombre = 7
return nombre
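# Illustrative sketch (an assumption, not part of the original script): reverse-keyed
# questionnaire items can be recoded by applying these helpers column-wise; the column
# names passed in would be placeholders chosen by the analyst.
def _example_reverse_code(df, personality_items=(), seven_point_items=()):
    out = df.copy()
    for col in personality_items:
        out[col] = out[col].apply(reverse_personality)
    for col in seven_point_items:
        out[col] = out[col].apply(reverse)
    return out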
df_fr = pd.read_csv("./data_fr.csv")
df_en = pd.read_csv("./data_en.csv")
df_fr_old_db = pd.read_csv("./data_fr_old_db.csv")
df_en_old_db = pd.read_csv("./data_en_old_db.csv")
df_en.columns = df_fr_old_db.columns
df_en_old_db.columns = df_fr_old_db.columns
df =
| pd.concat([df_fr_old_db, df_fr, df_en, df_en_old_db]) | pandas.concat |
# coding=utf-8
import pandas as pd
from sqlalchemy import create_engine
from tabulate import tabulate
# pd.set_option('display.max_columns', None)
pd.set_option('display.width', 5000)
pd.set_option('display.max_columns', 60)
class PandasUtil(object):
"""
- Pandas Util
"""
def __init__(self, mysql_info: list):
self.mysql_engine = create_engine(
"mysql+mysqldb://{user}:{passwd}@{host}/{db}?charset=utf8".format(**mysql_info),
encoding='utf8', echo=False)
def read_sql(self, sql: str, index_col: str = 'date', columns: list = None) -> pd.DataFrame:
df = pd.read_sql(sql=sql, con=self.mysql_engine.raw_connection(), index_col=index_col, columns=columns)
return df
def to_sql(self, df: pd.DataFrame, table_name: str, index_label: str or [] = None, dtype: dict = None) -> None:
if dtype is None:
dtype = {}
if index_label is None or len(index_label) == 0:
df.to_sql(con=self.mysql_engine, name=table_name, if_exists='append', chunksize=10000, dtype=dtype,
index=False)
else:
df.to_sql(con=self.mysql_engine, name=table_name, if_exists='append', chunksize=10000, dtype=dtype,
index=True, index_label=index_label)
@staticmethod
def merge_all(df_list: list) -> pd.DataFrame:
if len(df_list) == 0:
return pd.DataFrame()
elif len(df_list) == 1:
return df_list[0]
else:
df = df_list.pop(0)
for df2 in df_list:
df = PandasUtil.merge(df, df2)
return df
@staticmethod
def merge(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
df2_columns = df2.columns.difference(df1.columns)
return pd.merge(df1, df2[df2_columns], left_index=True, right_index=True, how='outer')
@staticmethod
def create_dataframe(rows: list, indexes: list, column_names: list, column_prefix: str = '',
extra_column: tuple = None) -> pd.DataFrame:
if len(column_prefix) > 0:
_column_names = list(column_names)
column_names = []
for c in _column_names:
column_names.append('%s%s' % (column_prefix, c))
if len(rows) > 0 and extra_column:
key, value = extra_column[0], extra_column[1]
column_names.insert(0, key)
df =
| pd.DataFrame(rows, index=indexes, columns=column_names) | pandas.DataFrame |
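# Illustrative sketch (an assumption, not part of the dataset row above): PandasUtil.merge()
# keeps only the columns of the second frame that the first one lacks and joins on the index;
# the small frames below are made-up data.
import pandas as pd
left = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
right = pd.DataFrame({'a': [9, 9], 'b': [3, 4]}, index=['x', 'y'])
extra_cols = right.columns.difference(left.columns)
merged = pd.merge(left, right[extra_cols], left_index=True, right_index=True, how='outer')
# merged now has column 'a' from `left` and column 'b' from `right`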
import numpy as np
from scipy.optimize import minimize
import pandas as pd
import matplotlib.pyplot as plt
class garch:
def __init__(self, init_params, x, model = 'GARCH'):
self.model = model
self.params = init_params
self.x = x
def sgarch(self, init_params, x):
alpha0 = self.params[0]
alpha1 = self.params[1]
beta1 = self.params[2]
eps = self.x
iT = len(eps)
sigma_2 = np.zeros(iT)
for i in range(iT):
if i == 0:
sigma_2[i] = alpha0/(1 - alpha1 - beta1)
else:
sigma_2[i] = alpha0 + alpha1 * eps[i - 1]**2 + beta1 * sigma_2[i - 1]
return sigma_2
def gjr_garch(self, init_params, x):
alpha0 = self.params[0]
alpha1 = self.params[1]
beta1 = self.params[2]
omega = self.params[3]
eps = self.x
iT = len(eps)
sigma_2 = np.zeros(iT)
for i in range(iT):
if i == 0:
sigma_2[i] = alpha0/(1 - alpha1 - beta1)
else:
sigma_2[i] = alpha0 + alpha1 * eps[i - 1]**2 + beta1 * sigma_2[i - 1] + omega * eps[i - 1]**2 * (eps[i - 1] < 0)
return sigma_2
def loglike(self, init_params, x):
if self.model == 'GARCH':
sigma_2 = self.sgarch(init_params, x)
elif self.model == 'GJR-GARCH':
sigma_2 = self.gjr_garch(init_params, x)
logL = -np.sum(-np.log(sigma_2) - self.x**2/sigma_2)
return logL
def fit(self, init_params, x):
if self.model == 'GARCH':
res = minimize(self.loglike, init_params, args = (x, ),
bounds = ((0.0001, None), (0.0001, None), (0.0001, None)),
options = {'disp': True})
elif self.model == 'GJR-GARCH':
res = minimize(self.loglike, init_params, args = (x,),
bounds = ((0.0001, None), (0.0001, None), (0.0001, None), (0.0001, None)),
options = {'disp': True})
return res
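# Illustrative sketch (an assumption, not part of the original module): with fixed
# coefficients, the GARCH(1,1) variance recursion can be evaluated directly on a return
# series; the coefficient values below are placeholders, and fit() is noted by the author
# as not working yet.
def _example_variance_path(returns):
    init_params = np.array([0.05, 0.10, 0.85])  # alpha0, alpha1, beta1 (illustrative)
    model = garch(init_params, returns, model='GARCH')
    return model.sgarch(init_params, returns)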
if __name__ == '__main__':
print('There is still some error in the fit() function, so the model cannot run yet')
df =
| pd.read_csv('AMD.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from card_live_dashboard.model.RGIParser import RGIParser
RGI_DF = pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[['file1', 'Perfect', 'class1; class2', 'gene1'],
['file1', 'Strict', 'class1; class2; class3', 'gene2'],
['file2', 'Perfect', 'class1; class2; class4', 'gene1'],
['file2', 'Perfect', 'class5', 'gene1'],
['file2', 'Perfect', '', 'gene1'],
['file3', None, None, None],
]
).set_index('filename')
RGI_PARSER = RGIParser(RGI_DF)
RGI_DF_NONE = pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[['file1', None, '', None],
['file2', None, None, None]
]
).set_index('filename')
RGI_PARSER_NONE = RGIParser(RGI_DF_NONE)
RGI_DF_NA = pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[['file1', None, '', pd.NA],
['file2', None, pd.NA, pd.NA]
]
).set_index('filename')
RGI_PARSER_NA = RGIParser(RGI_DF_NA)
RGI_DF_ONLY_EMPTY_STRING = pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[['file1', None, '', pd.NA],
['file2', None, '', pd.NA]
]
).set_index('filename')
RGI_PARSER_ONLY_EMPTY_STRING = RGIParser(RGI_DF_ONLY_EMPTY_STRING)
RGI_DF_ONLY_NA= pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[['file1', None, pd.NA, pd.NA],
['file2', None, np.nan, pd.NA]
]
).set_index('filename')
RGI_PARSER_ONLY_NA = RGIParser(RGI_DF_ONLY_NA)
RGI_DF_ONLY_NUMPY_NAN = pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[['file1', np.nan, np.nan, np.nan],
['file2', np.nan, np.nan, np.nan]
]
).set_index('filename')
RGI_PARSER_ONLY_NUMPY_NAN = RGIParser(RGI_DF_ONLY_NUMPY_NAN)
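# Illustrative sketch (an assumption): the behaviour exercised by the explode tests below can
# be approximated with plain pandas by treating empty strings as missing, splitting the
# drug-class field on '; ' and exploding; RGIParser.explode_column() itself may differ.
def _example_explode(df, column='rgi_main.Drug Class'):
    out = df.copy()
    out[column + '_exploded'] = out[column].replace('', np.nan).str.split('; ')
    return out.explode(column + '_exploded')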
def test_all_drugs():
assert {'class1', 'class2', 'class3', 'class4', 'class5'} == RGI_PARSER.all_drugs()
def test_all_drugs_only_none():
assert set() == RGI_PARSER_NONE.all_drugs()
def test_all_drugs_only_na():
assert set() == RGI_PARSER_NA.all_drugs()
def test_all_drugs_only_empty_string():
assert set() == RGI_PARSER_ONLY_EMPTY_STRING.all_drugs()
def test_all_drugs_only_na_values():
assert set() == RGI_PARSER_ONLY_NA.all_drugs()
def test_all_drugs_empty():
rgi_df_empty = pd.DataFrame(
columns=['filename', 'rgi_main.Cut_Off', 'rgi_main.Drug Class', 'rgi_main.Best_Hit_ARO'],
data=[]
).set_index('filename')
rgi_parser_empty = RGIParser(rgi_df_empty)
assert set() == rgi_parser_empty.all_drugs()
def test_all_amr_genes():
assert {'gene1', 'gene2'} == RGI_PARSER.all_amr_genes()
def test_all_amr_genes_only_none():
assert set() == RGI_PARSER_NONE.all_amr_genes()
def test_all_amr_genes_only_na():
assert set() == RGI_PARSER_NA.all_amr_genes()
def test_expand_drug_class():
expanded_df = RGI_PARSER.explode_column('rgi_main.Drug Class')
assert 11 == len(expanded_df)
assert ['file1', 'file1', 'file1', 'file1', 'file1',
'file2', 'file2', 'file2', 'file2', 'file2', 'file3'] == expanded_df.index.tolist()
value_counts = expanded_df['rgi_main.Drug Class'].groupby('filename').value_counts()
assert 2 == value_counts['file1']['class1; class2']
assert 3 == value_counts['file1']['class1; class2; class3']
assert 3 == value_counts['file2']['class1; class2; class4']
assert 1 == value_counts['file2']['class5']
assert 'file3' not in value_counts
assert ['class1', 'class2', 'class1', 'class2', 'class3',
'class1', 'class2', 'class4', 'class5'] == expanded_df['rgi_main.Drug Class_exploded'].dropna().tolist()
assert pd.isna(expanded_df.loc['file3', 'rgi_main.Drug Class_exploded'])
def test_expand_drug_class_none():
expanded_df = RGI_PARSER_NONE.explode_column('rgi_main.Drug Class')
assert 2 == len(expanded_df)
assert ['file1', 'file2'] == expanded_df.index.tolist()
assert '' == expanded_df.loc['file1', 'rgi_main.Drug Class']
assert [] == expanded_df['rgi_main.Drug Class_exploded'].dropna().tolist()
assert pd.isna(expanded_df.loc['file1', 'rgi_main.Drug Class_exploded'])
assert pd.isna(expanded_df.loc['file2', 'rgi_main.Drug Class_exploded'])
def test_expand_drug_class_na():
expanded_df = RGI_PARSER_NA.explode_column('rgi_main.Drug Class')
assert 2 == len(expanded_df)
assert ['file1', 'file2'] == expanded_df.index.tolist()
assert '' == expanded_df.loc['file1', 'rgi_main.Drug Class']
assert pd.isna(expanded_df.loc['file2', 'rgi_main.Drug Class'])
assert [] == expanded_df['rgi_main.Drug Class_exploded'].dropna().tolist()
assert pd.isna(expanded_df.loc['file1', 'rgi_main.Drug Class_exploded'])
assert pd.isna(expanded_df.loc['file2', 'rgi_main.Drug Class_exploded'])
def test_expand_drug_class_only_empty_string():
expanded_df = RGI_PARSER_ONLY_EMPTY_STRING.explode_column('rgi_main.Drug Class')
assert 2 == len(expanded_df)
assert ['file1', 'file2'] == expanded_df.index.tolist()
assert '' == expanded_df.loc['file1', 'rgi_main.Drug Class']
assert '' == expanded_df.loc['file2', 'rgi_main.Drug Class']
assert [] == expanded_df['rgi_main.Drug Class_exploded'].dropna().tolist()
assert pd.isna(expanded_df.loc['file1', 'rgi_main.Drug Class_exploded'])
assert pd.isna(expanded_df.loc['file2', 'rgi_main.Drug Class_exploded'])
def test_expand_drug_class_only_na():
expanded_df = RGI_PARSER_ONLY_NA.explode_column('rgi_main.Drug Class')
assert 2 == len(expanded_df)
assert ['file1', 'file2'] == expanded_df.index.tolist()
assert pd.isna(expanded_df.loc['file1', 'rgi_main.Drug Class'])
assert pd.isna(expanded_df.loc['file2', 'rgi_main.Drug Class'])
assert [] == expanded_df['rgi_main.Drug Class_exploded'].dropna().tolist()
assert pd.isna(expanded_df.loc['file1', 'rgi_main.Drug Class_exploded'])
assert pd.isna(expanded_df.loc['file2', 'rgi_main.Drug Class_exploded'])
def test_expand_drug_class_only_numpy_nan():
expanded_df = RGI_PARSER_ONLY_NUMPY_NAN.explode_column('rgi_main.Drug Class')
assert 2 == len(expanded_df)
assert ['file1', 'file2'] == expanded_df.index.tolist()
assert
| pd.isna(expanded_df.loc['file1', 'rgi_main.Drug Class']) | pandas.isna |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
Age,
EmailAddressToDomain,
IsFreeEmailDomain,
TimeSince,
URLToDomain,
URLToProtocol,
URLToTLD,
Week,
get_transform_primitives
)
def test_time_since():
time_since = TimeSince()
# class datetime.datetime(year, month, day[, hour[, minute[, second[, microsecond[,
times = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2019, 3, 1, 0, 0, 1, 0),
datetime(2019, 3, 1, 0, 2, 0, 0)])
cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, -120])
time_since = TimeSince(unit='nanoseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(round, values)) == [-1000, -1000000000, -120000000000])
time_since = TimeSince(unit='milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Years')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, 0, 0])
times_y = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2020, 3, 1, 0, 0, 1, 0),
datetime(2017, 3, 1, 0, 0, 0, 0)])
time_since = TimeSince(unit='Years')
values = time_since(array=times_y, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, 1])
error_text = 'Invalid unit given, make sure it is plural'
with pytest.raises(ValueError, match=error_text):
time_since = TimeSince(unit='na')
time_since(array=times, time=cutoff_time)
def test_age():
age = Age()
dates = pd.Series(datetime(2010, 2, 26))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.005] # .005 added due to leap years
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_two_years_quarterly():
age = Age()
dates = pd.Series(pd.date_range('2010-01-01', '2011-12-31', freq='Q'))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [9.915, 9.666, 9.414, 9.162, 8.915, 8.666, 8.414, 8.162]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_leap_year():
age = Age()
dates = pd.Series([datetime(2016, 1, 1)])
ages = age(dates, time=datetime(2016, 3, 1))
correct_ages = [(31 + 29) / 365.0]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
# born leap year date
dates = pd.Series([datetime(2016, 2, 29)])
ages = age(dates, time=datetime(2020, 2, 29))
correct_ages = [4.0027] # .0027 added due to leap year
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_nan():
age = Age()
dates = pd.Series([datetime(2010, 1, 1), np.nan, datetime(2012, 1, 1)])
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.159, np.nan, 8.159]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_week_no_deprecation_message():
dates = [datetime(2019, 1, 3),
datetime(2019, 6, 17, 11, 10, 50),
datetime(2019, 11, 30, 19, 45, 15)
]
with pytest.warns(None) as record:
week = Week()
week(dates).tolist()
assert not record
def test_url_to_domain_urls():
url_to_domain = URLToDomain()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org'])
correct_urls = ['play.google.com',
'mplay.google.co.in',
'lplay.google.co.in',
'play.google.co.in',
'tplay.google.co.in',
'google.co.in',
'google.co.in',
'google.com',
'compzets.com',
'compzets.com',
'facebook.com',
'compzets.net',
'featuretools.org']
np.testing.assert_array_equal(url_to_domain(urls), correct_urls)
def test_url_to_domain_long_url():
url_to_domain = URLToDomain()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['chart.apis.google.com']
results = url_to_domain(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_domain_nan():
url_to_domain = URLToDomain()
urls = pd.Series(['www.featuretools.com', np.nan], dtype='object')
correct_urls = pd.Series(['featuretools.com', np.nan], dtype='object')
results = url_to_domain(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_urls():
url_to_protocol = URLToProtocol()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'https://featuretools.com'])
correct_urls = pd.Series(['https',
'http',
'http',
np.nan,
'http',
'https',
np.nan,
np.nan,
'https',
'http',
'https'])
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_long_url():
url_to_protocol = URLToProtocol()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['http']
results = url_to_protocol(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_protocol_nan():
url_to_protocol = URLToProtocol()
urls = pd.Series(['www.featuretools.com', np.nan, ''], dtype='object')
correct_urls = pd.Series([np.nan, np.nan, np.nan], dtype='object')
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_tld_urls():
url_to_tld = URLToTLD()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.dev?asd=10',
'www.compzets.com?asd=10',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'featuretools.org'])
correct_urls = ['com',
'in',
'in',
'in',
'in',
'in',
'in',
'com',
'dev',
'com',
'net',
'org',
'org']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_long_url():
url_to_tld = URLToTLD()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['com']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_nan():
url_to_tld = URLToTLD()
urls = pd.Series(['www.featuretools.com', np.nan, 'featuretools', ''], dtype='object')
correct_urls = pd.Series(['com', np.nan, np.nan, np.nan], dtype='object')
results = url_to_tld(urls)
pd.testing.assert_series_equal(results, correct_urls, check_names=False)
def test_is_free_email_domain_valid_addresses():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['<EMAIL>', '<EMAIL>', '<EMAIL>', 'free<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_valid_addresses_whitespace():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([' <EMAIL>', ' <EMAIL>', '<EMAIL> ', ' <EMAIL> '])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_nan():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, '<EMAIL>', '<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, False, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_empty_string():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['', '<EMAIL>', '<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, False, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_empty_series():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_invalid_email():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, 'this is not an email address', '<EMAIL>', '<EMAIL>', 1234, 1.23, True])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, np.nan, False, True, np.nan, np.nan, np.nan])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_all_nan():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, np.nan])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, np.nan], dtype=object)
pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_valid_addresses():
email_address_to_domain = EmailAddressToDomain()
array = pd.Series(['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>'])
answers = pd.Series(email_address_to_domain(array))
correct_answers = pd.Series(['hotmail.com', 'featuretools.com', 'yahoo.com', 'gmail.com'])
| pd.testing.assert_series_equal(answers, correct_answers) | pandas.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created in September 2020
@author: karliskanders
Functions and classes for generating and analysing career transition recommendations
"""
import pandas as pd
import numpy as np
import pickle
from time import time
import yaml
import os
from ast import literal_eval
from sklearn.preprocessing import normalize
from scipy.spatial.distance import cdist, cosine
from scipy.stats import wilcoxon
from collections import defaultdict
import mapping_career_causeways
import mapping_career_causeways.compare_nodes_utils as compare_nodes_utils
import mapping_career_causeways.load_data_utils as load_data
from mapping_career_causeways.scripts import pickle_large_files
find_closest = compare_nodes_utils.find_closest
useful_paths = mapping_career_causeways.Paths()
data = load_data.Data()
sim = load_data.Similarities()
# Import default skills description embeddings
embeddings = np.load(f'{useful_paths.data_dir}interim/embeddings/embeddings_skills_description_SBERT.npy')
### SET UP DEFAULT TRANSITION FILTERING CRITERIA ###
with open(f'{useful_paths.codebase_dir}configs/default_transition_params.yaml', 'r') as f:
def_transition_params = yaml.load(f, Loader=yaml.FullLoader)
# Viability: Similarity threshold for viable transitions (default = 0.3)
MIN_VIABLE_DEF = def_transition_params['MIN_VIABLE']
# Viability: Similarity threshold for highly viable transitions (default = 0.4)
HIGHLY_VIABLE_DEF = def_transition_params['HIGHLY_VIABLE']
# Viability: Max absolute difference in job zones (default = 1)
MAX_JOB_ZONE_DIF_DEF = def_transition_params['MAX_JOB_ZONE_DIF']
# Desirability: Threshold for differences in earnings (default = 0.75)
MIN_EARNINGS_RATIO_DEF = def_transition_params['MIN_EARNINGS_RATIO']
def occupations_to_check(id_to_check):
"""
Helper function for selecting a list of occupations
Parameters
----------
id_to_check (list of int, or str or None):
List of integers corresponding to occupation IDs, or a string for a shorthand
reference to a predefined set of occupations.
"""
if (id_to_check is None) or (id_to_check == 'report'):
id_to_check = data.report_occ_ids
elif id_to_check == 'top':
id_to_check = data.top_occ_ids
elif id_to_check == 'all':
id_to_check = data.occ.id.to_list()
return id_to_check
def find_most_similar(
occ = None,
similarity_measure='combined',
n=15,
destination_ids='report',
transpose=False):
"""
Helper function for finding the most similar occupations that a worker in
the specified occupation could transition to.
Parameters
----------
occ (int or str):
Either the occupation ID (int) or preferred label (str)
similarity_measure (str):
One of the following: 'combined', 'essential_skills', 'optional_skills',
'work_activities', 'work_context'
n (int):
Number of the top-most similar occupations to return
destination_ids (list of int, or str):
List of admissible destination occupations, specified by a list occupation IDs or
a string for a shorthand reference to a predefined set of occupations
transpose (boolean):
If True, it will transpose the similarity matrix and the results will
show the most similar occupations that could transition into the specified occupation
(NB: The skills and combined similarity matrices are asymmetric)
Returns
-------
df (pandas.DataFrame):
A dataframe with the following fields: 'id', 'preferred_label' and 'similarity'
"""
occ_id = data.occ_title_to_id(occ)
destination_ids = occupations_to_check(destination_ids)
sim_matrix = sim.select_similarity_matrix(similarity_measure)
if transpose:
sim_matrix = sim_matrix.T
df = find_closest(occ_id, sim_matrix, data.occ[['id', 'preferred_label']])
df = df[df.id.isin(destination_ids)].iloc[0:n]
return df
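# Illustrative usage sketch (an assumption): the occupation title below is a placeholder.
def _example_most_similar(occupation='cook'):
    # Ten most similar admissible destination occupations for the given origin occupation
    return find_most_similar(occ=occupation, similarity_measure='combined', n=10,
                             destination_ids='report')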
def get_transitions(
origin_ids = None,
MIN_VIABLE = MIN_VIABLE_DEF,
HIGHLY_VIABLE = HIGHLY_VIABLE_DEF,
MAX_JOB_ZONE_DIF = MAX_JOB_ZONE_DIF_DEF,
MIN_EARNINGS_RATIO = MIN_EARNINGS_RATIO_DEF,
destination_ids = None,
verbose=False, less_information=False):
"""
Function to find viable, desirable and safe transitions according to the specified filters;
NB: This function outputs only transitions whose occupation similarity is above MIN_VIABLE threshold
Parameters
----------
origin_ids (list of int):
List of origin occupation IDs, for which to check the transitions. If None,
we only check the subset of occupations analysed in the report
MIN_VIABLE (float):
Similarity threshold for viable transitions (default = 0.3)
HIGHLY_VIABLE (float):
Similarity threshold for highly viable transitions (default = 0.4)
MAX_JOB_ZONE_DIF (int):
Max absolute difference in job zones (default = 1)
MIN_EARNINGS_RATIO (float):
Threshold for differences in earnings (default = 0.75)
destination_ids (list of int):
List of permissible destination occupation IDs. If None, we check only
the occupations subset analysed in the report
Returns
-------
trans_df (pandas.DataFrame):
A pandas dataframe with transitions and various descriptors and indicators.
See https://github.com/nestauk/mapping-career-causeways/tree/main/supplementary_online_data/transitions/transitions_tables/
for descriptions for each of the columns.
"""
columns = initialise_transition_table_columns()
origin_ids = occupations_to_check(origin_ids)
destination_ids = occupations_to_check(destination_ids)
# For each occupation in consideration...
if verbose: print('Finding all transitions...', end=' ')
t_now = time()
for j, j_id in enumerate(origin_ids):
# Find the most similar occupations
df = find_closest(j_id, sim.W_combined, data.occ[['id']])
# Filter out self
df = df[df.id!=j_id]
# Filter out occupations that we're not supposed to check
df = df[df.id.isin(destination_ids)]
# Filter out non-viable transitions
df = df[df.similarity > MIN_VIABLE]
# Viable IDs
viable_ids = df.id.to_list()
# Collect data about each transition from j_id to viable_ids
columns = transition_data_processing(
columns, j_id, viable_ids,
MIN_VIABLE,
HIGHLY_VIABLE,
MAX_JOB_ZONE_DIF,
MIN_EARNINGS_RATIO)
if verbose: print(f'Done!\nThis took {(time()-t_now):.2f} seconds.')
trans_df = pd.DataFrame(data=columns)
# Add filtering variables
trans_df = transition_data_filtering(trans_df, MIN_VIABLE, HIGHLY_VIABLE)
if less_information:
return trans_df[[
'origin_id',
'origin_label',
'destination_id',
'destination_label',
'similarity',
'is_viable',
'is_desirable',
'is_safe_desirable',
'is_strictly_safe_desirable'
]].reset_index(drop=True)
else:
return trans_df.reset_index(drop=True)
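# Illustrative usage sketch (an assumption): fetch transitions for the default subset of
# occupations and keep only the safe and desirable ones.
def _example_safe_desirable_transitions():
    trans_df = get_transitions(verbose=True, less_information=True)
    return trans_df[trans_df.is_safe_desirable].reset_index(drop=True)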
def initialise_transition_table_columns():
columns = {
'origin_id': [],
'origin_label': [],
'destination_id': [],
'destination_label': [],
'similarity': [],
'is_jobzone_ok': [],
'is_earnings_ok': [],
'is_not_high_risk': [],
'is_safer': [],
'is_strictly_safe': [],
'job_zone_dif': [],
'earnings_ratio': [],
'risk_dif': [],
'prop_dif': [],
'W_skills': [],
'W_work': [],
'W_essential_skills': [],
'W_optional_skills': [],
'W_activities': [],
'W_work_context': []
}
return columns
def transition_data_processing(
columns, j_id, viable_ids,
MIN_VIABLE = MIN_VIABLE_DEF,
HIGHLY_VIABLE = HIGHLY_VIABLE_DEF,
MAX_JOB_ZONE_DIF = MAX_JOB_ZONE_DIF_DEF,
MIN_EARNINGS_RATIO = MIN_EARNINGS_RATIO_DEF):
"""
Used by get_transitions() and get_transition_data();
Adds various descriptors for the transitions from j_id (int)
to a set of viable_ids (list of int) that will be further used
to filter viable, desirable and safe transitions.
"""
N = len(viable_ids)
origin_job_zone = data.occ.loc[j_id].job_zone
origin_earnings = data.occ.loc[j_id].annual_earnings
origin_risk = data.occ.loc[j_id].risk
origin_prevalence = data.occ.loc[j_id].prevalence
origin_label = data.occ.loc[j_id].risk_category
job_zone_dif = origin_job_zone - data.occ.loc[viable_ids].job_zone
earnings_ratio = data.occ.loc[viable_ids].annual_earnings / origin_earnings
risk_dif = origin_risk - data.occ.loc[viable_ids].risk
prevalence_dif = data.occ.loc[viable_ids].prevalence - origin_prevalence
# Job Zone difference not larger than MAX_JOB_ZONE_DIF
is_jobzone_ok = np.abs(job_zone_dif) <= MAX_JOB_ZONE_DIF
# Earnings at destination larger than MIN_EARNINGS_RATIO
is_earnings_ok = earnings_ratio > MIN_EARNINGS_RATIO
# Destination is not a high risk occupation
is_not_high_risk = (data.occ.loc[viable_ids].risk_category != 'High risk')
# Destination has a smaller risk and a larger prevalence of bottleneck tasks
is_safer = (risk_dif > 0) & (prevalence_dif > 0)
# Combine both safety filters
is_strictly_safe = is_safer & is_not_high_risk
# Summarise similarities
W_skills = 0.5*sim.W_essential[j_id, viable_ids] + 0.5*sim.W_all_to_essential[j_id, viable_ids]
W_work = 0.5*sim.W_activities[j_id, viable_ids] + 0.5*sim.W_work_context[j_id, viable_ids]
# Save the row data
columns['origin_id'] += [j_id] * N
columns['origin_label'] += [data.occ.loc[j_id].preferred_label] * N
columns['destination_id'] += viable_ids
columns['destination_label'] += data.occ.loc[viable_ids].preferred_label.to_list()
columns['similarity'] += list(sim.W_combined[j_id, viable_ids])
columns['is_jobzone_ok'] += list(is_jobzone_ok)
columns['is_earnings_ok'] += list(is_earnings_ok)
columns['is_not_high_risk'] += list(is_not_high_risk)
columns['is_safer'] += list(is_safer)
columns['is_strictly_safe'] += list(is_strictly_safe)
columns['job_zone_dif'] += list(job_zone_dif)
columns['earnings_ratio'] += list(earnings_ratio)
columns['risk_dif'] += list(risk_dif)
columns['prop_dif'] += list(prevalence_dif)
columns['W_skills'] += list(W_skills)
columns['W_work'] += list(W_work)
columns['W_essential_skills'] += list(sim.W_essential[j_id, viable_ids])
columns['W_optional_skills'] += list(sim.W_all_to_essential[j_id, viable_ids])
columns['W_activities'] += list(sim.W_activities[j_id, viable_ids])
columns['W_work_context'] += list(sim.W_work_context[j_id, viable_ids])
return columns
def transition_data_filtering(trans_df, MIN_VIABLE, HIGHLY_VIABLE):
"""
Adds filtering variables to the transitions dataframe trans_df (pandas.DataFrame)
to indicate transitions that are viable, desirable and safe.
"""
trans_df['sim_category'] = ''
trans_df.loc[trans_df.similarity <= HIGHLY_VIABLE, 'sim_category'] = 'min_viable'
trans_df.loc[trans_df.similarity > HIGHLY_VIABLE, 'sim_category'] = 'highly_viable'
trans_df.loc[trans_df.similarity <= MIN_VIABLE, 'sim_category'] = 'not_viable'
trans_df['is_viable'] = trans_df['is_jobzone_ok'] & (trans_df['sim_category'] != 'not_viable')
trans_df['is_desirable'] = trans_df['is_viable'] & trans_df['is_earnings_ok']
trans_df['is_safe_desirable'] = trans_df['is_desirable'] & trans_df['is_not_high_risk']
trans_df['is_strictly_safe_desirable'] = trans_df['is_desirable'] & trans_df['is_strictly_safe']
return trans_df
def get_transition_data(
transition_pairs,
MIN_VIABLE = MIN_VIABLE_DEF,
HIGHLY_VIABLE = HIGHLY_VIABLE_DEF,
MAX_JOB_ZONE_DIF = MAX_JOB_ZONE_DIF_DEF,
MIN_EARNINGS_RATIO = MIN_EARNINGS_RATIO_DEF,
verbose=False):
"""
Compiles transition data for each transition pair; final output table follows the same
format as the output of get_transitions()
Parameters
----------
transition_pairs (list of tuples):
Pairs of transitions for which to generate a table with various descriptors
and viability, desirability and safety indicators.
...
Returns
-------
trans_df (pandas.DataFrame):
A pandas dataframe with transitions and various descriptors and indicators.
See https://github.com/nestauk/mapping-career-causeways/tree/main/supplementary_online_data/transitions/transitions_tables/
for descriptions for each of the columns.
"""
columns = initialise_transition_table_columns()
if verbose: print('Finding data for all transitions...', end=' ')
t_now = time()
transition_pair_dict = defaultdict(list)
for pair in transition_pairs:
transition_pair_dict[pair[0]].append(pair[1])
# For each transition pair in consideration...
for j_id in list(transition_pair_dict.keys()):
viable_ids = transition_pair_dict[j_id]
columns = transition_data_processing(
columns, j_id, viable_ids,
MIN_VIABLE,
HIGHLY_VIABLE,
MAX_JOB_ZONE_DIF,
MIN_EARNINGS_RATIO)
if verbose: print(f'Done!\nThis took {(time()-t_now):.2f} seconds.')
trans_df = pd.DataFrame(data=columns)
trans_df = transition_data_filtering(trans_df, MIN_VIABLE, HIGHLY_VIABLE)
return trans_df.reset_index(drop=True)
def create_filtering_matrices(
origin_ids = None,
MIN_VIABLE = MIN_VIABLE_DEF,
HIGHLY_VIABLE = HIGHLY_VIABLE_DEF,
MAX_JOB_ZONE_DIF = MAX_JOB_ZONE_DIF_DEF,
MIN_EARNINGS_RATIO = MIN_EARNINGS_RATIO_DEF,
destination_ids = None,
export_path = None):
"""
Creates boolean matrices for tagging transitions as 'safe', 'desirable', 'viable'
'highly viable' and combinations of these.
These boolean matrices are later used for analysing the number of different
types of transitions for each occupation.
Parameters
----------
origin_ids (list of int):
List of origin occupation IDs, for which to check the transitions. If None,
we only check the subset of occupations analysed in the report
MIN_VIABLE (float):
Similarity threshold for viable transitions (default = 0.3)
HIGHLY_VIABLE (float):
Similarity threshold for highly viable transitions (default = 0.4)
MAX_JOB_ZONE_DIF (int):
Max absolute difference in job zones (default = 1)
MIN_EARNINGS_RATIO (float):
Threshold for differences in earnings (default = 0.75)
destination_ids (list of int):
List of permissible destination occupation IDs. If None, we check only
the occupations subset analysed in the report
"""
# Select the occupations to check
origin_ids = occupations_to_check(origin_ids)
destination_ids = occupations_to_check(destination_ids)
# Select the similarities corresponding to the specified occupations
W_combined_select = sim.W_combined[origin_ids, :].copy()
W_combined_select = W_combined_select[:, destination_ids]
# Filter matrices
N = len(origin_ids)
N2 = len(destination_ids)
# Boolean matrices to indicate...
# ...compatibility of job zones
F_jobzone = np.zeros((N,N2)).astype(bool)
# ...compatibility of earnings
F_earnings = np.zeros((N,N2)).astype(bool)
# ...reduction of risk and increase of the prevalence of bottleneck tasks
F_safer = np.zeros((N,N2)).astype(bool)
# ...that destination is not of high risk
F_not_high_risk = np.zeros((N,N2)).astype(bool)
# ...that the transition is not to self
F_not_self = np.zeros((N,N2)).astype(bool)
print('Creating filtering matrices...', end=' ')
t_now = time()
# Brute force approach (for each transition...)
for i in range(N):
row_i = data.occ.iloc[origin_ids[i]]
for j in range(N2):
row_j = data.occ.iloc[destination_ids[j]]
is_jobzone_ok = np.abs(row_i.job_zone - row_j.job_zone) <= MAX_JOB_ZONE_DIF
is_earnings_ok = (row_j.annual_earnings / row_i.annual_earnings) > MIN_EARNINGS_RATIO
is_safer = (row_i.risk > row_j.risk) & (row_i.prevalence < row_j.prevalence)
is_not_high_risk = (row_j.risk_category != 'High risk')
F_jobzone[i][j] = is_jobzone_ok
F_earnings[i][j] = is_earnings_ok
F_not_high_risk[i][j] = is_not_high_risk
F_safer[i][j] = is_safer
F_not_self[i][j] = row_i.id != row_j.id
print(f'Done!\nThis took {(time()-t_now):.2f} seconds.')
# Matrices indicating viable and highly viable transitions
F_viable = F_jobzone & (W_combined_select > MIN_VIABLE)
F_highly_viable = F_jobzone & (W_combined_select > HIGHLY_VIABLE)
F_min_viable = F_jobzone & (W_combined_select > MIN_VIABLE) & (W_combined_select <= HIGHLY_VIABLE)
# Matrix indicating desirable transitions
F_desirable = F_viable & F_earnings
# Matrix indicating safe transitions
F_strictly_safe = F_safer & F_not_high_risk
# Matrices indicating safe and desirable transitions
F_safe_desirable = F_desirable & F_not_high_risk # 1st definition
F_strictly_safe_desirable = F_desirable & F_strictly_safe # 2nd (stricter) definition
# Export filtering matrices
filter_matrices = {
'F_viable': F_viable,
'F_min_viable': F_min_viable,
'F_highly_viable': F_highly_viable,
'F_desirable': F_desirable,
'F_jobzone': F_jobzone,
'F_earnings': F_earnings,
'F_not_high_risk': F_not_high_risk,
'F_safer': F_safer,
'F_strictly_safe': F_strictly_safe,
'F_not_self': F_not_self,
'F_safe_desirable': F_safe_desirable,
'F_strictly_safe_desirable': F_strictly_safe_desirable,
}
# Remove transitions to self
for key in list(filter_matrices.keys()):
filter_matrices[key] = filter_matrices[key] & F_not_self
filter_matrices['origin_ids'] = origin_ids
filter_matrices['destination_ids'] = destination_ids
# Export filtering matrices
if export_path is not None:
if not os.path.exists(export_path):
pickle.dump(filter_matrices, open(export_path, 'wb'))
print(f'Filtering matrices saved at {export_path}')
else:
print('File already exists! (not saved)')
return filter_matrices
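# Illustrative usage sketch (an assumption): count, for each origin occupation in the report
# subset, how many safe and desirable transition destinations it has.
def _example_count_safe_desirable():
    filter_matrices = create_filtering_matrices(origin_ids='report', destination_ids='report')
    return filter_matrices['F_safe_desirable'].sum(axis=1)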
def show_skills_overlap(
job_i,
job_j,
data=data, sim=sim,
embeddings=embeddings,
skills_match = 'optional', # either 'optional' or 'essential'
matching_method='one_to_one',
verbose=True,
rounding=True):
"""
NLP-adjusted overlap of skill sets between occupations job_i and job_j
"""
job_i = data.occ_title_to_id(job_i)
job_j = data.occ_title_to_id(job_j)
if verbose: print(f"from {data.occ.loc[job_i].preferred_label} (id {job_i}) to {data.occ.loc[job_j].preferred_label} (id {job_j})")
# Create the input dataframe in the required format
if skills_match == 'optional':
node_to_items_ = pd.concat([data.node_to_all_items.loc[[job_i]],
data.node_to_essential_items.loc[[job_j]]])
w = sim.W_all_to_essential[job_i, job_j]
elif skills_match == 'essential':
node_to_items_ = pd.concat([data.node_to_essential_items.loc[[job_i]],
data.node_to_essential_items.loc[[job_j]]])
w = sim.W_essential[job_i, job_j]
# Check for empty arrays
assert((data.node_to_essential_items.loc[[job_j]].items_list.values[0]) != 0)
# Compare occupations
df, score = compare_nodes_utils.two_node_comparison(
node_to_items_, job_i, job_j,
data.skills[['id','preferred_label']],
embeddings,
metric='cosine',
matching_method=matching_method,
symmetric=False,
rounding=rounding)
N_matched = len(df)
# Tidy up the dataframe
df.rename(columns={
'id_x': 'origin_skill_id',
'preferred_label_x': 'origin_skill',
'id_y': 'destination_skill_id',
'preferred_label_y': 'destination_skill',
'similarity': 'score',
'similarity_raw': 'similarity'}, inplace=True)
df = df[['origin_skill_id', 'origin_skill',
'destination_skill_id', 'destination_skill',
'similarity', 'score']]
# Add leftover skills from the destination occupation
all_destination_skills = data.occupation_to_skills[
(data.occupation_to_skills.occupation_id==job_j) &
(data.occupation_to_skills.importance=='Essential')].skill_id.to_list()
skills_to_add = set(all_destination_skills).difference(set(df.destination_skill_id))
if len(skills_to_add) != 0:
append_df = {
'origin_skill_id':[],
'origin_skill':[],
'destination_skill_id':[],
'destination_skill':[],
'similarity':[],
'score':[]
}
for s in skills_to_add:
append_df['origin_skill_id'].append('-')
append_df['origin_skill'].append('-')
append_df['destination_skill_id'].append(s)
append_df['destination_skill'].append(data.skills.loc[s].preferred_label)
append_df['similarity'].append(0)
append_df['score'].append(0)
df = pd.concat([df, pd.DataFrame(data=append_df)], ignore_index=True)
if verbose:
print('--------')
#print(f'{N_matched}/{len(data.node_to_essential_items.loc[[job_j]].items_list.values[0])} destination skills matched')
print(f'NLP-adjusted overlap = {w:.2f} (total combined similarity: {sim.W_combined[job_i, job_j]:.2f})')
return df
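# Illustrative usage sketch (an assumption): the occupation titles below are placeholders
# that data.occ_title_to_id() is expected to resolve to integer IDs.
def _example_skills_overlap():
    return show_skills_overlap('waiter/waitress', 'bartender', skills_match='optional')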
class CompareFeatures():
"""
Class to inspect feature vector differences between occupations
"""
def __init__(self, data_folder=useful_paths.data_dir):
### Import work context vectors ###
self.work_context_vectors = np.load(data_folder + 'interim/work_context_features/ESCO_work_context_vectors.npy')
self.work_context_features = pd.read_csv(data_folder + 'processed/work_context_vector_features.csv')
self.work_context_features['category'] = self.work_context_features.element_id.apply(lambda x: int(x[4]))
# Add work context feature category label
def categorise(x):
if x == 1: return 'interpersonal'
if x == 2: return 'physical'
if x == 3: return 'structural'
self.work_context_features['category'] = self.work_context_features['category'].apply(lambda x: categorise(x))
### Import ESCO skills category vectors ###
self.esco_vectors_1 = np.load(data_folder + 'interim/work_activity_features/esco_hierarchy_vectors_level_1.npy')
self.esco_features_1 = pickle.load(open(data_folder + 'interim/work_activity_features/esco_hierarchy_codes_level_1.pickle', 'rb'))
self.esco_features_1 = data.concepts[data.concepts.code.isin(self.esco_features_1)][['code','title']].sort_values('code').copy()
self.esco_vectors_2 = np.load(data_folder + 'interim/work_activity_features/esco_hierarchy_vectors_level_2.npy')
self.esco_features_2 = pickle.load(open(data_folder + 'interim/work_activity_features/esco_hierarchy_codes_level_2.pickle', 'rb'))
self.esco_features_2 = data.concepts[data.concepts.code.isin(self.esco_features_2)][['code','title']].sort_values('code').copy()
self.esco_vectors_3 = np.load(data_folder + 'interim/work_activity_features/esco_hierarchy_vectors_level_3.npy')
self.esco_features_3 = pickle.load(open(data_folder + 'interim/work_activity_features/esco_hierarchy_codes_level_3.pickle', 'rb'))
self.esco_features_3 = data.concepts[data.concepts.code.isin(self.esco_features_3)][['code','title']].sort_values('code').copy()
def select_esco_level(self, level=2):
""" Selects the level of ESCO hierarchy; if level=None, uses work context features instead """
if level==1:
self.vectors = self.esco_vectors_1
self.features = self.esco_features_1
elif level==2:
self.vectors = self.esco_vectors_2
self.features = self.esco_features_2
elif level==3:
self.vectors = self.esco_vectors_3
self.features = self.esco_features_3
elif level is None:
self.vectors = self.work_context_vectors
self.features = self.work_context_features
def get_feature_differences(self, origin_id, destination_id, esco_level=2):
"""
        Useful for checking what the biggest differences between the two occupations are
Parameters
----------
origin_id (int):
Origin occupation's integer ID
destination_id (int):
Destination occupation's integer ID
        esco_level (int or None):
            ESCO hierarchy level (normally use level 2); if esco_level is None, uses work context vectors
"""
self.select_esco_level(esco_level)
# Calculate vector deltas and add category labels
delta_vector = self.vectors[destination_id] - self.vectors[origin_id]
df = self.features.copy()
df['origin'] = self.vectors[origin_id]
df['destination'] = self.vectors[destination_id]
df['dif'] = delta_vector
df['dif_abs'] = np.abs(delta_vector)
return df.sort_values('dif_abs', ascending=False)
def most_impactful_features(self, origin_id, destination_id, esco_level=2):
"""
Useful for checking what makes both occupations similar; calculates 'impact'
which relates to how much an element contributes to similarity
Parameters
----------
origin_id (int):
Origin occupation's integer ID
destination_id (int):
Destination occupation's integer ID
        esco_level (int or None):
            ESCO hierarchy level (normally use level 2); if esco_level is None, uses work context vectors
"""
self.select_esco_level(esco_level)
original_destination_vector = self.vectors[destination_id,:]
origin_vector = normalize(self.vectors[origin_id,:].reshape(1,-1))
original_sim = cosine(normalize(original_destination_vector.reshape(1,-1)), origin_vector)
impacts = []
for j in range(len(original_destination_vector)):
new_vector = original_destination_vector.copy()
new_vector[j] = 0
new_vector = normalize(new_vector.reshape(1,-1))
impact = original_sim - cosine(new_vector, origin_vector)
impacts.append(-impact)
df = self.features.copy()
df['impact'] = impacts
return df.sort_values('impact', ascending=False)
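# --- Illustrative usage (not part of the original module) ---
# A minimal sketch showing how CompareFeatures is meant to be used to inspect why two
# occupations look similar or different; origin_id and destination_id are hypothetical
# integer occupation IDs.
def _example_compare_features(origin_id, destination_id):
    cf = CompareFeatures()
    # Largest absolute differences at level 2 of the ESCO skills hierarchy
    biggest_differences = cf.get_feature_differences(origin_id, destination_id, esco_level=2)
    # Elements that contribute most to the similarity of the two occupations
    most_impactful = cf.most_impactful_features(origin_id, destination_id, esco_level=2)
    return biggest_differences.head(10), most_impactful.head(10)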
class SkillsGaps():
"""
Class for characterising prevalent skills gaps for a collection of transitions
"""
def __init__(self, trans_to_analyse, verbose=True):
"""
trans_to_analyse (pandas.DataFrame):
Table with transitions, with columns 'origin_id' and 'destination_id'
indicating the occupations involved in the transition.
"""
self.trans_to_analyse = trans_to_analyse
self.get_skills_scores(verbose=verbose)
self.skill_similarities_all = None
self._skills_gaps = None
self.cluster_gaps = None
@property
def skills_gaps(self):
if self._skills_gaps is None:
self._skills_gaps = self.get_skills_gaps()
return self._skills_gaps
def get_skills_scores(self, verbose=True):
"""
Compare skillsets using NLP-adjusted overlap across all transitions
in self.trans_to_analyse, and save the matching scores for each skill from each comparison
"""
## List of lists (a list for each transition)
# Skills IDs for all transitions
self.destination_skills_id_ALL = []
self.origin_skills_id_ALL = []
# All matching scores
self.destination_skills_id_score_ALL = []
self.origin_skills_id_score_ALL = []
# All semantic similarity values (not used in the final analysis)
self.destination_skills_id_sim_ALL = []
self.origin_skills_id_sim_ALL = []
t = time()
for j, row in self.trans_to_analyse.iterrows():
# Get job IDs
job_i = row.origin_id
job_j = row.destination_id
# Create the input dataframe in the required format
df = show_skills_overlap(job_i, job_j, verbose=False)
###### DESTINATION SKILLS ######
# Save the skill IDs and similarity values
self.destination_skills_id_ALL.append(df.destination_skill_id.to_list())
self.destination_skills_id_score_ALL.append(df.score.to_list())
self.destination_skills_id_sim_ALL.append(df.similarity.to_list())
###### ORIGIN SKILLS ######
# Exclude unmatched destination skill rows
origin_skills = df[df.origin_skill_id.apply(lambda x: type(x)!=str)]
# Extract the origin skill IDs, matching scores and similarity values
self.origin_skills_id_ALL.append(origin_skills.origin_skill_id.to_list())
self.origin_skills_id_score_ALL.append(origin_skills.score.to_list())
self.origin_skills_id_sim_ALL.append(origin_skills.similarity.to_list())
t_elapsed = time() - t
if verbose: print(f'Time elapsed: {t_elapsed :.2f} sec ({t_elapsed/len(self.trans_to_analyse): .3f} per transition)')
def setup(self, transition_indices=None, skills_type='destination', skill_items=None):
"""
Parameters:
----------
        transition_indices (list of int):
            Transitions that we wish to analyse (will correspond to the row indices of 'trans_to_analyse')
        skills_type (str):
            Sets which skills we are checking ('destination' vs 'origin'; normally use 'destination')
        skill_items (list of str):
            Optionally specifies whether to only analyse gaps for specific ESCO skills pillar categories:
            skills ('S'), knowledge ('K') or attitudes ('A')
"""
# Store the analysis parameters
        if transition_indices is None:
self.transition_indices = range(0, len(self.trans_to_analyse))
else:
self.transition_indices = transition_indices
self.skills_type = skills_type
# Number of transitions we have
self.n_trans = len(self.transition_indices)
# Get all skills occurrences and matching scores
self.skill_similarities_all = self.merge_lists()
# Select only specific skill items (either 'K' for knowledge, 'S' for skills or 'A' for attitude)
if skill_items is None:
pass
else:
df = self.skill_similarities_all.merge(data.skills[['id','skill_category']], left_on='skills_id', right_on='id', how='left')
self.skill_similarities_all = self.skill_similarities_all[df.skill_category.isin(skill_items)]
self._skills_gaps = self.get_skills_gaps()
def prevalent_skills_gaps(self, top_x=10, percentile=False):
"""
Show the most prevalent skills gaps
top_x (int):
            Determines whether the analysis outputs the top_x most prevalent skills
            (if percentile is False) or the skills above the top_x-th percentile of prevalence
            (if percentile is True). Normally, use top_x=90 or 95 if percentile=True
percentile (boolean):
Determines how top_x is interpreted
"""
# Return the top most prevalent skills
return self.get_most_prevalent_gaps(self.skills_gaps, top_x=top_x, percentile=percentile)
def prevalent_cluster_gaps(self, level='level_3', top_x=10, percentile=False):
"""
Show the most prevalent skills gaps, aggregated at the level of ESCO skills categories
Parameters
----------
level (str or int):
Determines which level (1, 2 or 3) of ESCO skills hierarchy we are using to
aggregate the skills gaps
top_x (int):
            Determines whether the function outputs the top_x most prevalent skills
            (if percentile is False) or the skills above the top_x-th percentile of prevalence
            (if percentile is True). Normally, use top_x=90 or 95 if percentile=True
percentile (boolean):
Determines how top_x is interpreted
"""
if level in [1,2,3]:
level = 'level_' + str(level)
self.cluster_gaps = self.get_cluster_gaps(level)
prevalent_clusters = self.get_most_prevalent_gaps(self.cluster_gaps, top_x=top_x, percentile=percentile)
return self.most_prevalent_cluster_skills(prevalent_clusters)
def merge_lists(self):
"""
Creates dataframe with all skills occurrences, their matched similarities and scores.
It is possible to analyse a subset of all supplied transitions, by specifying
the row indices of 'trans_to_analyse' table using 'transition_indices'
"""
# Merge lists
list_skills = []
list_score = []
list_similarity = []
for i in self.transition_indices:
if self.skills_type=='destination':
list_skills += self.destination_skills_id_ALL[i]
list_score += self.destination_skills_id_score_ALL[i]
list_similarity += self.destination_skills_id_sim_ALL[i]
elif self.skills_type=='origin':
list_skills += self.origin_skills_id_ALL[i]
list_score += self.origin_skills_id_score_ALL[i]
list_similarity += self.origin_skills_id_sim_ALL[i]
skill_similarities_all = pd.DataFrame(data={
'skills_id': list_skills,
'score': list_score,
'similarity': list_similarity})
# If a skill was not matched, then set it to 0
skill_similarities_all.loc[skill_similarities_all.score.isnull(), 'score'] = 0
return skill_similarities_all
def count_and_agg_scores(self, skill_similarities_all, groupby_column):
""" Aggregates scores for each skill or cluster (depending on groupby_column) """
# Counts
skill_counts = skill_similarities_all.groupby(groupby_column).count()
# Mean similarity
skill_similarities = skill_similarities_all.groupby(groupby_column).mean()
# Create the dataframe
skill_similarities['counts'] = skill_counts['score']
skill_similarities['stdev'] = skill_similarities_all.groupby(groupby_column).std()['score']
skill_similarities.reset_index(inplace=True)
return skill_similarities
def get_skills_gaps(self):
""" Agregates scores for skills """
# Aggregate scores
skill_similarities = self.count_and_agg_scores(self.skill_similarities_all, 'skills_id')
skill_similarities['prevalence'] = skill_similarities['counts'] / self.n_trans
# Add information about skills
skill_similarities = skill_similarities.merge(
data.skills[['id', 'preferred_label', 'level_1', 'level_2', 'level_3']],
left_on='skills_id', right_on='id', how='left')
# Clean up the dataframe
skill_similarities = self.clean_up_df(skill_similarities)
skill_similarities = skill_similarities[['id', 'preferred_label', 'level_1', 'level_2', 'level_3', 'counts', 'prevalence', 'score' , 'stdev']]
return skill_similarities
def get_cluster_gaps(self, level='level_1'):
""" Agregates scores for ESCO skills clusters """
# Save the level of analysis
self.level = level
# Add skills cluster information
skill_similarities_all_clust = self.skill_similarities_all.merge(data.skills[[
'id', 'preferred_label', 'level_1', 'level_2', 'level_3', 'code']], left_on='skills_id', right_on='id')
# Aggregate scores
skill_similarities = self.count_and_agg_scores(skill_similarities_all_clust, level)
skill_similarities['prevalence'] = skill_similarities['counts'] / self.n_trans
# Add skills cluster title
skill_similarities = skill_similarities.merge(data.concepts[['code','title']], left_on=level, right_on='code')
# Clean up the dataframe
skill_similarities = self.clean_up_df(skill_similarities)
skill_similarities = skill_similarities[['code', 'title', 'counts', 'prevalence', 'score', 'stdev']]
return skill_similarities
def clean_up_df(self, df):
""" Clean up the dataframe for presentation """
df.prevalence = df.prevalence.round(3)
df.similarity = df.similarity.round(3)
df.reset_index(drop=True, inplace=True)
return df
def get_most_prevalent_gaps(self, skills_gaps, top_x=10, percentile=False):
""" Select only the most prevalent skills """
if percentile:
df = skills_gaps[skills_gaps.prevalence > np.percentile(skills_gaps.prevalence, top_x)]
df = df.sort_values('score', ascending=False)
return df
else:
return skills_gaps.sort_values('prevalence', ascending=False).head(top_x).sort_values('score', ascending=False)
def most_prevalent_cluster_skills(self, prevalent_clusters, top_n=3):
""" For each cluster, find top_n most prevalent skills and add to the dataframe """
x = []
for j, row in prevalent_clusters.iterrows():
dff = self.skills_gaps[self.skills_gaps[self.level]==row.code]
dff = dff.sort_values('prevalence', ascending=False).iloc[0:top_n]
xx = []
# Add matching scores
for jj, rrow in dff.iterrows():
xx.append(f'{rrow.preferred_label} ({np.round(rrow.score,2)})')
x.append(', '.join(xx))
prevalent_clusters_ = prevalent_clusters.copy()
prevalent_clusters_['skills'] = x
return prevalent_clusters_
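# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the intended SkillsGaps workflow; 'transitions_df' is a hypothetical
# dataframe with 'origin_id' and 'destination_id' columns, as described in the constructor.
def _example_skills_gaps(transitions_df):
    gaps = SkillsGaps(transitions_df, verbose=True)
    # Analyse destination-skill gaps across all supplied transitions
    gaps.setup(skills_type='destination')
    top_skills = gaps.prevalent_skills_gaps(top_x=10)
    # Aggregate the gaps at level 2 of the ESCO skills hierarchy
    top_clusters = gaps.prevalent_cluster_gaps(level=2, top_x=95, percentile=True)
    return top_skills, top_clusters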
class Upskilling():
"""
Tests upskilling by adding new ESCO skills to occupations' skillsets and
re-evaluating viable transitions
"""
def __init__(self,
origin_ids='report',
new_skillsets=[None],
destination_ids='report',
verbose=False,
load_data_path=False,
):
"""
Parameters
----------
origin_ids (list of int, or str):
Origin occupation integer identifiers
new_skillsets (list of int, or a list of lists):
List of the new skills IDs (or combinations of skills) to be tested;
can feature mixed single skills and combinations e.g. [1, [1000, 23], 3]
destination_ids (list of int, or str):
Destination occupation integer identifiers
"""
self.verbose = verbose
# List of perturbed matrices
self.new_W_combined = None
# Upskilling analysis results
self.upskilling_effects = None
if load_data_path:
self.load_data_path = load_data_path
result_dict = self.load_results()
self.new_W_combined = result_dict['new_W_combined']
origin_ids = result_dict['origin_ids']
destination_ids = result_dict['destination_ids']
new_skillsets = result_dict['new_skillsets']
if 'upskilling_effects' in list(result_dict.keys()):
self.upskilling_effects = result_dict['upskilling_effects']
# Origin and destination occupations
self.origin_ids = occupations_to_check(origin_ids)
self.destination_ids = occupations_to_check(destination_ids)
# Prep a list of lists of skills (allowing us to add multiple skill combinations)
self.list_of_new_skills = [skill if type(skill)==list else [skill] for skill in new_skillsets]
self.n_origin_occupations = len(self.origin_ids)
self.n_destination_occupations = len(self.destination_ids)
self.n_new_skills = len(self.list_of_new_skills)
# Dictionaries mapping matrix element indices to the original occupation IDs
self.origin_ids_to_row_indices = dict(zip(self.origin_ids, list(range(len(self.origin_ids)))))
self.destination_ids_to_col_indices = dict(zip(self.destination_ids, list(range(len(self.destination_ids)))))
self.row_indices_to_origin_ids = dict(zip(list(range(len(self.origin_ids))),self.origin_ids))
self.col_indices_to_destination_ids = dict(zip(list(range(len(self.destination_ids))),self.destination_ids))
## Required variables for re-calculating similarities (Note: should eventually do further refactoring) ##
# Variables for recalculating work activity feature vector similarity
activity_vector_dir = f'{useful_paths.data_dir}interim/work_activity_features/'
self.element_codes_2 = np.array(pickle.load(open(f'{activity_vector_dir}esco_hierarchy_codes_level_2.pickle', 'rb')))
self.normalisation_params = pickle.load(open(f'{activity_vector_dir}esco_hierarchy_norm_params.pickle', 'rb'))
self.occupation_vectors_level_2_abs = np.load(f'{activity_vector_dir}esco_hierarchy_vectors_level_2_abs.npy')
self.occupation_vectors_level_2 = np.load(f'{activity_vector_dir}esco_hierarchy_vectors_level_2.npy')
# Variables including work context similarities into the combined measure
esco_to_work_context_vector =
|
pd.read_csv(useful_paths.data_dir + 'interim/work_context_features/occupations_work_context_vector.csv')
|
pandas.read_csv
|
"""Calculate high x of y.
Todo:
Load configs in `__main__.py` for making it easier to test.
"""
import sys # noqa: F401 # pylint: disable=unused-import
import toml
from typing import Any, Dict, List, MutableMapping, Union # noqa: F401 # pylint: disable=unused-import
import icecream # noqa: F401 # pylint: disable=unused-import
import numpy as np
import pandas as pd
CONFIG_FILE: str = 'configs/config.toml'
# days of the week
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
CONFIGS: MutableMapping[str, Any] = toml.load(CONFIG_FILE)
UNIT_NUM_PER_DAY: int = CONFIGS['unit_num_per_day']
EXCLUDED_CRITERION_RATIO: float = CONFIGS['excluded_criterion_ratio']
MAX_GO_BACK_DAYS: int = CONFIGS['max_go_back_days']
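# For reference only: a hypothetical configs/config.toml consistent with the keys read in this
# module would parse into a mapping like the one below (the values are made-up examples, not
# the real configuration).
_EXAMPLE_CONFIGS: Dict[str, Any] = {
    'unit_num_per_day': 48,            # e.g. half-hourly units per day
    'excluded_criterion_ratio': 0.1,
    'max_go_back_days': 45,
    'weekday': {'x': 4, 'y': 5},       # e.g. "high 4 of 5" for weekdays
    'holiday': {'x': 2, 'y': 3},
}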
def calculate(df_demand: pd.DataFrame, df_holidays: pd.DataFrame) -> pd.DataFrame:
"""Return dataframe contain high x of y result.
Todo:
Change arguments for making it easier to test.
Args:
        df_demand (pd.DataFrame): Original data, i.e. historical data.
        df_holidays (pd.DataFrame): Holidays dataframe.
Returns:
        pd.DataFrame: dataframe containing the average of high x of y.
"""
df_base = _make_df_base(df_demand, df_holidays)
df_bases: Dict[str, pd.DataFrame] = {}
df_bases['weekday'] = df_base.query('is_weekday == True')
df_bases['holiday'] = df_base.query('is_weekday == False')
df_calceds: List[pd.DataFrame] = []
for day_type, df_day_type in df_bases.items():
x = CONFIGS[day_type]['x']
y = CONFIGS[day_type]['y']
df_calced = _mean_high_x_of_y(df_day_type, x, y)
df_calceds.append(df_calced)
df = pd.concat(df_calceds)
return df
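# --- Illustrative sketch (not part of the original module) ---
# The real _mean_high_x_of_y() is defined elsewhere in this module; the sketch below only
# illustrates the assumed "high x of y" idea (for each unit of the day, average the x highest
# demand values among the y most recent days of the same day type). It is not the actual
# implementation.
def _mean_high_x_of_y_sketch(df_day_type: pd.DataFrame, x: int, y: int) -> pd.DataFrame:
    records = []
    for unit_num, df_unit in df_day_type.groupby('unit_num'):
        # Keep the y most recent days for this unit, then average the x highest demands
        recent = df_unit.sort_values('date').tail(y)
        high_x = recent.nlargest(x, 'demand')
        records.append({'unit_num': unit_num, 'mean_high_x_of_y': high_x['demand'].mean()})
    return pd.DataFrame(records)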
def _make_df_base(df_demand: pd.DataFrame, df_holidays: pd.DataFrame) -> pd.DataFrame:
"""Return a dataframe contains following columns.
- datetime
- demand
    @todo: treat dr_invoked flags as bool rather than int
    - dr_invoked_unit: 0: not invoked, 1: invoked
- dr_invoked_day: 0: not invoked, otherwise invoked
- date
- day_of_week
- is_pub_holiday
- is_weekday
- unit_num
- mean_daily_demand_for_dr
Args:
        df_demand (pd.DataFrame): contains demand.
        df_holidays (pd.DataFrame): contains holidays.
Returns:
        pd.DataFrame: contains demand and holidays.
"""
df_holidays['is_pub_holiday'] = True
df_demand['date'] = pd.to_datetime(df_demand['datetime'].dt.date)
df_demand['day_of_week'] = df_demand['datetime'].dt.dayofweek
df_invoked_days = df_demand.groupby('date').sum()[['dr_invoked_unit']] \
.reset_index() \
.rename(columns={'dr_invoked_unit': 'dr_invoked_day'})
df = df_demand.merge(
df_invoked_days,
how='left',
on='date'
)
df_demand_means_per_invoked_day = df_demand.query('dr_invoked_unit != 0') [['date', 'demand']].groupby('date').mean() \
.reset_index() \
.rename(columns={'demand': 'mean_daily_demand_for_dr'})
df = df.merge(
df_demand_means_per_invoked_day,
how='left',
on='date'
)
df = df.merge(
df_holidays,
how='left',
on='date'
)
df = df.fillna({'is_pub_holiday': False})
df['is_weekday'] = df.apply(_applied_is_weekday, axis='columns')
df = _add_unit_num_column(df, UNIT_NUM_PER_DAY)
return df
def _applied_is_weekday(row: pd.Series) -> bool:
"""Return whether weekday or not.
Args:
row (pd.Series): one record of dataframe.
Returns:
bool: weekday: True, otherwise: False
"""
if row['day_of_week'] in [SAT, SUN]:
return False
if row['is_pub_holiday']:
return False
return True
def _add_unit_num_column(df: pd.DataFrame, unit_num_per_day: int) -> pd.DataFrame:
"""Return dataframe added unit num columns.
When `UNIT_NUM_PER_DAY` is 48, return `[1, 2, ..., 48, 1, 2, ...]`.
Args:
df (pd.DataFrame):
        unit_num_per_day (int):
Returns:
        pd.DataFrame: dataframe with the unit_num column added.
"""
elements: List[int] = []
for i in range(1, df.shape[0] + 1):
mod = i % unit_num_per_day
if mod != 0:
elements.append(mod)
else:
elements.append(unit_num_per_day)
df_elements: pd.DataFrame = pd.DataFrame({'unit_num': elements})
df_ret =
|
pd.concat([df, df_elements], axis='columns')
|
pandas.concat
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib import animation
import os
import glob
import tables
#sometimes the import gives an error the first time it runs; importing it twice solves the problem
try:
from lstchain.reco.utils import get_effective_time, add_delta_t_key
except:
from lstchain.reco.utils import get_effective_time, add_delta_t_key
from ctapipe.instrument import SubarrayDescription
from ctapipe.visualization import CameraDisplay
from ctapipe.coordinates import EngineeringCameraFrame
from ctapipe.instrument import CameraGeometry
from traitlets.config import Config
from ctapipe.io import EventSource
#################################################
#################################################
# change depending where we are running the code
#################################################
#################################################
#number of subruns we have
number_subruns=122
#number assigned to the concrete run (only for the name of the files saved)
run_number=2969
#number of events in each subrun, normally they are saved in groups of 53000 events
subrun_events=53000
#directories
#DEPENDING ON THE RUN WE ARE STUDYING
#path of the dl2 data file (needed if we want to plot the reconstructed direction and source)
dl2_directory="/data/cta/users-ifae/moralejo/CTA/LST/RealData/DL2/lstchain_v0.7/20201120/dl2_LST-1.Run02969.h5"
#parameters to read dl1 and dl2
dl2_parameters="dl2/event/telescope/parameters/LST_LSTCam"
dl1_parameters="/dl1/event/telescope/parameters/LST_LSTCam"
#path of the dl1 data files for each subrun; the format we use is "directory.****.fits.fz" or "h5"
#where **** is the subrun index
dl1_directory="/data/cta/users-ifae/moralejo/CTA/LST/RealData/DL1/lstchain_V0.7/20201120/dl1_LST-1.Run02969."
#R0 directory
R0_directory="/data/cta/users-ifae/moralejo/CTA/LST/RealData/R0/20201120/LST-1.1.Run02969."
#DEPENDING ON WHERE WE ARE RUNNING THE CODE
#R1 calibration
calibration_directory = "/data/cta/users-ifae/moralejo/CTA/LST/RealData/R0/20201120/calib_files/"
drs4_pedestal_path = calibration_directory + "drs4_pedestal.Run02963.0000.fits"
calib_path = calibration_directory + "calibration.Run02964.0000.hdf5"
time_calib_path = calibration_directory + "time_calibration.Run02964.0000.hdf5"
drive_report = calibration_directory + "drive_log_20201120.txt"
run_summary = calibration_directory + "RunSummary_20201120.ecsv"
#################################################
#################################################
#################################################
#################################################
#################################################################################
# function for printing a set of images and save them into a directory
#################################################################################
def plot( array_ids, #array with the ID's of the events that we want to represent
representation='charge', #type of graphic representation 'charge', 'time' or 'both'
plot_direction=True, #to represent the reconstructed direction, ellipse, and source
          gamma_lim=0.5, #gammaness limit to represent the reconstructed parameters (so they are not plotted for non gamma-like events)
colormap_scale=0.7, #change the maximum limit of the colormap [0-1]
save=True, #saving the image into a folder
          file_type='.pdf', #file format used to save the image: '.pdf', '.png' etc
          plot_image=True, #whether or not to show the image in the console
folder_name='event_plots' #name of the folder for saving the plots
):
#################################################
#reordering events and identifying the corresponding subrun in order to optimize the plotting
    #if a single event is given as an int we convert it to a list
if type(array_ids) is int:
array_ids=[array_ids]
array_ids.sort()
#we separate the events in each subrun
SubrunsList,SubrunIndex=[],[]
for i in range(number_subruns+1):
SubrunsList.append([])
SubrunIndex.append(i)
for i in range(len(array_ids)):
for k in range(number_subruns+1):
if array_ids[i]>=(k*subrun_events) and array_ids[i]<((k+1)*subrun_events):
SubrunsList[k].append(array_ids[i])
LenSubrun=[]
for i in range(number_subruns+1):
LenSubrun.append(len(SubrunsList[i]))
#we only iterate the subruns that have some events
SubrunsList_,SubrunIndex_=[],[]
for i in range(len(SubrunsList)):
if LenSubrun[i]!=0:
SubrunsList_.append(SubrunsList[i])
SubrunIndex_.append(SubrunIndex[i])
#################################################
#importing dl2 data if needed
if plot_direction==True:
df=pd.read_hdf(dl2_directory, dl2_parameters)
df=df.set_index('event_id')
#################################################
#definition of functions for plotting
#############################
#plot a basic charge map#####
def basic_plot(N,represent='charge'):
        #titles: we print the value of gammaness and energy if dl2 data exists
try:
titles='$\\gamma=$'+ str(round((df.at[N,'gammaness']),2))+', $E=$'+str(round((df.at[N,'reco_energy']),2))+'TeV'
except:
titles='Event ID = '+str(N)
if represent=='time':
charge_map = times_data[N-subrun_events*SubrunIndex_[ii]-1]
camdisplay = CameraDisplay(camera_geom.transform_to(EngineeringCameraFrame()),
norm='lin', title='Time (ns)' ,image=charge_map,cmap='Reds',
show_frame=False)
else:
charge_map = charges_data[N-subrun_events*SubrunIndex_[ii]-1]
camdisplay = CameraDisplay(camera_geom.transform_to(EngineeringCameraFrame()),
norm='lin', title=titles ,image=charge_map,cmap='plasma',
show_frame=False)
camdisplay.add_colorbar()
camdisplay.set_limits_percent(colormap_scale*100)
##############################
    #complete plot of an event#####
def complet_plot(N,represent='charge'):
print('Plotting Event ID ='+str(N))
fig, ax=plt.subplots()
if only_dl1==False:
basic_plot(N,represent)
if plot_direction==True:
#Source reconstruction
plt.plot(-df.at[N,'reco_src_y'],-df.at[N,'reco_src_x'],'*',color='darkgrey',
label='Reconstructed source',markersize=17,alpha=0.9)
plt.autoscale(False)
#ellipse and mass center
plt.plot(-df.at[N,'y'],-df.at[N,'x'],'.',color='w')
ellipse = Ellipse(xy=(-df.at[N,'y'], -df.at[N,'x']), width=df.at[N,'width'],
height=df.at[N,'length'],angle=-np.rad2deg(df.at[N,'psi']),
edgecolor='w', fc='None', lw=2)
ax.add_patch(ellipse)
#print a line of direction
slope=np.tan(-df.at[N,'psi']+np.pi/2)
x0=-df.at[N,'y']
y0=-df.at[N,'x']
plt.plot([(3-y0+slope*x0)/slope,(-3-y0+slope*x0)/slope],[3,-3],'--',color='w')
plt.legend(loc='best')
elif only_dl1==True:
basic_plot(N,represent)
#saving the images in a folder
if save==True:
if not os.path.exists(folder_name+'_Run'+str(run_number)):
os.makedirs(folder_name+'_Run'+str(run_number))
fig.savefig(folder_name+'_Run'+str(run_number)+'/event_'+str(N).zfill(7)+
'_'+representation+file_type, dpi=300)
        #if we only want to save the image we don't show it (it is faster)
if plot_image==False:
plt.close()
else:
plt.show()
###########################################################################
    #plot a representation of both charges and times side by side#####
def complet_plot_double(N):
print('Plotting Event ID ='+str(N))
fig, ax=plt.subplots(figsize=(17,7))
#first image of charges
ax1=plt.subplot(1,2,1)
if only_dl1==False:
basic_plot(N)
if plot_direction==True:
#Source reconstruction
plt.plot(-df.at[N,'reco_src_y'],-df.at[N,'reco_src_x'],'*',
color='darkgrey',label='Reconstructed source',markersize=17,alpha=0.9)
plt.autoscale(False)
#ellipse and center of mass
plt.plot(-df.at[N,'y'],-df.at[N,'x'],'.',color='w')
ellipse = Ellipse(xy=(-df.at[N,'y'], -df.at[N,'x']), width=df.at[N,'width'],
height=df.at[N,'length'],angle=-np.rad2deg(df.at[N,'psi']),
edgecolor='w', fc='None', lw=2)
ax1.add_patch(ellipse)
#print a line of direction
slope=np.tan(-df.at[N,'psi']+np.pi/2)
x0=-df.at[N,'y']
y0=-df.at[N,'x']
plt.plot([(3-y0+slope*x0)/slope,(-3-y0+slope*x0)/slope],[3,-3],'--',color='w')
plt.legend(loc='best')
elif only_dl1==True:
basic_plot(N)
#second image of times
ax2=plt.subplot(1,2,2)
basic_plot(N,'time')
#saving the images in a folder
if save==True:
if not os.path.exists(folder_name+'_Run'+str(run_number)):
os.makedirs(folder_name+'_Run'+str(run_number))
fig.savefig(folder_name+'_Run'+str(run_number)+'/event_'+str(N).zfill(7)+
'_'+representation+file_type, dpi=300)
        #if we only want to save the image we don't show it
if plot_image==False:
plt.close()
else:
plt.show()
#################################################
#plotting
    #plot parameters in a context so as not to affect the previously defined parameters
with plt.rc_context(rc={'figure.figsize':(10,9),
'font.size':17,
'mathtext.fontset':'custom',
'mathtext.rm':'Bitstream Vera Sans',
'mathtext.it':'Bitstream Vera Sans:italic',
'mathtext.bf':'Bitstream Vera Sans:bold',
'mathtext.fontset':'stix',
'font.family':'STIXGeneral',
'xtick.direction':'out',
'ytick.direction':'out',
'xtick.major.size':8,
'xtick.major.width':2,
'xtick.minor.size':5,
'xtick.minor.width':1,
'ytick.major.size':8,
'ytick.major.width':2,
'ytick.minor.size':5,
'ytick.minor.width':1,
'xtick.top':False,
'ytick.right':False,
'xtick.minor.visible':False,
'ytick.minor.visible':False}):
#we iterate the process subrun to subrun
for ii in range(len(SubrunIndex_)):
#importing the DL1 data of corresponding subrun
data_files = dl1_directory+str(SubrunIndex_[ii]).zfill(4)+".h5"
dummy = []
data_files = glob.glob(data_files)
data_files.sort()
for data_file in data_files:
dfDL1 = pd.read_hdf(data_file, dl1_parameters)
dummy.append(dfDL1)
data_parameters = pd.concat(dummy, ignore_index=True)
subarray_info = SubarrayDescription.from_hdf(data_files[0])
focal_length = subarray_info.tel[1].optics.equivalent_focal_length
camera_geom = subarray_info.tel[1].camera.geometry
dummy1 = []
dummy2 = []
for data_file in data_files:
data = tables.open_file(data_file)
dummy1.append(data.root.dl1.event.telescope.image.LST_LSTCam.col('image'))
dummy2.append(data.root.dl1.event.telescope.image.LST_LSTCam.col('peak_time'))
charges_data = np.concatenate(dummy1)
times_data = np.concatenate(dummy2)
#we plot each event jj for a determined subrun ii
for jj in SubrunsList_[ii]:
                #for each event we first check whether dl2 data exists
only_dl1=True
if plot_direction==True:
try:
if df.at[jj,'gammaness']>gamma_lim:
only_dl1=False
except:
pass
                #depending on the chosen representation we use the corresponding plotting method
if representation=='both':
complet_plot_double(jj)
elif representation=='time':
complet_plot(jj,'time')
else:
complet_plot(jj,'charge')
#################################################
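# Illustrative call (not part of the original script): the event IDs below are hypothetical
# placeholders; the call is wrapped in a helper so nothing is executed on import.
def _example_plot_events():
    #plot charge and time maps side by side for two events and save them as PNGs
    plot([1200, 53042], representation='both', plot_direction=True,
         gamma_lim=0.5, save=True, file_type='.png', folder_name='event_plots')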
#################################################################################
# function for printing a set of animations and save them into a directory
#################################################################################
def animate( array_ids, #array with the ID's of the events that we want to represent
plot_direction=True, #to represent the reconstructed direction, ellipse, and source
             gamma_lim=0.5, #gammaness limit to represent the reconstructed parameters
colormap_scale=0.7, #change the maximum limit of the colormap [0-1]
             file_type='.gif', #file format used to save the animation: '.gif' (for '.mp4' ffmpeg is needed)
fps=20, #frames per second for the animation
folder_name='event_animations' #name of the folder for saving the plots
):
#################################################
#reordering events and identifying the corresponding subrun in order to optimize the plotting
    #if a single event is given as an int we convert it to a list
if type(array_ids) is int:
array_ids=[array_ids]
array_ids.sort()
#we separate the events in each subrun
SubrunsList=[]
SubrunsList_translate=[]
SubrunIndex=[]
for i in range(number_subruns+1):
SubrunsList.append([])
SubrunsList_translate.append([])
SubrunIndex.append(i)
for i in range(len(array_ids)):
for k in range(number_subruns+1):
if array_ids[i]>=(k*subrun_events) and array_ids[i]<((k+1)*subrun_events):
SubrunsList[k].append(array_ids[i])
LenSubrun=[]
for i in range(number_subruns+1):
LenSubrun.append(len(SubrunsList[i]))
    #we only iterate the subruns with events
SubrunsList_,SubrunIndex_,SubrunsList_translate_=[],[],[]
for i in range(len(SubrunsList)):
if LenSubrun[i]!=0:
SubrunsList_.append(SubrunsList[i])
SubrunsList_translate_.append(SubrunsList_translate[i])
SubrunIndex_.append(SubrunIndex[i])
#translating event id's into positions in a list of each subrun
for i in range(len(SubrunsList_)):
for j in range(len(SubrunsList_[i])):
if j==0:
SubrunsList_translate_[i].append(SubrunsList_[i][j]-SubrunIndex_[i]*subrun_events)
else:
SubrunsList_translate_[i].append(SubrunsList_[i][j]-SubrunsList_[i][j-1])
#estimation of time of the process
total_iterations=0
total_images=0
for i in range(len(SubrunsList_translate_)):
for j in range(len(SubrunsList_translate_[i])):
total_iterations=total_iterations+SubrunsList_translate_[i][j]
total_images=total_images+1
time_est=(33*total_iterations/1000+total_images*12)/60
print('\n Estimated time = '+str(round((time_est),2))+' (min) \n')
#################################################
#import dl2 data if needed
if plot_direction==True:
df=pd.read_hdf(dl2_directory, dl2_parameters)
df=df.set_index('event_id')
#################################################
#definition of function to animate outside of the loop
def animate(i):
camdisplay.image=ev.r1.tel[1].waveform[:,i]
plt.title('$t=$'+str(i).zfill(2)+' (ns)')
return fig,
#################################################
#plotting
    #plot parameters in a context so as not to affect the previously defined parameters
with plt.rc_context(rc={'figure.figsize':(10,9),
'font.size':17,
'mathtext.fontset':'custom',
'mathtext.rm':'Bitstream Vera Sans',
'mathtext.it':'Bitstream Vera Sans:italic',
'mathtext.bf':'Bitstream Vera Sans:bold',
'mathtext.fontset':'stix',
'font.family':'STIXGeneral',
'xtick.direction':'out',
'ytick.direction':'out',
'xtick.major.size':8,
'xtick.major.width':2,
'xtick.minor.size':5,
'xtick.minor.width':1,
'ytick.major.size':8,
'ytick.major.width':2,
'ytick.minor.size':5,
'ytick.minor.width':1,
'xtick.top':False,
'ytick.right':False,
'xtick.minor.visible':False,
'ytick.minor.visible':False}):
#we iterate the process subrun to subrun
for ii in range(len(SubrunIndex_)):
#importing the DL1 data of corresponding subrun
R0_file = R0_directory+str(SubrunIndex_[ii]).zfill(4)+".fits.fz"
            # The files above are of course specific to (at least) each observation night!
# config = Config(
# {
# 'LSTEventSource': {
# 'default_trigger_type': 'ucts',
# 'allowed_tels': [1],
# 'min_flatfield_adc': 3000,
# 'min_flatfield_pixel_fraction': 0.8,
# 'calibrate_flatfields_and_pedestals': False,
# 'LSTR0Corrections': {
# 'drs4_pedestal_path': drs4_pedestal_path,
# 'calibration_path': calib_path,
# 'drs4_time_calibration_path': time_calib_path,
# },
# 'PointingSource': {
# 'drive_report_path': drive_report,
# },
# 'EventTimeCalculator': {
# 'run_summary_path': run_summary,
# }
# },
# }
# )
# Simplified version, in case we are missing the drive_report and run_summary:
config = Config(
{
'LSTEventSource': {
'default_trigger_type': 'ucts',
'allowed_tels': [1],
'min_flatfield_adc': 3000,
'min_flatfield_pixel_fraction': 0.8,
'calibrate_flatfields_and_pedestals': True,
'LSTR0Corrections': {
'drs4_pedestal_path': drs4_pedestal_path,
'calibration_path': calib_path,
'drs4_time_calibration_path': time_calib_path,
},
},
}
)
#"Event source", to read in the R0 (=raw) events and calibrate them into R1 (=calibrated waveforms)
source = EventSource(input_url=R0_file, config=config, max_events=5000)
#################################################
#we plot each event jj for a determined subrun ii
for jj in SubrunsList_translate_[ii]:
                #to reach a given event ID we need to pass through all preceding events (no faster way is known)
                #it passes through approximately 1000 events in 30 seconds
for j in range(jj):
for i, ev in enumerate(source):
break
# event_id, the same meaning as in DL1 and DL2 files:
index=ev.index.event_id
camgeom = source.subarray.tel[1].camera.geometry
#we see if we have data in dl2 of the corresponding event
only_dl1=True
if plot_direction==True:
try:
if df.at[index,'gammaness']>gamma_lim:
only_dl1=False
except:
pass
#find maximum value
max_=[]
for i in range(36):
max_.append(max(ev.r1.tel[1].waveform[:,i]))
maximum=max(max_)
#################################################
#plotting
fig, ax=plt.subplots(figsize=(13,11))
camdisplay =CameraDisplay(camgeom.transform_to(EngineeringCameraFrame()),ax=ax,
image=ev.r1.tel[1].waveform[:,0],
show_frame=False,cmap='plasma')
#setting the limits
camdisplay.add_colorbar()
camdisplay.set_limits_minmax(0, maximum*colormap_scale)
anim = animation.FuncAnimation(fig, animate,frames=36, interval=22, blit=True)
if (plot_direction==True) and (only_dl1==False):
#Source reconstruction
plt.plot(-df.at[index,'reco_src_y'],-df.at[index,'reco_src_x'],'*',color='darkgrey',
label='Reconstructed source',markersize=17 ,alpha=0.9)
#print a line of direction
plt.autoscale(False)
slope=np.tan(-df.at[index,'psi']+np.pi/2)
x0=-df.at[index,'y']
y0=-df.at[index,'x']
plt.plot([(3-y0+slope*x0)/slope,(-3-y0+slope*x0)/slope],[3,-3],'--',color='w',alpha=0.8)
plt.legend(loc='best')
#saving the animations in a folder
if not os.path.exists(folder_name+'_Run'+str(run_number)):
os.makedirs(folder_name+'_Run'+str(run_number))
anim.save(folder_name+'_Run'+str(run_number)+'/event_'+str(index).zfill(7)
+file_type, fps=fps, extra_args=['-vcodec', 'libx264'])
plt.close()
#################################################
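# Illustrative call (not part of the original script): the event ID is a hypothetical
# placeholder; the call is wrapped in a helper so nothing is executed on import.
def _example_animate_events():
    #create a GIF of the calibrated waveforms for one event, drawing the reconstructed
    #direction only if its gammaness is above 0.5
    animate([1200], plot_direction=True, gamma_lim=0.5, colormap_scale=0.7,
            file_type='.gif', fps=20)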
#################################################################################
# function for searching in the events of the run
#################################################################################
def search( sort=False, #if we want to sort the list in terms of some variable for example ='gammaness'
inflim_index=False, #index of the event ID
suplim_index=False,
inflim_gammaness=False, #gammaness
suplim_gammaness=False,
inflim_intensity=False, #intensity
suplim_intensity=False,
inflim_proportion=False, #proportion between length and width
suplim_proportion=False,
inflim_length=False, #length
suplim_length=False,
inflim_width=False, #width
suplim_width=False,
inflim_time_gradient=False, #time gradient
suplim_time_gradient=False,
inflim_reco_energy=False, #reco_energy
suplim_reco_energy=False,
            #predefined default parameter sets
#gamma like intense events
gamma_like=False,
#muon like events
muon_like=False,
):
df =
|
pd.read_hdf(dl2_directory,dl2_parameters)
|
pandas.read_hdf
|
import argparse
import os
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import seaborn as sns
#from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-e",
"--exp_names",
type=str,
nargs='+',
help="Experiment names to compare, example python data_comparison_spatial_2.py -e exp_name1 exp_name2"
)
parser.add_argument(
"-l",
"--labels",
type=str,
nargs='+',
help="Experiment labels, if not specified will be extracted from exp_names"
)
parser.add_argument(
"-ch",
"--channel",
type=str,
default = 'deaths',
help="Outcome channel to plot"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default = "Local"
)
return parser.parse_args()
def write_combined_csv(exp_names,channel,labels, first_day,last_day, region="All"):
first_md = first_day.strftime('%b%d')
last_md = last_day.strftime('%b%d')
df = pd.DataFrame()
for s, exp_name in enumerate(exp_names):
simpath = os.path.join(projectpath, 'cms_sim', 'simulation_output', exp_name)
exp_date = exp_name.split("_")[0]
fname = f'nu_{exp_date}_{region}.csv'
df_i = pd.read_csv(os.path.join(simpath, fname))
df_i['date'] = pd.to_datetime(df_i['date'])
df_i = df_i[df_i['date'].between(
|
pd.Timestamp(first_day)
|
pandas.Timestamp
|
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep nan values
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo = algos.SelectAll(include_no_data=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('bt.ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].ix[dts[1]] = 105
data['c1'].ix[dts[2]] = 95
data['c1'].ix[dts[3]] = 105
data['c1'].ix[dts[4]] = 95
# low vol c2
data['c2'].ix[dts[1]] = 100.1
data['c2'].ix[dts[2]] = 99.9
data['c2'].ix[dts[3]] = 100.1
data['c2'].ix[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('bt.ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # returns the 2 we have if all_or_none is False
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # returns 0 selected if all_or_none is True
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts =
|
pd.date_range('2010-01-01', periods=3)
|
pandas.date_range
|
from pandas.util.py3compat import StringIO
import numpy as np
from pandas.core.api import Series, DataFrame
import pandas.stats.common as common
from pandas.util.decorators import cache_readonly
def fama_macbeth(**kwargs):
"""Runs Fama-MacBeth regression.
Parameters
----------
Takes the same arguments as a panel OLS, in addition to:
nw_lags_beta: int
Newey-West adjusts the betas by the given lags
"""
window_type = kwargs.get('window_type')
if window_type is None:
klass = FamaMacBeth
else:
klass = MovingFamaMacBeth
return klass(**kwargs)
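# Illustrative usage (not part of the original module): y and x follow the same panel layout
# expected by pandas.stats.plm.MovingPanelOLS; nw_lags_beta applies a Newey-West adjustment
# to the cross-sectional betas.
def _example_fama_macbeth(y, x):
    result = fama_macbeth(y=y, x=x, nw_lags_beta=2)
    return result.mean_beta, result.std_beta, result.t_stat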
class FamaMacBeth(object):
def __init__(self, y, x, intercept=True, nw_lags=None,
nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies={}, verbose=False):
self._nw_lags_beta = nw_lags_beta
from pandas.stats.plm import MovingPanelOLS
self._ols_result = MovingPanelOLS(
y=y, x=x, window_type='rolling', window=1,
intercept=intercept,
nw_lags=nw_lags, entity_effects=entity_effects,
time_effects=time_effects, x_effects=x_effects, cluster=cluster,
dropped_dummies=dropped_dummies, verbose=verbose)
self._cols = self._ols_result._x.columns
@cache_readonly
def _beta_raw(self):
return self._ols_result._beta_raw
@cache_readonly
def _stats(self):
return _calc_t_stat(self._beta_raw, self._nw_lags_beta)
@cache_readonly
def _mean_beta_raw(self):
return self._stats[0]
@cache_readonly
def _std_beta_raw(self):
return self._stats[1]
@cache_readonly
def _t_stat_raw(self):
return self._stats[2]
def _make_result(self, result):
return Series(result, index=self._cols)
@cache_readonly
def mean_beta(self):
return self._make_result(self._mean_beta_raw)
@cache_readonly
def std_beta(self):
return self._make_result(self._std_beta_raw)
@cache_readonly
def t_stat(self):
return self._make_result(self._t_stat_raw)
@cache_readonly
def _results(self):
return {
'mean_beta': self._mean_beta_raw,
'std_beta': self._std_beta_raw,
't_stat': self._t_stat_raw,
}
@cache_readonly
def _coef_table(self):
buffer = StringIO()
buffer.write('%13s %13s %13s %13s %13s %13s\n' %
('Variable', 'Beta', 'Std Err', 't-stat', 'CI 2.5%', 'CI 97.5%'))
template = '%13s %13.4f %13.4f %13.2f %13.4f %13.4f\n'
for i, name in enumerate(self._cols):
if i and not (i % 5):
buffer.write('\n' +
|
common.banner('')
|
pandas.stats.common.banner
|
# coding: utf-8
# In[2]: import pandas
import pandas as pd
import sys
import os
#take output file and read into dataframe
txtfile=sys.argv[1]
df=pd.read_csv("%s" %txtfile,delimiter=r'\s',encoding='utf-8', engine ='python', header=None)
# In[3]: create a list with rows to drop
rows=list(range(len(df)))
rows_to_drop=[2*x+1 for x in rows]
rows_to_drop= [x for x in rows_to_drop if x<(len(df)+1)]
# In[4]: drop all rows starting with total
df=df.drop(df.index[rows_to_drop])
# In[6]: reset the index to renumber everything
df=df.reset_index(drop=True)
# In[5]: select only the relevant columns from score_guides output
df = df[[3,5,7,9]]
# In[7]: rename columns
df=df.rename(index=int, columns={3 : "Guide Library", 5 : "Filename",7 : "Reads DASHed",9 : "Percent DASHed"})
# In[8]: parse out the name of the guide library file
guidelibrary = df['Guide Library'].str.split('/',expand=True)
df['Guide Library'] = guidelibrary[guidelibrary.columns[-1]]
# In[12]: get rid of percent sign in percent DASHed
df['Percent DASHed']=df['Percent DASHed'].str.strip('%')
# In[13]: parse out the total reads vs DASHed reads
reads = df['Reads DASHed'].str.split('/',expand=True)
reads= reads.rename(index=int, columns={ 0 : "total reads DASHed", 1 : "total reads"})
df=
|
pd.concat([df, reads],axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
# # News Categorizer
#
# Predicting news article categories by headline text.
#
# Compares three models: Naive Bayes, SGD, and neural network, then
# applies the trained neural network to roughly categorize 2020 New York Times headlines.
#
# > The dataset is from https://www.kaggle.com/uciml/news-aggregator-dataset.
# >
# > It contains headlines, URLs, and categories for 422,937 news stories
# collected by a web aggregator between March 10th, 2014 and August 10th, 2014.
# >
# > <NAME>. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import unicodedata
import time
import string
import os
import csv
import re
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.metrics import confusion_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.embeddings import Embedding
from keras.layers import Flatten
# ## Retrieve raw data
#
# Read dataset as a CSV from local file.
# In[2]:
print('Retrieving raw data...')
news_df = pd.read_csv(os.path.join('news-data', 'uci-news-aggregator.csv'), header=0, index_col=0, dtype=str)
print('Read CSV file from local copy of dataset.')
print(news_df.shape)
news_df.head()
# ## Pre-preprocessing
#
# Preprocessing transformations that can happen before the train/test split are those that are not dependent on the values contained in the dataset itself.
#
# Such steps are:
#
# * Checking for null values
# * Renaming columns
# * Standardising case
# * Removing punctuation
#
# There are 2 null values in `PUBLISHER`,
# but that field is neither a relevant feature nor the label to be predicted.
# In[3]:
# Check for null values
news_df.isnull().sum()
# In[4]:
# Make labels more intuitive and human-readable
CATEGORY_DICT = {
'b':'business',
'e':'entertainment',
'm':'health',
't':'science/technology'
}
news_df['CATEGORY'] = news_df['CATEGORY'].map(CATEGORY_DICT)
# In[5]:
def remove_punctuation(s1):
"""
Returns s1 unicode-normalised without punctuation.
"""
s1 = s1.translate(str.maketrans('', '', string.punctuation))
return unicodedata.normalize("NFKD", s1)
# The relevant feature is the headline text (in `TITLE`), and the label is the news category (in `CATEGORY`).
# In[6]:
print('Standardising case and removing punctuation...')
# Make all the headlines lowercase and remove punctuation
news_df['TITLE'] = news_df['TITLE'].str.lower()
news_df['TITLE'] = news_df['TITLE'].apply(remove_punctuation)
# Designate features and labels
features = news_df[['TITLE']]
labels = news_df[['CATEGORY']]
news_df.head()
# In[7]:
count_df = pd.DataFrame(news_df['CATEGORY'].value_counts()).reset_index()
print('There are', len(count_df), 'news categories')
plt.clf()
sns.set_style('darkgrid')
plt.figure(figsize=(6,6))
sns.barplot(data=count_df, y='index', x='CATEGORY', palette='Dark2')
plt.title('Number of news articles in each category', loc='left', fontsize=14)
plt.xlabel("")
plt.ylabel("")
plt.tight_layout()
plt.savefig(os.path.join('output', 'training_data_composition.png'))
print('Saving as output/training_data_composition.png...')
plt.plot();
# # Statistical machine learning approach, naive Bayes and SGD
#
# We use `sklearn`'s implementations of both models.
# In[8]:
print('===== Naive Bayes and SGD =====')
# split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
# ## Preprocessing
#
# In each instance, we avoid data leakage by
# "fitting on train and transforming both train and test".
#
# This ensures that information contained in the test set
# is not factored in by the model at training time (avoids over-optimistic results).
#
#
# The preprocessing steps are shared between naive Bayes and SGD:
#
# * Convert text (the single feature) into count vectors (**removing stop words in the process**)
# * Encode the categories (the label)
# In[9]:
start_time = time.time()
print('Vectorising text into features and encoding categorical labels...')
# Turn text into integer count vectors
vectorizer = CountVectorizer(stop_words = 'english')
vectorizer.fit(x_train['TITLE'])
x_train = vectorizer.transform(x_train['TITLE'])
x_test = vectorizer.transform(x_test['TITLE'])
# Turn categories into integers
# 0: business, 1: entertainment, 2: health, 3: science/technology
encoder = LabelEncoder()
encoder.fit(y_train['CATEGORY'])
y_train = encoder.transform(y_train['CATEGORY'])
y_test = encoder.transform(y_test['CATEGORY'])
duration = time.time() - start_time
print(f'Preprocessing for Naive Bayes and SGD took {duration} seconds.')
# ## Training and evaluating the models
#
# Both models' accuracies are high, above 90%, with default parameters.
#
# SGD outperforms Naive Bayes **(\~93.7% compared to \~92.6% in the sample run)**, but takes slightly longer to train.
# In[10]:
print('Training and evaluating Naive Bayes...')
start_time = time.time()
# Naive Bayes model
nb = MultinomialNB()
nb.fit(x_train, y_train)
duration = time.time() - start_time
print(f'Training the Naive Bayes model took {duration} seconds.')
score = nb.score(x_test, y_test)
print(f'Multinomial Naive Bayes accuracy:\n{score}')
# In[11]:
print('Training and evaluating SGD...')
start_time = time.time()
# Stochastic gradient descent classifier
sgd = SGDClassifier(early_stopping=True)
sgd.fit(x_train, y_train)
duration = time.time() - start_time
print(f'Training the SGD model took {duration} seconds.')
score = sgd.score(x_test, y_test)
print(f'SGD classifier accuracy:\n{score}')
# ### Some other classifier models
#
# These models are relatively slow to train, and underperformed in tests.
# In[12]:
# from sklearn.svm import LinearSVC
# svc = LinearSVC()
# svc.fit(x_train, y_train)
# svc.score(x_test, y_test)
# In[13]:
# from sklearn.ensemble import RandomForestClassifier
# forest = RandomForestClassifier()
# forest = forest.fit(x_train, y_train)
# forest.score(x_test, y_test)
# ## Visualizing the predictions
#
# We use `seaborn`'s heatmap to visualise the confusion matrices.
# In[14]:
def visualise_confusion_matrix(model_name, confusion_matrix, cmap='YlOrBr'):
'''
Displays the given confusion matrix for the given model name.
'''
cm = pd.DataFrame(confusion_matrix)
plt.clf()
sns.set(font_scale = 1)
plt.figure(figsize = (8,8))
# x and y labels are based on the global CATEGORY_DICT
sns.heatmap(cm, cmap = cmap,linewidths = 1, annot = True,square = True, fmt='d', cbar = False,
xticklabels = CATEGORY_DICT.values(),
yticklabels = CATEGORY_DICT.values())
plt.xticks(rotation = 0)
plt.yticks(rotation = 0)
plt.title(f'Confusion Matrix for {model_name}')
plt.xlabel('Predicted Classes', rotation=0)
plt.ylabel('Actual Classes', rotation=0)
plt.tight_layout()
plt.plot()
plt.savefig(os.path.join('output', f'confusion_matrix_{model_name}.png'))
print(f'Saving as output/confusion_matrix_{model_name}.png...')
# ### Visualize Naive Bayes
# In[15]:
# Visualise Naive Bayes
y_pred = nb.predict(x_test)
matrix = confusion_matrix(y_test, y_pred)
visualise_confusion_matrix('Naive Bayes', matrix)
# ### Visualize SGD
# In[16]:
y_pred = sgd.predict(x_test)
matrix = confusion_matrix(y_test, y_pred)
visualise_confusion_matrix('SGD', matrix)
# ## Testing the models on data not in the dataset
# In[17]:
def predict_categories(model, titles):
'''
Use the given model to predict categories for the given news headline titles.
'''
titles =
|
pd.Series(titles)
|
pandas.Series
|
import pymongo
import pandas as pd
from pymongo import MongoClient
client = MongoClient("mongodb://localhost:27017/")
db = client["Finance"]
trades= db["Trades"]
df =
|
pd.read_csv('finance.csv')
|
pandas.read_csv
|
#--------------------------------------------------------------------------------------
# The code is used to test the new method to calculate the residue distances
# 2019-07-1 <NAME>
#--------------------------------------------------------------------------------------
from Bio.PDB import *
import os ##for directory
import numpy as np
import pandas as pd
from Bio.PDB.PDBParser import PDBParser
import sys
sys.path.append(r"/Users/luho/PycharmProjects/3D_model/Data_collection_of_PDB/code")
os.chdir('/Users/luho/PycharmProjects/3D_model/Data_collection_of_PDB/code')
from pdb_function_module import *
#pre-process the pdb meta information before calculating the residue distance
infile = '../data/pdb_homo_filter.txt'
#read meta data for one group of structure
pdb_sce =
|
pd.read_csv(infile, sep="\t")
|
pandas.read_csv
|
## Connect to External Sources
import requests
from bs4 import BeautifulSoup
import yfinance as yf
## Interactive Visualization
import plotly.figure_factory as ff
import plotly.express as px
import plotly.graph_objects as go
## Data Manipulation
import pandas as pd
import numpy as np
from scipy.optimize import minimize
import datetime
## Web Framework
import streamlit as st
import base64
def download_link(object_to_download, download_filename, download_link_text):
## Create Download Link
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
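# Illustrative usage (assumption, not called anywhere in this script): the returned HTML
# anchor is meant to be rendered with st.markdown so the tag is not escaped, e.g.
#   link = download_link(some_dataframe, 'data.csv', 'Download CSV')
#   st.markdown(link, unsafe_allow_html=True)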
def negative_red(val):
## Red-Green Style for Dataframe
color = 'red' if val < 0 else 'green'
return 'color: %s' % color
@st.cache
def get_ticker():
## Connect to Wikipedia
url ='https://id.wikipedia.org/wiki/Daftar_perusahaan_yang_tercatat_di_Bursa_Efek_Indonesia'
page = requests.get(url)
## Parse Page Content
soup = BeautifulSoup(page.content, 'html.parser')
tags = soup.find_all('tr')
## Collect Ticker Information
ticker_list = []
for i in range(1,len(tags)):
row = tags[i]
text = row.text.split('\n')
idx = text[1].replace('IDX: ', '')
date = text[3].split('\xa0')[0]
value = [idx, text[2], date]
ticker_list.append(value)
## Change to DataFrame and Return
tickers = pd.DataFrame(ticker_list, columns=['Kode', 'Nama Perusahaan', 'Tanggal Pencatatan'])
return tickers
@st.cache(allow_output_mutation=True)
def get_data(tickers, start_date):
## Download Datasets
adj_ticker = [x + '.JK' for x in tickers]
prices = yf.download(adj_ticker, start_date)['Adj Close']
data_returns = prices.pct_change().dropna()
data_returns.columns = [x.replace('.JK', '') for x in data_returns.columns]
return data_returns
@st.cache
def core_plot_data(returns, weights, conf = 95):
if 'Portfolio' in returns.columns:
returns = returns.drop(columns=['Portfolio'])
## Get Tickers and Date Range First
tickers = [x for x in returns.columns]
## Correlation of Individual Asset
ind_asset_corr = round(returns.corr(), 3).values.tolist()
## Calculate Cumulative Returns for Portfolio and Individually
returns['Portfolio'] = returns.mul(weights, axis=1).sum(axis=1)
ret_cum = round((returns + 1).cumprod() - 1, 3)
## Reorganise Dataframe
new_ret = ret_cum.unstack().reset_index().set_index('Date')
new_ret.columns = ['Perusahaan', 'Returns']
new_ret = new_ret.pivot(columns = 'Perusahaan', values = 'Returns')
new_ret = new_ret[['Portfolio'] + tickers]
## Calculate Historical Drawdown
running_max = np.maximum.accumulate(new_ret['Portfolio'].add(1))
drawdown = new_ret['Portfolio'].add(1).div(running_max).sub(1)
max_drawdown = np.minimum.accumulate(drawdown)
hist_draw = round(pd.DataFrame([drawdown, max_drawdown]).transpose(), 3)
hist_draw.columns = ['Drawdown', 'Max Drawdown']
hist_draw = hist_draw.reset_index()
## Calculate the risk metrics
var = round(np.percentile(returns['Portfolio'], 100 - conf), 3)
cvar = round(returns['Portfolio'][returns['Portfolio'] <= var].mean(), 3)
## Recap Key Value
summary = {}
summary['Returns Saat Ini'] = round(returns['Portfolio'][-1]*100, 3)
summary['Returns Annual'] = round((((1+np.mean(returns['Portfolio']))**252)-1)*100, 3)
summary['Volatilitas Annual'] = round(np.std(returns['Portfolio']) * np.sqrt(252)*100, 3)
summary['Sharpe Ratio'] = round(summary['Returns Annual'] / summary['Volatilitas Annual'], 3)
summary['VaR'] = round(var*100, 3)
summary['CVaR'] = round(cvar*100, 3)
summary['Max Drawdown'] = round(max_drawdown[-1]*100, 3)
return [summary, new_ret, returns, hist_draw, ind_asset_corr, tickers]
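# Minimal sketch of the drawdown logic used above (illustrative helper, never called by
# the app): wealth is the running product of (1 + r); drawdown measures how far wealth
# sits below its running maximum, and max drawdown is its running minimum.
def _drawdown_sketch(daily_returns):
    wealth = (1 + daily_returns).cumprod()
    running_max = np.maximum.accumulate(wealth)
    drawdown = wealth / running_max - 1      # 0 at new highs, negative otherwise
    max_drawdown = np.minimum.accumulate(drawdown)
    return drawdown, max_drawdown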
@st.cache
def asset_corr_plot(asset_corr, tickers):
## Create Heatmap of Tickers Correlation
corr_heatmap = ff.create_annotated_heatmap(z = asset_corr, x = tickers, y = tickers,
colorscale = "YlGnBu", showscale = True)
corr_heatmap = corr_heatmap.update_layout(title = '<b>Korelasi Antar Saham dalam Portfolio</b>', width=550, height=550)
return corr_heatmap
@st.cache
def asset_cumulative_return(new_ret, ticker):
## Create Faceted Area Chart for Cumulative Returns
start = new_ret.index[0].strftime("%d %b %Y")
end = new_ret.index[-1].strftime("%d %b %Y")
new_ret = new_ret[ticker]
facet_plot = px.area(new_ret, facet_col="Perusahaan", facet_col_wrap=2)
facet_plot = facet_plot.update_layout(title = '<b>Nilai Returns Kumulatif Dari {} hingga {}</b>'.format(start, end))
facet_plot = facet_plot.update_layout(xaxis=dict(rangeslider=dict(visible=True),type="date"))
return facet_plot
@st.cache
def rolling_volatility(returns, interval):
## Create Rolling Volatility Plot
rolling_vol = returns['Portfolio'].rolling(interval).std().dropna() * np.sqrt(252)
rol_vol_plot = px.line(rolling_vol, labels={"Date": "Tanggal", "value": "Volatilitas"},
title="<b>Rolling Volatilitas Annual dengan Rentang Waktu {} Hari</b>".format(interval))
rol_vol_plot = rol_vol_plot.update_layout(showlegend = False)
rol_vol_plot = rol_vol_plot.update_layout(xaxis=dict(rangeselector=dict(buttons=list([
dict(count=1,
label="1 Bulan",
step="month",
stepmode="backward"),
dict(count=6,
label="6 Bulan",
step="month",
stepmode="backward"),
dict(count=1,
label="Year to Date",
step="year",
stepmode="todate"),
dict(count=1,
label="1 Tahun",
step="year",
stepmode="backward"),
dict(label="Semua",
step="all")])),
rangeslider=dict(visible=True),type="date"))
return rol_vol_plot
@st.cache
def drawdown_vis(hist_draw):
## Visualize Drawdown
drawdown_plot = px.area(x = hist_draw['Date'], y = hist_draw['Max Drawdown'],
title = "<b>Data Historical Drawdown</b>", labels = {"x": "Tanggal", "y": "Max Drawdown"})
drawdown_plot = drawdown_plot.add_trace(go.Scatter(x = hist_draw['Date'], y = hist_draw['Drawdown'],
fill = 'tozeroy', name = 'Drawdown', mode = 'none'))
return drawdown_plot
@st.cache
def var_cvar(returns, conf = 95):
## Calculate the risk metrics
var = round(np.percentile(returns['Portfolio'], 100 - conf), 3)
cvar = round(returns['Portfolio'][returns['Portfolio'] <= var].mean(), 3)
## Visualize Histogram
hist_plot = px.histogram(returns['Portfolio'], labels={"value": "Returns", "count": "Frekuensi"},
title="<b>Histogram Nilai Return Portfolio dengan Level Kepercayaan {}%</b>".format(conf))
hist_plot = hist_plot.add_vline(x = var, line_dash="dot", line_color = 'green',
annotation_text=" VaR {}".format(var),
annotation_position="top right",
annotation_font_size=12,
annotation_font_color="green"
)
hist_plot = hist_plot.add_vline(x = cvar, line_dash="dot", line_color = 'red',
annotation_text="CVaR {} ".format(cvar),
annotation_position="top left",
annotation_font_size=12,
annotation_font_color="red"
)
hist_plot = hist_plot.update_layout(showlegend = False)
return hist_plot, (var, cvar)
def get_market_cap(tickers):
## Download Datasets
start_date = datetime.date.today() - datetime.timedelta(7)
adj_ticker = [x + '.JK' for x in tickers]
prices = yf.download(adj_ticker, start_date)[['Adj Close', 'Volume']]
recent_market = (prices['Adj Close']*prices['Volume']).iloc[-1]
market_weight = recent_market.div(sum(recent_market)).tolist()
return market_weight
def portfolio_performance(weights, my_data, risk_free = 0, target = 'all'):
## Evaluate Portfolio Performance
port_return = my_data.mul(weights, axis=1).sum(axis=1)
annual_return = (((1+np.mean(port_return))**252)-1)*100
annual_vol = np.std(port_return) * np.sqrt(252)*100
sharpe_ratio = (annual_return - risk_free)/annual_vol
evaluasi = {'Return Annual':annual_return, 'Volatilitas Annual':annual_vol, 'Sharpe Ratio':sharpe_ratio}
## Return Based on Target
if target == 'all':
return evaluasi
if target == 'max_sharpe_ratio':
return -sharpe_ratio
if target == 'min_volatility':
return annual_vol
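# Worked example of the annualisation above (illustrative numbers only): with a mean
# daily return of 0.1% and a daily standard deviation of 1%,
#   annual return     ~ ((1 + 0.001) ** 252 - 1) * 100 ~ 28.6
#   annual volatility ~ 0.01 * sqrt(252) * 100         ~ 15.9
#   Sharpe ratio      ~ (28.6 - 0) / 15.9              ~ 1.8   (with risk_free = 0)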
def optimize(my_data, target, risk_free_rate = 0):
## Set Optimum Weights for Desired Target
num_assets = len(my_data.columns)
args = (my_data, risk_free_rate, target)
initial = num_assets*[1./num_assets,]
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bound = (0.0,1.0)
bounds = tuple(bound for asset in range(num_assets))
result = minimize(portfolio_performance, x0 = initial, args = args, bounds=bounds, constraints=constraints)
return result
def efficient_return(my_data, expectation, risk_free_rate = 0):
## Retrieve Returns as Constraint
def portfolio_return(weights):
return portfolio_performance(weights, my_data)['Return Annual']
## Optimum Weights Based on Expected Risk
target = 'min_volatility'
num_assets = len(my_data.columns)
args = (my_data, risk_free_rate, target)
initial = num_assets*[1./num_assets,]
constraints = ({'type': 'eq', 'fun': lambda x: portfolio_return(x) - expectation},
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bounds = tuple((0,1) for asset in range(num_assets))
result = minimize(portfolio_performance, x0 = initial, args=args, bounds=bounds, constraints=constraints)
return result
def efficient_frontier(my_data, expectation_range, risk_free_rate = 0):
efficients = []
for exp in expectation_range:
efficients.append(efficient_return(my_data, exp, risk_free_rate))
return efficients
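# Illustrative sketch (assumption, not used by the app): each element returned by
# efficient_frontier is a scipy OptimizeResult whose .fun is the minimised annual
# volatility for the corresponding target return, so frontier points can be read off
# as (volatility, expected return) pairs.
def _frontier_points_sketch(my_data, expectation_range, risk_free_rate=0):
    results = efficient_frontier(my_data, expectation_range, risk_free_rate)
    return [(res.fun, exp) for res, exp in zip(results, expectation_range)]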
@st.cache
def markowitz_portfolio(my_data, max_exp, rf = 0):
## Individual Assets Performance
ind_stocks = {}
for each in my_data.columns:
stock_return = my_data[each]
annual_stock_return = round((((1+np.mean(stock_return))**252)-1)*100, 3)
annual_stock_vol = round(np.std(stock_return) * np.sqrt(252)*100, 3)
evaluasi_stock = {'Return Annual':annual_stock_return, 'Volatilitas Annual':annual_stock_vol}
ind_stocks[each] = evaluasi_stock
ticker = [x for x in my_data.columns]
## Equal Weight Portfolio
num_assets = len(my_data.columns)
ew_weights = num_assets*[1./num_assets,]
ew = portfolio_performance(ew_weights, my_data, rf)
for key, value in ew.items():
ew[key] = round(value, 3)
for i in range(0, num_assets):
ew[ticker[i]] = ew_weights[i]
## Market Cap Weight Portfolio
mcap_weights = get_market_cap(ticker)
mcap = portfolio_performance(mcap_weights, my_data, rf)
for key, value in mcap.items():
mcap[key] = round(value, 3)
for i in range(0, num_assets):
mcap[ticker[i]] = mcap_weights[i]
## MSR Portfolio
msr_obj = optimize(my_data = my_data, target = 'max_sharpe_ratio', risk_free_rate = rf)
msr = portfolio_performance(msr_obj['x'], my_data, rf)
for key, value in msr.items():
msr[key] = round(value, 3)
msr_weights = [round(x, 3) for x in msr_obj['x']]
for i in range(0, num_assets):
msr[ticker[i]] = msr_weights[i]
## GMV Portfolio
gmv_obj = optimize(my_data = my_data, target = 'min_volatility', risk_free_rate = rf)
gmv = portfolio_performance(gmv_obj['x'], my_data, rf)
for key, value in gmv.items():
gmv[key] = round(value, 3)
gmv_weights = [round(x, 3) for x in gmv_obj['x']]
for i in range(0, num_assets):
gmv[ticker[i]] = gmv_weights[i]
## Efficient Frontier Portfolio
min_exp = gmv['Return Annual']
max_exp = max_exp + 5
range_exp = np.linspace(min_exp, max_exp, 50)
ef = efficient_frontier(my_data, range_exp, rf)
## Organize Portfolio Results
key_port = round(
|
pd.DataFrame([ew, mcap, msr, gmv])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Test data
"""
# Imports
import pandas as pd
from edbo.feature_utils import build_experiment_index
# Build data sets from indices
def aryl_amination(aryl_halide='ohe', additive='ohe', base='ohe', ligand='ohe', subset=1):
"""
Load aryl amination data with different features.
"""
# SMILES index
index = pd.read_csv('data/aryl_amination/experiment_index.csv')
# Choose subset:
ar123 = ['FC(F)(F)c1ccc(Cl)cc1','FC(F)(F)c1ccc(Br)cc1','FC(F)(F)c1ccc(I)cc1']
ar456 = ['COc1ccc(Cl)cc1','COc1ccc(Br)cc1','COc1ccc(I)cc1']
ar789 = ['CCc1ccc(Cl)cc1','CCc1ccc(Br)cc1','CCc1ccc(I)cc1']
ar101112 = ['Clc1ccccn1','Brc1ccccn1','Ic1ccccn1']
ar131415 = ['Clc1cccnc1','Brc1cccnc1','Ic1cccnc1']
def get_subset(ar):
a = index[index['Aryl_halide_SMILES'] == ar[0]]
b = index[index['Aryl_halide_SMILES'] == ar[1]]
c = index[index['Aryl_halide_SMILES'] == ar[2]]
return pd.concat([a,b,c])
if subset == 1:
index = get_subset(ar123)
elif subset == 2:
index = get_subset(ar456)
elif subset == 3:
index = get_subset(ar789)
elif subset == 4:
index = get_subset(ar101112)
elif subset == 5:
index = get_subset(ar131415)
# Aryl halide features
if aryl_halide == 'dft':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_dft.csv')
elif aryl_halide == 'mordred':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_mordred.csv')
elif aryl_halide == 'ohe':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_ohe.csv')
# Additive features
if additive == 'dft':
add_features = pd.read_csv('data/aryl_amination/additive_dft.csv')
elif additive == 'mordred':
add_features = pd.read_csv('data/aryl_amination/additive_mordred.csv')
elif additive == 'ohe':
add_features = pd.read_csv('data/aryl_amination/additive_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/aryl_amination/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/aryl_amination/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/aryl_amination/base_ohe.csv')
# Ligand features
if ligand == 'Pd(0)-dft':
ligand_features = pd.read_csv('data/aryl_amination/ligand-Pd(0)_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/aryl_amination/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = pd.read_csv('data/aryl_amination/ligand_ohe.csv')
# Build the descriptor set
index_list = [index['Aryl_halide_SMILES'],
index['Additive_SMILES'],
index['Base_SMILES'],
index['Ligand_SMILES']]
lookup_table_list = [aryl_features,
add_features,
base_features,
ligand_features]
lookup_list = ['aryl_halide_SMILES',
'additive_SMILES',
'base_SMILES',
'ligand_SMILES']
experiment_index = build_experiment_index(index['entry'],
index_list,
lookup_table_list,
lookup_list)
experiment_index['yield'] = index['yield'].values
return experiment_index
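# Hedged usage sketch (assumes the script runs from the repository root so the relative
# data/aryl_amination/*.csv paths resolve): build the fully DFT-featurised design matrix
# for aryl-halide subset 1. Illustrative only; not called by the test suite.
def _example_aryl_amination_dft(subset=1):
    return aryl_amination(aryl_halide='dft', additive='dft', base='dft',
                          ligand='Pd(0)-dft', subset=subset)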
def suzuki(electrophile='ohe', nucleophile='ohe', base='ohe', ligand='ohe', solvent='ohe'):
"""
Load Suzuki data with different features.
"""
# SMILES index
index = pd.read_csv('data/suzuki/experiment_index.csv')
# Electrophile features
if electrophile == 'dft':
elec_features = pd.read_csv('data/suzuki/electrophile_dft.csv')
elif electrophile == 'mordred':
elec_features = pd.read_csv('data/suzuki/electrophile_mordred.csv')
elif electrophile == 'ohe':
elec_features = pd.read_csv('data/suzuki/electrophile_ohe.csv')
# Nucleophile features
if nucleophile == 'dft':
nuc_features = pd.read_csv('data/suzuki/nucleophile_dft.csv')
elif nucleophile == 'mordred':
nuc_features = pd.read_csv('data/suzuki/nucleophile_mordred.csv')
elif nucleophile == 'ohe':
nuc_features = pd.read_csv('data/suzuki/nucleophile_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/suzuki/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/suzuki/base_mordred.csv')
elif base == 'ohe':
base_features =
|
pd.read_csv('data/suzuki/base_ohe.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import plotly.express as px
def emoji_plot():
df = pd.read_csv("igdata.tsv", index_col=0, sep="\t").reset_index().sort_values('Timestamp')
# cut the timestamp, save day only
df['Timestamp'] = [str(e).split(" ")[0] for e in df['Timestamp']]
# split each emoji string into single characters
all_emoji = []
for e in df['Emojies'].dropna().values:
for i in e:
all_emoji.append(i)
time = []
lan = []
emo = []
for i, row in df.iterrows():
emostr = row['Emojies']
# print(emostr)
# print(type(emostr))
if emostr is not np.nan:
for e in emostr:
time.append(row['Timestamp'])
lan.append(row['Language'])
emo.append(e)
# time, language and single emoji
clean = pd.DataFrame({'Timestamp': time, 'Language': lan, 'Emojies': emo})
# time, emoji, and count (under language column)
counted = clean.groupby(['Timestamp', 'Emojies']).count().reset_index()
emolist = counted['Emojies'].unique()
# random scatter
x = np.random.uniform(low=-10.0, high=10.0, size=len(emolist))
y = np.random.uniform(low=-10.0, high=10.0, size=len(emolist))
coords =
|
pd.DataFrame({'Emojies': emolist, 'x': x, 'y': y})
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
# # Example Predictor: Linear Rollout Predictor
#
# This example contains basic functionality for training and evaluating a linear predictor that rolls out predictions day-by-day.
#
# First, a training data set is created from historical case and npi data.
#
# Second, a linear model is trained to predict future cases from prior case data along with prior and future npi data.
# The model is an off-the-shelf sklearn Lasso model that uses a positive weight constraint to enforce the assumption that increased NPIs are negatively correlated with future cases.
#
# Third, a sample evaluation set is created, and the predictor is applied to this evaluation set to produce prediction results in the correct format.
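# Hedged sketch (kept in comments; this is not the notebook's actual pipeline, which is
# built cell by cell below): the core modelling idea is an off-the-shelf sklearn Lasso
# fitted with a positivity constraint on the coefficients. On toy data it looks like:
#
#     from sklearn.linear_model import Lasso
#     import numpy as np
#     X_toy = np.random.rand(100, 5)                     # e.g. lagged case / NPI columns
#     y_toy = X_toy @ np.array([1., 2., 0., 0., 3.])     # toy target
#     toy_model = Lasso(alpha=0.1, positive=True).fit(X_toy, y_toy)
#     toy_model.coef_                                    # all coefficients are >= 0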
# ## Training
# In[1]:
import pickle
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
# ### Copy the data locally
# In[2]:
# Main source for the training data
DATA_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
# Local file
DATA_FILE = 'data/OxCGRT_latest.csv'
# In[3]:
import os
import urllib.request
if not os.path.exists('data'):
os.mkdir('data')
urllib.request.urlretrieve(DATA_URL, DATA_FILE)
# In[4]:
# Load historical data from local file
old_df = pd.read_csv(DATA_FILE,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str,
"RegionCode": str},
error_bad_lines=False)
# In[5]:
if not os.path.exists('data/supplement'):
os.mkdir('data/supplement')
# In[6]:
date_range = pd.date_range(old_df['Date'].min(), old_df['Date'].max(), freq='D').strftime("%m-%d-%Y")
# In[7]:
no_info_dates = []
info_dates = []
# for date in date_range:
# date_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + date + ".csv"
# date_file = "data/us_states/" + date + "_states.csv"
# try:
# if os.path.exists(date_file):
# os.remove(date_file)
# urllib.request.urlretrieve(date_url, date_file)
# info_dates.append(date)
# except:
# no_info_dates.append(date)
# In[8]:
state_df =
|
pd.DataFrame(columns=["Country_Region", "Province_State", "Date", "Confirmed", "Deaths", "Recovered", "Active"])
|
pandas.DataFrame
|
"""
Script responsável por alocar funções e classes
auxiliares relacionadas a leitura e preparação dos
sinais de áudio.
O código aqui disponibilizado pode ser importado em
outros módulos para ser utilizado de forma encapsulada
nas etapas de preparação e construção de pipelines.
------------------------------------------------------
SUMÁRIO
------------------------------------------------------
1. Importação de bibliotecas
2. Funções de leitura e enriquecimento de base
3. Funções de pré-processamento de dados
4. Funções auxiliares de extração de features
5. Classes transformadoras do pipeline
"""
# Autor: <NAME>
# Data de Criação: 29/03/2021
"""
------------------------------------------------------
------------ 1. IMPORTAÇÃO DE BIBLIOTECAS ------------
------------------------------------------------------
"""
import os
import pandas as pd
import numpy as np
import shutil
import librosa
from warnings import filterwarnings
filterwarnings('ignore')
from sklearn.base import BaseEstimator, TransformerMixin
"""
------------------------------------------------------
--- 2. FUNÇÕES DE LEITURA E ENRIQUECIMENTO DE BASE ---
------------------------------------------------------
"""
# Function for copying audio files from the Mozilla Common Voice portal
def copy_common_voice(mcv_train_path, mcv_clips_path, data_path, n_mcv_files=10,
label_prefix='interlocutor_', new_folder=False):
"""
Function responsible for copying audio files from the local Mozilla Common Voice directory
Parameters
----------
:param mcv_train_path: path to the Mozilla Common Voice train.tsv file [type: string]
:param mcv_clips_path: directory containing Mozilla Common Voice audio files [type: string]
:param data_path: data directory of the Voice Unlocker project [type: string]
:param n_mcv_files: number of mp3 files to copy from MCV [type: int, default=10]
:param label_prefix: label prefix in the Voice Unlocker directory [type: string, default='interlocutor_']
:param new_folder: flag for creating a new speaker folder [type: bool, default=False]
Returns
-------
This function returns nothing; it simply copies the files from the Mozilla Common Voice
directory into the appropriate speaker folder of the Voice Unlocker project directory
"""
labels = os.listdir(data_path)
if new_folder:
# Defining the name of the new folder to be created
qtd_labels = len(labels)
if qtd_labels < 9:
other_label = label_prefix + '0' + str(qtd_labels + 1)
else:
other_label = label_prefix + str(qtd_labels + 1)
# Creating the new folder
print(f'Folders present before creation: \n{os.listdir(data_path)}')
os.mkdir(os.path.join(data_path, other_label))
print(f'\nFolders present after creation: \n{os.listdir(data_path)}')
else:
other_label = sorted(labels)[-1]
print(f'Folders present in the target directory: \n{os.listdir(data_path)}')
# Defining the destination directory based on the label defined above
dst_path = os.path.join(data_path, other_label)
print(f'\nDestination directory for the class N-1 audio samples:\n{dst_path}')
# Reading the reference base of files to be copied
mcv_train = pd.read_csv(mcv_train_path, sep='\t')
mcv_train = mcv_train.head(n_mcv_files)
mcv_train['src_path'] = mcv_train['path'].apply(lambda x: os.path.join(mcv_clips_path, x))
mcv_train['dst_path'] = mcv_train['path'].apply(lambda x: os.path.join(dst_path, x))
# Copying files
for src, dst in mcv_train.loc[:, ['src_path', 'dst_path']].values:
shutil.copy(src=src, dst=dst)
# Validating the copy
new_files = os.listdir(dst_path)
print(f'\nNumber of new files copied into the project folder: {len(new_files)}')
# Function for reading audio files from the project directory
def read_data(data_path, sr, signal_col='signal', target_col='y_class'):
"""
Reads and stores audio files together with their metadata
Parameters
----------
:param data_path: target directory containing one folder per speaker [type: string]
:param sr: sampling rate used when loading the audio files [type: int]
:param signal_col: name of the column that stores the signal [type: string, default='signal']
:param target_col: name of the column that stores the target [type: string, default='y_class']
Returns
-------
:return df: pandas DataFrame with the stored audio data [type: pd.DataFrame]
"""
# Extracting information about the audio signals stored locally
roots = [root for root, dirs, files in os.walk(data_path)][1:]
files = [files for root, dirs, files in os.walk(data_path)][1:]
paths = [os.path.join(root, f) for root, file in zip(roots, files) for f in file]
filenames = [p.split('/')[-1] for p in paths]
file_formats = [f.split('.')[-1] for f in filenames]
labels = [p.split('/')[-2] for p in paths]
# Loading the signals
signals = [librosa.load(path, sr=sr)[0] for path in paths]
durations = [librosa.get_duration(s) for s in signals]
# Creating a DataFrame to store the signals
df = pd.DataFrame()
df['audio_path'] = paths
df['filename'] = filenames
df['file_format'] = file_formats
df[signal_col] = signals
df['duration'] = durations
df['label_class'] = labels
# Defining the response variable
unique_class = df['label_class'].sort_values().unique()
class_dict = {c: i for c, i in zip(unique_class, range(1, len(unique_class) + 1))}
df[target_col] = df['label_class'].map(class_dict)
return df
"""
------------------------------------------------------
------ 3. FUNÇÕES DE PRÉ-PROCESSAMENTO DE DADOS ------
------------------------------------------------------
"""
# Function for pre-processing the dataset
def data_pre_processing(df, signal_col='signal', target_col='y_class', encoded_target=True):
"""
Function responsible for filtering the columns used during preparation and applying
the encoding step to the response variable
Parameters
----------
:param df: original dataset containing the audio information [type: pd.DataFrame]
:param signal_col: column that stores the time-domain signal [type: string, default='signal']
:param target_col: column that stores the target variable [type: string, default='y_class']
:param encoded_target: whether to apply encoding to the target array [type: bool, default=True]
Returns
-------
:return X: dataset containing only the input audio signal [type: pd.DataFrame]
:return y: multidimensional array with class information [type: np.array]
"""
# Filtering the initial dataset
X_df = df.loc[:, [signal_col]]
y_df = df.loc[:, [target_col]]
# Encoding the target variable
if encoded_target:
y = pd.get_dummies(y_df[target_col]).values
else:
y = y_df.values.reshape(-1)
return X_df, y
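# Hedged usage sketch (assumption: data_path points at the Voice Unlocker audio folders;
# illustrative only, not called anywhere in this module): read the raw audio base and
# split it into model inputs and encoded targets.
def _example_prepare_base(data_path, sr=22050):
    df = read_data(data_path=data_path, sr=sr)
    X_df, y = data_pre_processing(df, signal_col='signal', target_col='y_class')
    return X_df, y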
"""
------------------------------------------------------
--- 4. FUNÇÕES AUXILIARES DE EXTRAÇÃO DE FEATURES ----
------------------------------------------------------
"""
# Function for splitting frequency bands (BER)
def calc_split_freq_bin(spec, split_freq, sr):
"""
Function responsible for computing the index of the split frequency F
in the discrete frequency spectrum
Parameters
----------
:param spec: spectrogram computed via STFT [type: ndarray]
:param split_freq: split frequency F [type: int]
:param sr: sampling rate of the signal [type: int]
Returns
-------
:return idx_split_freq: index related to the parameter F in the discrete spectrum [type: int]
:return split_freq_bin: discrete frequency related to the parameter F [type: float]
"""
# Frequency range (Nyquist)
f_range = sr / 2
# Frequency range covered by each individual discrete bin
qtd_freq_bins = spec.shape[0]
f_delta_bin = f_range / qtd_freq_bins
# Computing the index of the parameter F among the discrete bins
idx_split_freq = int(np.floor(split_freq / f_delta_bin))
# Computing the frequency grid present in the spectral matrix
freq_bins = np.linspace(0, f_range, qtd_freq_bins)
split_freq_bin = freq_bins[idx_split_freq]
return idx_split_freq, split_freq_bin
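# Worked example (illustrative numbers only): with sr=22050 the Nyquist range is
# 11025 Hz; an STFT with n_fft=2048 yields 1025 frequency bins, so each bin covers
# roughly 10.8 Hz and a split frequency of 2000 Hz maps to index ~185, i.e. a discrete
# bin just under 2 kHz.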
# Function for computing the Band Energy Ratio (BER)
def calc_ber(spec, split_freq, sr):
"""
Function responsible for computing the band energy ratio (BER)
Parameters
----------
:param spec: spectrogram computed via STFT [type: ndarray]
:param split_freq: split frequency F [type: int]
:param sr: sampling rate of the signal [type: int]
Returns
-------
:return ber: band energy ratio for each frame t [type: np.array]
"""
# Computing the discrete frequency bin of the parameter F
idx_split_freq, split_freq_bin = calc_split_freq_bin(spec, split_freq, sr)
bers = []
# Converting spectrum amplitudes into powers
power_spec = np.abs(spec) ** 2
# Transposing so that iteration happens frame by frame
power_spec = power_spec.T
# Computing the sums for each frame
for frame in power_spec:
sum_power_low_freq = frame[:idx_split_freq].sum()
sum_power_high_freq = frame[idx_split_freq:].sum()
ber_frame = sum_power_low_freq / sum_power_high_freq
bers.append(ber_frame)
return np.array(bers)
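# Hedged usage sketch (illustrative parameters, not called anywhere in this module):
# compute the band energy ratio of a raw signal around a 2 kHz split frequency.
def _example_band_energy_ratio(signal, sr=22050, split_freq=2000):
    spec = librosa.stft(signal, n_fft=2048, hop_length=512)
    return calc_ber(spec, split_freq=split_freq, sr=sr)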
"""
------------------------------------------------------
------- 5. CLASSES TRANSFORMADORAS DO PIPELINE -------
------------------------------------------------------
"""
# Transformer for the amplitude envelope
class AmplitudeEnvelop(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the amplitude envelope of audio signals
considering a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: overlapping parameter between frames of the signal [type: int]
:param signal_col: name of the column that stores the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after feature extraction
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the amplitude envelope [type: pd.DataFrame]
Usage
-----
ae_extractor = AmplitudeEnvelop(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_ae = ae_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Computing the amplitude envelope for each frame of the signal
X['ae'] = X[self.signal_col].apply(lambda x: np.array([max(x[i:i+self.frame_size]) for i in range(0, len(x), self.hop_length)]))
# Building a dictionary with the aggregations of each signal's amplitude envelope
X['aggreg_dict'] = X['ae'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['ae_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping auxiliary columns
X = X.drop(['ae', 'aggreg_dict'], axis=1)
return X
# Transformer for RMS Energy
class RMSEnergy(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the root mean square energy of audio signals
considering a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: overlapping parameter between frames of the signal [type: int]
:param signal_col: name of the column that stores the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after feature extraction
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the root mean square energy [type: pd.DataFrame]
Usage
-----
rms_extractor = RMSEnergy(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_rms = rms_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Extracting the feature for each signal
X['rms_engy'] = X[self.signal_col].apply(lambda x: librosa.feature.rms(x, frame_length=self.frame_size,
hop_length=self.hop_length)[0])
# Building a dictionary with the aggregations
X['aggreg_dict'] = X['rms_engy'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['rms_engy_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping auxiliary columns
X = X.drop(['rms_engy', 'aggreg_dict'], axis=1)
return X
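# Hedged sketch (assumption, not part of the original module): because the transformers
# above follow the scikit-learn fit/transform contract, they can be chained in a regular
# Pipeline for feature extraction.
def _example_feature_pipeline(frame_size, hop_length):
    from sklearn.pipeline import Pipeline
    return Pipeline([
        ('amplitude_envelope', AmplitudeEnvelop(frame_size=frame_size, hop_length=hop_length)),
        ('rms_energy', RMSEnergy(frame_size=frame_size, hop_length=hop_length))
    ])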
# Transformer for Zero Crossing Rate
class ZeroCrossingRate(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the zero crossing rate of audio signals
considering a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: overlapping parameter between frames of the signal [type: int]
:param signal_col: name of the column that stores the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after feature extraction
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the zero crossing rate [type: pd.DataFrame]
Usage
-----
zcr_extractor = ZeroCrossingRate(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_zcr = zcr_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Extracting the feature for each signal
X['zcr'] = X[self.signal_col].apply(lambda x: librosa.feature.zero_crossing_rate(x, frame_length=self.frame_size,
hop_length=self.hop_length)[0])
# Building a dictionary with the aggregations
X['aggreg_dict'] = X['zcr'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['zcr_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping auxiliary columns
X = X.drop(['zcr', 'aggreg_dict'], axis=1)
return X
# Transformer for BER
class BandEnergyRatio(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the band energy ratio of audio signals
considering a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: overlapping parameter between frames of the signal [type: int]
:param split_freq: split frequency between high and low frequencies [type: int]
:param sr: sampling rate of the audio signal [type: int]
:param signal_col: name of the column that stores the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after feature extraction
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the band energy ratio [type: pd.DataFrame]
Usage
-----
ber_extractor = BandEnergyRatio(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_ber = ber_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, split_freq, sr, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.split_freq = split_freq
self.sr = sr
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Computing the spectrogram of the signals
X['spec'] = X[self.signal_col].apply(lambda x: librosa.stft(y=x, n_fft=self.frame_size,
hop_length=self.hop_length))
# Computing the BER
X['ber'] = X['spec'].apply(lambda x: calc_ber(spec=x, split_freq=self.split_freq, sr=self.sr))
# Building a dictionary with the aggregations
X['aggreg_dict'] = X['ber'].apply(lambda x:
|
pd.DataFrame(x)
|
pandas.DataFrame
|
#!/usr/bin/env python
"""Estimate allele lengths and find outliers at STR loci
"""
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
import argparse
import sys
import glob
import os
import re
import numpy as np
import statsmodels.api as sm
from scipy.stats import norm
from statsmodels.sandbox.stats.multicomp import multipletests
from sklearn import linear_model
import pandas as pd
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1.0"
__email__ = "<EMAIL>"
def parse_args():
"""Parse the input arguments, use '-h' for help"""
parser = argparse.ArgumentParser(description='Estimate allele lengths and find outliers at STR loci.')
parser.add_argument(
'--locus_counts', type=str, nargs='+', required = True,
help='.locus_counts files for all samples. Contains the number of reads assigned to each STR locus.')
parser.add_argument(
'--STR_counts', type=str, nargs='+', required = True,
help='.STR_counts files for all samples. Contains the number of reads mapped to each STR decoy chromosome.')
parser.add_argument(
'--median_cov', type=str, nargs='+', required = True,
help='.median_cov files for all samples. Text files containing median coverage.')
parser.add_argument(
'--out', type=str, default = '',
help='Prefix for all output files (suffix will be STRs.tsv) (default: %(default)s)')
parser.add_argument(
'--model', type=str, default='STRcov.model.csv',
help='Data to produce linear regression model (provided with STRetch) (default: %(default)s)')
parser.add_argument(
'--control', type=str, default='',
help='Input file for median and standard deviation estimates at each locus from a set of control samples. This file can be produced by this script using the emit option. If this option is not set, all samples in the current batch will be used as controls by default.')
parser.add_argument(
'--emit', type=str, default='',
help='Output file for median and standard deviation estimates at each locus (tsv).')
return parser.parse_args()
def get_sample(fullpath):
"""Get the sample ID from the filename"""
basename = os.path.basename(fullpath)
return(basename.split('.')[0])
def parse_STRcov(filename):
"""Parse all STR coverage"""
sample_id = get_sample(filename)
try:
cov_data = pd.read_table(filename, delim_whitespace = True,
names = ['chrom', 'start', 'end', 'decoycov'])
except pd.io.common.EmptyDataError:
sys.exit('ERROR: file {0} was empty.\n'.format(filename))
cov_data['sample'] = sample_id
cov_data['repeatunit'] = [x.split('-')[1] for x in cov_data['chrom']]
cov_data = cov_data[['sample', 'repeatunit', 'decoycov']]
return(cov_data)
def parse_locuscov(filename):
"""Parse locuscoverage data produced by identify_locus.py"""
sample_id = get_sample(filename)
try:
locuscov_data = pd.read_table(filename, delim_whitespace = True)
except pd.io.common.EmptyDataError:
sys.exit('ERROR: file {0} was empty.\n'.format(filename))
if locuscov_data.shape[0] == 0: # Check for file with only header
sys.exit('ERROR: file {0} contained 0 loci.\n'.format(filename))
locuscov_data['sample'] = sample_id
locuscov_data['locus'] = ['{0}-{1}-{2}'.format(locuscov_data['STR_chr'][i],
locuscov_data['STR_start'][i], locuscov_data['STR_stop'][i]) for i in range(len(locuscov_data.index))]
locuscov_data['repeatunit'] = locuscov_data['motif']
locuscov_data['locuscoverage'] = locuscov_data['count']
locuscov_data = locuscov_data[['sample', 'locus', 'repeatunit', 'reflen', 'locuscoverage']]
return(locuscov_data)
def parse_genomecov(filename):
"""Parse median genome coverage from covmed output.
Assumes median coverage is the top left value in the text file."""
sample_id = get_sample(filename)
try:
mediancov = pd.read_table(filename, delim_whitespace = True, header = None).iloc[0,0]
except pd.io.common.EmptyDataError:
sys.exit('ERROR: file {0} was empty.\n'.format(filename))
if mediancov < 1:
sys.exit('ERROR: Median coverage in file {0} was {1}.\nSuch a low value could indicate median coverage was not correctly calculated,\nfor example an incorrect target region was specified or the WGS pipeline was used for exome data.'.format(filename, mediancov))
genomecov_data = pd.DataFrame({'sample': [sample_id], 'genomecov': [mediancov]})
return(genomecov_data)
def parse_controls(control_file):
"""Parse control file with columns locus, median and standard deviation"""
control_estimates = pd.read_table(control_file, index_col=0)
# Allow for old style column headings, but change to mu and sd.
if control_estimates.columns[0] in ['mu', 'median'] and control_estimates.columns[1] in ['sd', 'SD']:
colnames = list(control_estimates.columns)
colnames[0:2] = ['mu', 'sd']
control_estimates.columns = colnames
else:
raise ValueError(''.join(["The column names in the control file ",
"don't look right, expecting columns named median, SD ",
"or mu, sd. Column names are ", str(list(control_estimates.columns)),
". Check the file: ", control_file]))
return(control_estimates)
#from statsmodels import robust
# If using mad below
def hubers_est(x):
"""Emit Huber's M-estimator median and SD estimates.
If Huber's fails, emit standard median and NA for sd"""
huber50 = sm.robust.scale.Huber(maxiter=50)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
try:
mu, s = huber50(np.array(x))
except (ValueError, RuntimeWarning):
mu = np.median(x)
s = np.nan
#s = robust.mad(x)
#XXX working on this - replace s with mad when hubers est fails?
return pd.Series({'mu': mu, 'sd': np.sqrt(s)})
def z_score(x, df):
"""Calculate a z score for each x value, using estimates from a pandas data
frame with the columns 'mu' and 'sd' and index corresponding to the x values"""
z = (x.transpose() - df['mu'])/df['sd']
return z.transpose()
def p_adj_bh(x):
'''Adjust p values using Benjamini/Hochberg method'''
return multipletests(x, method='fdr_bh', returnsorted = False)[1]
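# Hedged sketch (illustrative numbers only): the outlier test below boils down to a
# one-sided z test per sample per locus followed by a Benjamini/Hochberg correction.
def _example_outlier_pvals():
    est = pd.DataFrame({'mu': [5.0], 'sd': [0.5]}, index=['locusA'])
    counts = pd.DataFrame({'sample1': [5.2], 'sample2': [7.0]}, index=['locusA'])
    z = z_score(counts, est)         # (x - mu) / sd per sample
    pvals = norm.sf(z.values[0])     # one-sided upper-tail p values
    return p_adj_bh(pvals)           # BH-adjusted p values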
def main():
# Parse command line arguments
args = parse_args()
base_filename = args.out
STRcov_model_csv = args.model
emit_file = args.emit
control_file = args.control
locuscov_files = args.locus_counts
STRcov_files = args.STR_counts
genomecov_files = args.median_cov
results_suffix = 'STRs.tsv'
# Check files exist for all samples
locuscov_ids = set([get_sample(f) for f in locuscov_files])
STRcov_ids = set([get_sample(f) for f in STRcov_files])
genomecov_ids = set([get_sample(f) for f in genomecov_files])
if not (locuscov_ids == STRcov_ids == genomecov_ids):
all_samples = locuscov_ids | STRcov_ids | genomecov_ids
missing_samples = (all_samples - locuscov_ids) | (all_samples - STRcov_ids) | (all_samples - genomecov_ids)
sys.exit("ERROR: One or more files are missing for sample(s): " + ' '.join(missing_samples))
sys.stderr.write('Processing {0} samples\n'.format(len(locuscov_files)))
if len(locuscov_files) < 2 and control_file == '':
sys.stderr.write('WARNING: Only 1 sample and no control file provided, so outlier scores and p-values will not be generated.')
# Parse input data
locuscov_data = pd.concat( (parse_locuscov(f) for f in locuscov_files), ignore_index = True)
STRcov_data = pd.concat( (parse_STRcov(f) for f in STRcov_files), ignore_index = True)
genomecov_data = pd.concat( (parse_genomecov(f) for f in genomecov_files), ignore_index = True)
# Check for multiple rows with the same sample/locus combination
crosstable = pd.crosstab(locuscov_data['locus'], locuscov_data['sample'])
ismultiloci = crosstable.apply(lambda row: any(row > 1), axis=1)
multiloci = ismultiloci[ismultiloci == True].index.values
if len(multiloci) > 0:
sys.exit('''
The locus count input data contains multiple rows with the same sample/locus combination.
This is usually caused by two loci at the same position in the STR annotation bed file.
Check these loci:
''' + ' '.join(multiloci))
# # Check for different reflen for the same locus
# grouped = locuscov_data.groupby('locus')
# reflenloci = []
# for locus, group in grouped:
# if len(set(group['reflen'])) > 1:
# #reflenloci.append(name)
# # If different, replace with the smallest
# locuscov_data.loc[locuscov_data['locus'] == locus,'reflen'] = np.repeat(min(group['reflen']), len(group['reflen']))
# if len(reflenloci) > 0:
# sys.exit('''
# The locus count input data contains the same locus with different reflens.
# This may be caused by an error in the STR annotation bed file.
# Check these loci:
# ''' + ' '.join(reflenloci)) + '''
# The locus count input data contains the same locus with different reflens.
# This may be caused by an error in the STR annotation bed file.
# Check the above loci'''
#locuscov_data['reflen'] = np.repeat(1, len(locuscov_data['reflen']))
# Fill zeros in locuscov
locuscov_wide = locuscov_data.pivot(index='locus', columns='sample', values='locuscoverage').fillna(0)
locuscov_wide['locus'] = locuscov_wide.index
sample_cols = list(set(locuscov_data['sample']))
locuscov_long = pd.melt(locuscov_wide, id_vars = 'locus',
value_vars = sample_cols, value_name = 'locuscoverage',
var_name = 'sample')
# Add locus info back in
locuscov_data = pd.merge(locuscov_long, locuscov_data[['locus', 'repeatunit', 'reflen']].drop_duplicates(), how='left')
# Normalise STR coverage by median coverage
factor = 100
STRcov_data = pd.merge(STRcov_data, genomecov_data)
#STRcov_data['decoycov_norm'] = factor * (STRcov_data['decoycov'] + 1) / STRcov_data['genomecov']
#STRcov_data['decoycov_log'] = np.log2(STRcov_data['decoycov_norm'])
#XXX combines the previous two lines into one. Keeping commented out in case coverage_norm is required later
STRcov_data['decoycov_log'] = np.log2(factor * (STRcov_data['decoycov'] + 1) / STRcov_data['genomecov'])
# #XXX ***Not fully implemented***
# # Look for repeat units where the number of reads mapping to the decoy can't be
# # explained by those mapping to all loci with that repeat unit
#
# # Sum the counts over all loci for each repeat unit in each sample
# locus_totals = locuscov_data.groupby(['sample', 'repeatunit'])['locuscoverage'].aggregate(np.sum)
# locus_totals = pd.DataFrame(locus_totals).reset_index() # convert to DataFrame and make indices into columns
# # Calculate the difference between reads assigned to a decoy and the sum of
# # all reads assigned to loci with that repeat unit
# all_differences = pd.merge(STRcov_data, locus_totals, how='left')
# all_differences['difference'] = all_differences['decoycov'] - all_differences['locuscoverage']
# # Normalise differences by median coverage and take the log2
# all_differences['difference_log'] = np.log2(factor * (all_differences['difference'] + 1) / all_differences['genomecov'])
#
# locus_totals = pd.merge(locus_totals, STRcov_data)
#
# # Assign decoy counts to each locus, based on what proportion of the counts for that repeat unit they already have
locus_totals = pd.merge(locuscov_data, STRcov_data, how = 'left')
locus_totals['total_assigned'] = locus_totals['locuscoverage'] #XXX remove this line if implementing the above
locus_totals['total_assigned_log'] = np.log2(factor * (locus_totals['total_assigned'] + 1) / locus_totals['genomecov'])
# For each locus, calculate if that sample is an outlier relative to others
total_assigned_wide = locus_totals.pivot(index='locus', columns='sample', values='total_assigned_log')
# Calculate values for if there were zero reads at a locus in all samples
null_locus_counts = np.log2(factor * (0 + 1) / genomecov_data['genomecov'])
sample_names = genomecov_data['sample']
null_locus_counts.index = sample_names
# Add a null locus that has 0 reads for all individuals
# (so just uses coverage)
null_locus_counts_est = hubers_est(null_locus_counts)
# Calculate a z scores using median and SD estimates from the current set
# of samples
# Use Huber's M-estimator to calculate median and SD across all samples
# for each locus
sample_estimates = total_assigned_wide.apply(hubers_est, axis=1)
# Where sd is NA, replace with the minimum non-zero sd from all loci
min_sd = np.min(sample_estimates['sd'][sample_estimates['sd'] > 0])
sample_estimates['sd'].fillna(min_sd, inplace=True)
# if sd is 0, replace with min_sd #XXX is this sensible?
#if null_locus_counts_est['sd'] == 0 or np.isnan(null_locus_counts_est['sd']):
if null_locus_counts_est['sd'] == 0:
null_locus_counts_est['sd'] = min_sd
# Save median and SD of all loci to file if requested (for use as a
# control set for future data sets)
if emit_file != '':
sample_estimates.loc['null_locus_counts'] = null_locus_counts_est
n = len(total_assigned_wide.columns)
sample_estimates['n'] = n
sample_estimates.to_csv(emit_file, sep= '\t')
# Calculate z scores using median and SD estimates per locus from a
# provided control set
if control_file != '':
# Parse control file
control_estimates = parse_controls(control_file)
# Get a list of all loci in the control file but not the sample data
control_loci_df = control_estimates.iloc[control_estimates.index != 'null_locus_counts']
control_loci = [x for x in control_loci_df.index if x not in total_assigned_wide.index]
# Extract and order just those control estimates appearing in the current data
mu_sd_estimates = control_estimates.reindex(total_assigned_wide.index)
# Fill NaNs with null_locus_counts values
mu_sd_estimates.fillna(control_estimates.loc['null_locus_counts'],
inplace=True)
else:
# Extract and order estimates to match the current data
mu_sd_estimates = sample_estimates.reindex(total_assigned_wide.index)
# calculate z scores
z = z_score(total_assigned_wide, mu_sd_estimates)
# If a control file is given, effectively add zero counts at all loci that are
# in the controls but not in the samples.
# These extra rows will disappear due to a later merge
if control_file != '':
# Create a total_assigned_wide as if all loci have zero counts
null_total_assigned_wide = pd.DataFrame(columns = sample_names, index = control_loci)
null_total_assigned_wide.fillna(null_locus_counts, inplace = True)
# Calculate z scores
null_z = z_score(null_total_assigned_wide,
control_estimates.reindex(null_total_assigned_wide.index))
loci_with_counts = z.index
z = z.append(null_z)
if z.shape[0] == 1:
ids = z.columns # save index order as data gets sorted
# Calculate p values based on z scores (one sided)
z_list = list(z.iloc[0])
pvals = norm.sf(z_list) # no need to adjust p values if one locus
# Merge pvals and z scores back into locus_totals
p_z_df = pd.DataFrame({'sample': ids, 'p_adj': pvals, 'outlier': z_list})
locus_totals = pd.merge(locus_totals, p_z_df)
elif z.shape[0] > 1:
# Calculate p values based on z scores (one sided)
pvals = z.apply(lambda z_row: [norm.sf(x) for x in z_row], axis=1, result_type='broadcast') # apply to each row
if pvals.isnull().values.all(): # Don't bother adjusting p values if all are null
adj_pvals = pvals
else:
# Adjust p values using Benjamini/Hochberg method
adj_pvals = pvals.apply(p_adj_bh, axis=0) # apply to each column
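# Illustrative sketch only: p_adj_bh is defined earlier in this script. A minimal
# Benjamini-Hochberg adjustment for a 1D array/Series of p values could be:
# def p_adj_bh(pvals):
#     p = np.asarray(pvals, dtype=float)
#     order = np.argsort(p)
#     ranked = p[order] * len(p) / (np.arange(len(p)) + 1)
#     adjusted = np.minimum.accumulate(ranked[::-1])[::-1]  # enforce monotonicity
#     out = np.empty_like(adjusted)
#     out[order] = np.clip(adjusted, 0, 1)
#     return out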
# Merge pvals and z scores back into locus_totals
adj_pvals['locus'] = adj_pvals.index
pvals_long = pd.melt(adj_pvals, id_vars = 'locus',
value_vars = sample_cols, value_name = 'p_adj', var_name = 'sample')
locus_totals = pd.merge(locus_totals, pvals_long)
z['locus'] = z.index #important to do this only after p values calculated
z_long = pd.melt(z, id_vars = 'locus',
value_vars = sample_cols, value_name = 'outlier', var_name = 'sample')
locus_totals = pd.merge(locus_totals, z_long)
elif z.shape[0] == 0:
pass #XXX raise error. No input data!
# Predict size (in bp) using the ATXN8 linear model (produced from data in
# decoySTR_cov_sim_ATXN8_AGC.R)
# Read in the raw data for this model from a file
# Note: coverage_norm = (STR coverage/median coverage) * 100
# allele2 is the length of the longer allele in bp inserted relative to ref
STRcov_model = pd.read_csv(STRcov_model_csv)
# Model is built from log2 data then converted back
# (to reduce heteroscedasticity)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model
# Reshape using X.reshape(-1, 1) if data has a single feature
# or X.reshape(1, -1) if it contains a single sample.
X_train = np.log2(STRcov_model['coverage_norm']).values.reshape(-1, 1)
Y_train = np.log2(STRcov_model['allele2'])
regr.fit(X_train, Y_train)
# Make a prediction
Y_pred = regr.predict(locus_totals['total_assigned_log'].values.reshape(-1, 1))
predict = np.power(2, Y_pred)
locus_totals['bpInsertion'] = predict
# Get the estimated size in terms of repeat units (total, not relative to ref)
repeatunit_lens = [len(x) for x in locus_totals['repeatunit']]
locus_totals['repeatUnits'] = (locus_totals['bpInsertion']/repeatunit_lens) + locus_totals['reflen']
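# Worked example of the conversion above: a 3 bp repeat unit (e.g. AGC) with an
# estimated insertion of 30 bp at a locus whose reference allele spans 15 repeat
# units gives repeatUnits = 30/3 + 15 = 25 total repeat units.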
# Split locus into 3 columns: chrom start end
locuscols = pd.DataFrame([x.split('-') for x in locus_totals['locus']],
columns = ['chrom', 'start', 'end'])
locus_totals = locus_totals.join(locuscols)
# Specify output data columns
write_data = locus_totals[['chrom', 'start', 'end',
'sample', 'repeatunit', 'reflen',
'locuscoverage',
'outlier', 'p_adj',
'bpInsertion', 'repeatUnits'
]]
#sort by outlier score then estimated size (bpInsertion), both descending
write_data = write_data.sort_values(['outlier', 'bpInsertion'], ascending=[False, False])
#XXX check for duplicate rows?
# Convert outlier and p_adj to numeric type and do some rounding/formatting
write_data['outlier'] =
|
pd.to_numeric(write_data['outlier'])
|
pandas.to_numeric
|
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_per_share_indicators
from data.model import BalanceMRQ, BalanceTTM, FinBalance
from data.model import FinCashFlowTTM, FinCashFlow
from data.model import FinIndicator
from data.model import FinIncome, FinIncomeTTM
from vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query
from vision.table.industry_daily import IndustryDaily
from vision.table.fin_cash_flow import FinCashFlow
from vision.table.fin_balance import FinBalance
from vision.table.fin_income import FinIncome
from vision.table.fin_indicator import FinIndicator
from vision.table.valuation import Valuation
from utilities.sync_util import SyncUtil
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url,
methods=[{'packet': 'financial.factor_per_share_indicators', 'class': 'FactorPerShareIndicators'}, ]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
Get the point in time n years before the current date; it must be a trading day.
If it is not a trading day, step back to the most recent trading day.
:param days:
:param trade_date: current trading day
:param n:
:return:
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
def _func_sets(self, method):
# Filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
Fetch the base data.
Fetch the base data of all stocks for the given trading day, day by day.
:param trade_date: trading day
:return:
"""
# Convert the time format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
# Read the factors currently involved
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# Report data
cash_flow_sets = get_fin_consolidated_statements_pit(FinCashFlow,
[FinCashFlow.cash_and_equivalents_at_end, # cash and cash equivalents at end of period
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(col, axis=1)
cash_flow_sets = cash_flow_sets.rename(
columns={'cash_and_equivalents_at_end': 'cash_and_equivalents_at_end', # cash and cash equivalents at end of period
})
income_sets = get_fin_consolidated_statements_pit(FinIncome,
[FinIncome.operating_revenue, # operating revenue
FinIncome.total_operating_revenue, # total operating revenue
FinIncome.operating_profit, # operating profit
FinIncome.diluted_eps, # diluted earnings per share
], dates=[trade_date])
for col in columns:
if col in list(income_sets.keys()):
income_sets = income_sets.drop(col, axis=1)
income_sets = income_sets.rename(columns={'operating_revenue': 'operating_revenue', # operating revenue
'total_operating_revenue': 'total_operating_revenue', # total operating revenue
'operating_profit': 'operating_profit', # operating profit
'diluted_eps': 'diluted_eps', # diluted earnings per share
})
balance_sets = get_fin_consolidated_statements_pit(FinBalance,
[FinBalance.equities_parent_company_owners, # equity attributable to owners of the parent company
FinBalance.capital_reserve_fund,
FinBalance.surplus_reserve_fund,
FinBalance.retained_profit,
], dates=[trade_date])
for col in columns:
if col in list(balance_sets.keys()):
balance_sets = balance_sets.drop(col, axis=1)
balance_sets = balance_sets.rename(
columns={'equities_parent_company_owners': 'total_owner_equities', # equity attributable to owners of the parent company
'capital_reserve_fund': 'capital_reserve_fund', # capital reserve
'surplus_reserve_fund': 'surplus_reserve_fund', # surplus reserve
'retained_profit': 'retained_profit', # undistributed (retained) profit
})
indicator_sets = get_fin_consolidated_statements_pit(FinIndicator,
[
# FinIndicator.FCFE, # free cash flow to equity
# FinIndicator.FCFF, # free cash flow to the firm
FinIndicator.eps_basic, # basic earnings per share
# FinIndicator.DPS, # dividend per share (pre-tax)
], dates=[trade_date])
for col in columns:
if col in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(col, axis=1)
indicator_sets = indicator_sets.rename(columns={
# 'FCFE': 'shareholder_fcfps', # free cash flow to equity
# 'FCFF': 'enterprise_fcfps', # free cash flow to the firm
'eps_basic': 'basic_eps', # basic earnings per share
# 'DPS': 'dividend_receivable', # dividend per share (pre-tax)
})
# TTM data
cash_flow_ttm_sets = get_fin_consolidated_statements_pit(FinCashFlowTTM,
[FinCashFlowTTM.cash_equivalent_increase_indirect,
# net increase in cash and cash equivalents
FinCashFlowTTM.net_operate_cash_flow, # net cash flow from operating activities
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_ttm_sets.keys()):
cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1)
cash_flow_ttm_sets = cash_flow_ttm_sets.rename(
columns={'cash_equivalent_increase_indirect': 'cash_equivalent_increase_ttm', # net increase in cash and cash equivalents
'net_operate_cash_flow': 'net_operate_cash_flow_ttm', # net cash flow from operating activities
})
income_ttm_sets = get_fin_consolidated_statements_pit(FinIncomeTTM,
[FinIncomeTTM.np_parent_company_owners, # net profit attributable to owners of the parent company
FinIncomeTTM.operating_profit, # operating profit
FinIncomeTTM.operating_revenue, # operating revenue
FinIncomeTTM.total_operating_revenue, # total operating revenue
], dates=[trade_date])
for col in columns:
if col in list(income_ttm_sets.keys()):
income_ttm_sets = income_ttm_sets.drop(col, axis=1)
income_ttm_sets = income_ttm_sets.rename(
columns={'np_parent_company_owners': 'np_parent_company_owners_ttm', # net profit attributable to owners of the parent company
'operating_profit': 'operating_profit_ttm', # operating profit
'operating_revenue': 'operating_revenue_ttm', # operating revenue
'total_operating_revenue': 'total_operating_revenue_ttm', # total operating revenue
})
column = ['trade_date']
valuation_data = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.capitalization,
).filter(Valuation.trade_date.in_([trade_date])))
for col in column:
if col in list(valuation_data.keys()):
valuation_data = valuation_data.drop(col, axis=1)
valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex()
valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex()
valuation_sets =
|
pd.merge(income_ttm_sets, valuation_sets, on='security_code')
|
pandas.merge
|
# Copyright © 2019 <NAME>
"""
Test for the ``preprocess._aggregate_columns._replace`` module.
"""
from numpy import nan
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...clean_variables import VariableCleaner
class CleanReplaceTests(unittest.TestCase):
@staticmethod
def test_clean_replace_string_values():
"""Replace strings in a column."""
_input = DataFrame({"a": [0, 1, "b"]})
_expected = DataFrame({"a": [0, 1, 2]})
_groupings = [{"operator": "replace", "columns": ["a"], "value": ["b", 2]}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_replace_int_values():
"""Replace an int in a column."""
_input = DataFrame({"a": [0, 1, "b"]})
_expected = DataFrame({"a": [2, 1, "b"]})
_groupings = [{"operator": "replace", "columns": ["a"], "value": [0, 2]}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_replace_nan_values():
"""Replace NaN values in a column."""
_input = DataFrame({"a": [0.0, 1.0, "a"]})
_expected =
|
DataFrame({"a": [0.0, 1.0, nan]})
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
concat(
[Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
).dtype
== np.object_
)
def test_concat_empty_series_dtype_category_with_array(self):
# GH#18515
assert (
concat(
[Series(np.array([]), dtype="category"), Series(dtype="float64")]
).dtype
== "float64"
)
def test_concat_empty_series_dtypes_sparse(self):
result = concat(
[
Series(dtype="float64").astype("Sparse"),
Series(dtype="float64").astype("Sparse"),
]
)
assert result.dtype == "Sparse[float64]"
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
)
expected = pd.SparseDtype("object")
assert result.dtype == expected
def test_concat_empty_df_object_dtype(self):
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_concat_empty_dataframe(self):
# 39037
df1 = DataFrame(columns=["a", "b"])
df2 = DataFrame(columns=["b", "c"])
result = concat([df1, df2, df1])
expected = DataFrame(columns=["a", "b", "c"])
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Train, test, evaluate, and use a gene symbol classifier to assign gene symbols
to protein sequences.
Evaluate a trained network
A trained network, specified with the `--checkpoint` argument with its path,
is evaluated by assigning symbols to the canonical translations of protein sequences
of annotations in the latest Ensembl release and comparing them to the existing
symbol assignments.
Get statistics for existing symbol assignments
Gene symbol assignments from a classifier can be compared against the existing
assignments in the Ensembl database, by specifying the path to the assignments CSV file
with `--assignments_csv` and the Ensembl database name with `--ensembl_database`.
"""
# standard library imports
import argparse
import csv
import datetime as dt
import json
import math
import pathlib
import pprint
import random
import sys
import time
# third party imports
import numpy as np
import pandas as pd
import torch
import torchmetrics
import yaml
from loguru import logger
from torch import nn
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
# project imports
from utils import (
GeneSymbolClassifier,
SequenceDataset,
data_directory,
get_assemblies_metadata,
get_species_taxonomy_id,
get_taxonomy_id_clade,
get_xref_canonical_translations,
load_checkpoint,
logging_format,
read_fasta_in_chunks,
sequences_directory,
)
selected_genome_assemblies = {
"GCA_002007445.2": ("Ailuropoda melanoleuca", "Giant panda"),
"GCA_900496995.2": ("Aquila chrysaetos chrysaetos", "Golden eagle"),
"GCA_009873245.2": ("Balaenoptera musculus", "Blue whale"),
"GCA_002263795.2": ("Bos taurus", "Cow"),
"GCA_000002285.2": ("Canis lupus familiaris", "Dog"),
"GCA_000951615.2": ("Cyprinus carpio", "Common carp"),
"GCA_000002035.4": ("<NAME>", "Zebrafish"),
"GCA_000001215.4": ("Drosophila melanogaster", "Drosophila melanogaster"),
"GCA_000181335.4": ("Felis catus", "Cat"),
"GCA_000002315.5": ("Gallus gallus", "Chicken"),
"GCA_000001405.28": ("Homo sapiens", "Human"),
"GCA_000001905.1": ("Loxodonta africana", "Elephant"),
"GCA_000001635.9": ("Mus musculus", "Mouse"),
"GCA_000003625.1": ("Oryctolagus cuniculus", "Rabbit"),
"GCA_002742125.1": ("Ovis aries", "Sheep"),
"GCA_000001515.5": ("Pan troglodytes", "Chimpanzee"),
"GCA_008795835.1": ("Panthera leo", "Lion"),
"GCA_000146045.2": ("Saccharomyces cerevisiae", "Saccharomyces cerevisiae"),
"GCA_000003025.6": ("Sus scrofa", "Pig"),
}
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EarlyStopping:
"""
Stop training if validation loss doesn't improve during a specified patience period.
"""
def __init__(self, patience=7, loss_delta=0):
"""
Args:
patience (int): Number of calls to continue training if validation loss is not improving. Defaults to 7.
loss_delta (float): Minimum change in the monitored quantity to qualify as an improvement. Defaults to 0.
Note: the checkpoint path is supplied on each call to __call__, not to the constructor.
"""
self.patience = patience
self.loss_delta = loss_delta
self.no_progress = 0
self.min_validation_loss = np.Inf
def __call__(
self,
network,
optimizer,
experiment,
symbols_metadata,
validation_loss,
checkpoint_path,
):
if self.min_validation_loss == np.Inf:
self.min_validation_loss = validation_loss
logger.info("saving initial network checkpoint...")
checkpoint = {
"experiment": experiment,
"network_state_dict": network.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"symbols_metadata": symbols_metadata,
}
torch.save(checkpoint, checkpoint_path)
elif validation_loss <= self.min_validation_loss - self.loss_delta:
validation_loss_decrease = self.min_validation_loss - validation_loss
assert (
validation_loss_decrease > 0
), f"{validation_loss_decrease=}, should be a positive number"
logger.info(
f"validation loss decreased by {validation_loss_decrease:.4f}, saving network checkpoint..."
)
self.min_validation_loss = validation_loss
self.no_progress = 0
checkpoint = {
"experiment": experiment,
"network_state_dict": network.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"symbols_metadata": symbols_metadata,
}
torch.save(checkpoint, checkpoint_path)
else:
self.no_progress += 1
if self.no_progress == self.patience:
logger.info(
f"{self.no_progress} epochs with no validation loss improvement, stopping training"
)
return True
return False
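# Illustrative use of EarlyStopping (a sketch; the real call site is inside
# train_network below):
# stop_early = EarlyStopping(patience=7, loss_delta=0.001)
# for epoch in range(1, max_epochs + 1):
#     ...  # train, then compute average_validation_loss
#     if stop_early(network, optimizer, experiment, symbols_metadata,
#                   average_validation_loss, checkpoint_path):
#         break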
class Experiment:
"""
Object containing settings values and status of an experiment.
"""
def __init__(self, experiment_settings, datetime):
for attribute, value in experiment_settings.items():
setattr(self, attribute, value)
# experiment parameters
self.datetime = datetime
# set a seed for the PyTorch random number generator if not present
if not hasattr(self, "random_seed"):
self.random_seed = random.randint(1, 100)
if self.included_genera is not None and self.excluded_genera is not None:
raise ValueError(
'"included_genera" and "excluded_genera" are mutually exclusive experiment settings parameters, specify values to at most one of them'
)
# early stopping
loss_delta = 0.001
self.stop_early = EarlyStopping(self.patience, loss_delta)
# loss function
self.criterion = nn.NLLLoss()
self.num_complete_epochs = 0
self.filename = f"{self.filename_prefix}_ns{self.num_symbols}_{self.datetime}"
# self.padding_side = "left"
self.padding_side = "right"
def __str__(self):
return pprint.pformat(self.__dict__, sort_dicts=False)
def generate_dataloaders(experiment):
"""
Generate training, validation, and test dataloaders from the dataset files.
Args:
experiment (Experiment): Experiment object containing metadata
Returns:
tuple containing the training, validation, and test dataloaders
"""
dataset = SequenceDataset(
num_symbols=experiment.num_symbols,
sequence_length=experiment.sequence_length,
padding_side=experiment.padding_side,
included_genera=experiment.included_genera,
excluded_genera=experiment.excluded_genera,
)
experiment.symbol_mapper = dataset.symbol_mapper
experiment.protein_sequence_mapper = dataset.protein_sequence_mapper
experiment.clade_mapper = dataset.clade_mapper
experiment.num_protein_letters = len(
experiment.protein_sequence_mapper.protein_letters
)
experiment.num_clades = len(experiment.clade_mapper.categories)
pandas_symbols_categories = experiment.symbol_mapper.categorical_datatype.categories
logger.info(
"gene symbols:\n{}".format(
pandas_symbols_categories.to_series(
index=range(len(pandas_symbols_categories)), name="gene symbols"
)
)
)
# calculate the training, validation, and test set size
dataset_size = len(dataset)
experiment.validation_size = int(experiment.validation_ratio * dataset_size)
experiment.test_size = int(experiment.test_ratio * dataset_size)
experiment.training_size = (
dataset_size - experiment.validation_size - experiment.test_size
)
# split dataset into training, validation, and test datasets
training_dataset, validation_dataset, test_dataset = random_split(
dataset,
lengths=(
experiment.training_size,
experiment.validation_size,
experiment.test_size,
),
)
logger.info(
f"dataset split to training ({experiment.training_size}), validation ({experiment.validation_size}), and test ({experiment.test_size}) datasets"
)
# set the batch size equal to the size of the smallest dataset if larger than that
experiment.batch_size = min(
experiment.batch_size,
experiment.training_size,
experiment.validation_size,
experiment.test_size,
)
training_loader = DataLoader(
training_dataset,
batch_size=experiment.batch_size,
shuffle=True,
num_workers=experiment.num_workers,
)
validation_loader = DataLoader(
validation_dataset,
batch_size=experiment.batch_size,
shuffle=True,
num_workers=experiment.num_workers,
)
test_loader = DataLoader(
test_dataset,
batch_size=experiment.batch_size,
shuffle=True,
num_workers=experiment.num_workers,
)
return (training_loader, validation_loader, test_loader)
def train_network(
network,
optimizer,
experiment,
symbols_metadata,
training_loader,
validation_loader,
):
tensorboard_log_dir = f"runs/{experiment.num_symbols}/{experiment.datetime}"
summary_writer = SummaryWriter(log_dir=tensorboard_log_dir)
max_epochs = experiment.max_epochs
criterion = experiment.criterion
checkpoint_path = f"{experiment.experiment_directory}/{experiment.filename}.pth"
logger.info(f"start training, experiment checkpoints saved at {checkpoint_path}")
max_epochs_length = len(str(max_epochs))
num_train_batches = math.ceil(experiment.training_size / experiment.batch_size)
num_batches_length = len(str(num_train_batches))
if not hasattr(experiment, "average_training_losses"):
experiment.average_training_losses = []
if not hasattr(experiment, "average_validation_losses"):
experiment.average_validation_losses = []
experiment.epoch = experiment.num_complete_epochs + 1
epoch_times = []
for epoch in range(experiment.epoch, max_epochs + 1):
epoch_start_time = time.time()
experiment.epoch = epoch
# training
########################################################################
training_losses = []
# https://torchmetrics.readthedocs.io/en/latest/pages/overview.html#metrics-and-devices
train_accuracy = torchmetrics.Accuracy().to(DEVICE)
# set the network in training mode
network.train()
batch_execution_times = []
batch_loading_times = []
pre_batch_loading_time = time.time()
for batch_number, (inputs, labels) in enumerate(training_loader, start=1):
batch_start_time = time.time()
batch_loading_time = batch_start_time - pre_batch_loading_time
if batch_number < num_train_batches:
batch_loading_times.append(batch_loading_time)
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# zero accumulated gradients
network.zero_grad()
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
with torch.no_grad():
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# compute training loss
training_loss = criterion(output, labels)
training_losses.append(training_loss.item())
summary_writer.add_scalar("loss/training", training_loss, epoch)
# perform back propagation
training_loss.backward()
# prevent the exploding gradient problem
nn.utils.clip_grad_norm_(network.parameters(), experiment.clip_max_norm)
# perform an optimization step
optimizer.step()
batch_train_accuracy = train_accuracy(predictions, labels)
average_training_loss = np.average(training_losses)
batch_finish_time = time.time()
pre_batch_loading_time = batch_finish_time
batch_execution_time = batch_finish_time - batch_start_time
if batch_number < num_train_batches:
batch_execution_times.append(batch_execution_time)
train_progress = f"epoch {epoch:{max_epochs_length}} batch {batch_number:{num_batches_length}} of {num_train_batches} | average loss: {average_training_loss:.4f} | accuracy: {batch_train_accuracy:.4f} | execution: {batch_execution_time:.2f}s | loading: {batch_loading_time:.2f}s"
logger.info(train_progress)
experiment.num_complete_epochs += 1
average_training_loss = np.average(training_losses)
experiment.average_training_losses.append(average_training_loss)
# validation
########################################################################
num_validation_batches = math.ceil(
experiment.validation_size / experiment.batch_size
)
num_batches_length = len(str(num_validation_batches))
validation_losses = []
validation_accuracy = torchmetrics.Accuracy().to(DEVICE)
# disable gradient calculation
with torch.no_grad():
# set the network in evaluation mode
network.eval()
for batch_number, (inputs, labels) in enumerate(validation_loader, start=1):
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# compute validation loss
validation_loss = criterion(output, labels)
validation_losses.append(validation_loss.item())
summary_writer.add_scalar("loss/validation", validation_loss, epoch)
batch_validation_accuracy = validation_accuracy(predictions, labels)
average_validation_loss = np.average(validation_losses)
validation_progress = f"epoch {epoch:{max_epochs_length}} validation batch {batch_number:{num_batches_length}} of {num_validation_batches} | average loss: {average_validation_loss:.4f} | accuracy: {batch_validation_accuracy:.4f}"
logger.info(validation_progress)
average_validation_loss = np.average(validation_losses)
experiment.average_validation_losses.append(average_validation_loss)
total_validation_accuracy = validation_accuracy.compute()
average_batch_execution_time = sum(batch_execution_times) / len(
batch_execution_times
)
average_batch_loading_time = sum(batch_loading_times) / len(batch_loading_times)
epoch_finish_time = time.time()
epoch_time = epoch_finish_time - epoch_start_time
epoch_times.append(epoch_time)
train_progress = f"epoch {epoch:{max_epochs_length}} complete | validation loss: {average_validation_loss:.4f} | validation accuracy: {total_validation_accuracy:.4f} | time: {epoch_time:.2f}s"
logger.info(train_progress)
logger.info(
f"training batch average execution time: {average_batch_execution_time:.2f}s | average loading time: {average_batch_loading_time:.2f}s ({num_train_batches - 1} complete batches)"
)
if experiment.stop_early(
network,
optimizer,
experiment,
symbols_metadata,
average_validation_loss,
checkpoint_path,
):
summary_writer.flush()
summary_writer.close()
break
training_time = sum(epoch_times)
average_epoch_time = training_time / len(epoch_times)
logger.info(
f"total training time: {training_time:.2f}s | epoch average training time: {average_epoch_time:.2f}s ({epoch} epochs)"
)
return checkpoint_path
def test_network(checkpoint_path, print_sample_assignments=False):
"""
Calculate test loss and generate metrics.
"""
experiment, network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint_path)
logger.info("start testing classifier")
logger.info(f"experiment:\n{experiment}")
logger.info(f"network:\n{network}")
# get test dataloader
_, _, test_loader = generate_dataloaders(experiment)
criterion = experiment.criterion
num_test_batches = math.ceil(experiment.test_size / experiment.batch_size)
num_batches_length = len(str(num_test_batches))
test_losses = []
test_accuracy = torchmetrics.Accuracy().to(DEVICE)
test_precision = torchmetrics.Precision(
num_classes=experiment.num_symbols, average="macro"
).to(DEVICE)
test_recall = torchmetrics.Recall(
num_classes=experiment.num_symbols, average="macro"
).to(DEVICE)
with torch.no_grad():
network.eval()
for batch_number, (inputs, labels) in enumerate(test_loader, start=1):
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# calculate test loss
test_loss = criterion(output, labels)
test_losses.append(test_loss.item())
batch_accuracy = test_accuracy(predictions, labels)
test_precision(predictions, labels)
test_recall(predictions, labels)
logger.info(
f"test batch {batch_number:{num_batches_length}} of {num_test_batches} | accuracy: {batch_accuracy:.4f}"
)
# log statistics
average_test_loss = np.mean(test_losses)
total_test_accuracy = test_accuracy.compute()
precision = test_precision.compute()
recall = test_recall.compute()
logger.info(
f"testing complete | average loss: {average_test_loss:.4f} | accuracy: {total_test_accuracy:.4f}"
)
logger.info(f"precision: {precision:.4f} | recall: {recall:.4f}")
if print_sample_assignments:
num_sample_assignments = 10
# num_sample_assignments = 20
# num_sample_assignments = 100
with torch.no_grad():
network.eval()
inputs, labels = next(iter(test_loader))
# inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
inputs = inputs.to(DEVICE)
with torch.random.fork_rng():
torch.manual_seed(time.time() * 1000)
permutation = torch.randperm(len(inputs))
inputs = inputs[permutation[0:num_sample_assignments]]
labels = labels[permutation[0:num_sample_assignments]]
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# reset logger, add raw messages format
logger.remove()
logger.add(sys.stderr, format="{message}")
log_file_path = pathlib.Path(checkpoint_path).with_suffix(".log")
logger.add(log_file_path, format="{message}")
assignments = network.symbol_mapper.one_hot_to_label(predictions.cpu())
labels = network.symbol_mapper.one_hot_to_label(labels)
logger.info("\nsample assignments")
logger.info("assignment | true label")
logger.info("-----------------------")
for assignment, label in zip(assignments, labels):
if assignment == label:
logger.info(f"{assignment:>10} | {label:>10}")
else:
logger.info(f"{assignment:>10} | {label:>10} !!!")
def assign_symbols(
network,
symbols_metadata,
sequences_fasta,
scientific_name=None,
taxonomy_id=None,
output_directory=None,
):
"""
Use the trained network to assign symbols to the sequences in the FASTA file.
"""
sequences_fasta_path = pathlib.Path(sequences_fasta)
if scientific_name is not None:
taxonomy_id = get_species_taxonomy_id(scientific_name)
clade = get_taxonomy_id_clade(taxonomy_id)
# logger.info(f"got clade {clade} for {scientific_name}")
if output_directory is None:
output_directory = sequences_fasta_path.parent
assignments_csv_path = pathlib.Path(
f"{output_directory}/{sequences_fasta_path.stem}_symbols.csv"
)
# read the FASTA file in chunks and assign symbols
with open(assignments_csv_path, "w+", newline="") as csv_file:
# generate a csv writer, create the CSV file with a header
field_names = ["stable_id", "symbol", "probability", "description", "source"]
csv_writer = csv.writer(csv_file, delimiter="\t", lineterminator="\n")
csv_writer.writerow(field_names)
for fasta_entries in read_fasta_in_chunks(sequences_fasta_path):
if fasta_entries[-1] is None:
fasta_entries = [
fasta_entry
for fasta_entry in fasta_entries
if fasta_entry is not None
]
identifiers = [fasta_entry[0].split(" ")[0] for fasta_entry in fasta_entries]
sequences = [fasta_entry[1] for fasta_entry in fasta_entries]
clades = [clade for _ in range(len(fasta_entries))]
assignments_probabilities = network.predict_probabilities(sequences, clades)
# save assignments and probabilities to the CSV file
for identifier, (assignment, probability) in zip(
identifiers, assignments_probabilities
):
symbol_description = symbols_metadata[assignment]["description"]
symbol_source = symbols_metadata[assignment]["source"]
csv_writer.writerow(
[
identifier,
assignment,
probability,
symbol_description,
symbol_source,
]
)
logger.info(f"symbol assignments saved at {assignments_csv_path}")
def save_network_from_checkpoint(checkpoint_path):
"""
Extract the network from a checkpoint file and save it as a separate file.
"""
_experiment, network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint_path)
path = checkpoint_path
network_path = pathlib.Path(f"{path.parent}/{path.stem}_network.pth")
torch.save(network, network_path)
return network_path
def log_pytorch_cuda_info():
"""
Log PyTorch and CUDA info and device to be used.
"""
logger.debug(f"{torch.__version__=}")
logger.debug(f"{DEVICE=}")
logger.debug(f"{torch.version.cuda=}")
logger.debug(f"{torch.backends.cudnn.enabled=}")
logger.debug(f"{torch.cuda.is_available()=}")
if torch.cuda.is_available():
logger.debug(f"{torch.cuda.device_count()=}")
logger.debug(f"{torch.cuda.get_device_properties(DEVICE)}")
def evaluate_network(checkpoint_path, complete=False):
"""
Evaluate a trained network by assigning gene symbols to the protein sequences
of genome assemblies in the latest Ensembl release, and comparing them to the existing
Xref assignments.
Args:
checkpoint_path (Path): path to the experiment checkpoint
complete (bool): Whether or not to run the evaluation for all genome assemblies.
Defaults to False, which runs the evaluation only for a selection of
the most important species genome assemblies.
"""
experiment, network, _optimizer, symbols_metadata = load_checkpoint(checkpoint_path)
symbols_set = set(symbol.lower() for symbol in experiment.symbol_mapper.categories)
assemblies = get_assemblies_metadata()
comparison_statistics_list = []
for assembly in assemblies:
if not complete and assembly.assembly_accession not in selected_genome_assemblies:
continue
canonical_fasta_filename = assembly.fasta_filename.replace(
"pep.all.fa", "pep.all_canonical.fa"
)
canonical_fasta_path = sequences_directory / canonical_fasta_filename
# assign symbols
assignments_csv_path = pathlib.Path(
f"{checkpoint_path.parent}/{canonical_fasta_path.stem}_symbols.csv"
)
if not assignments_csv_path.exists():
logger.info(f"assigning gene symbols to {canonical_fasta_path}")
assign_symbols(
network,
symbols_metadata,
canonical_fasta_path,
scientific_name=assembly.scientific_name,
output_directory=checkpoint_path.parent,
)
comparisons_csv_path = pathlib.Path(
f"{checkpoint_path.parent}/{assignments_csv_path.stem}_compare.csv"
)
if not comparisons_csv_path.exists():
comparison_successful = compare_with_database(
assignments_csv_path,
assembly.core_db,
assembly.scientific_name,
symbols_set,
)
if not comparison_successful:
continue
comparison_statistics = get_comparison_statistics(comparisons_csv_path)
comparison_statistics["scientific_name"] = assembly.scientific_name
comparison_statistics["taxonomy_id"] = assembly.taxonomy_id
comparison_statistics["clade"] = assembly.clade
comparison_statistics_list.append(comparison_statistics)
message = "{}: {} assignments, {} exact matches ({:.2f}%), {} fuzzy matches ({:.2f}%), {} total matches ({:.2f}%)".format(
comparison_statistics["scientific_name"],
comparison_statistics["num_assignments"],
comparison_statistics["num_exact_matches"],
comparison_statistics["matching_percentage"],
comparison_statistics["num_fuzzy_matches"],
comparison_statistics["fuzzy_percentage"],
comparison_statistics["num_total_matches"],
comparison_statistics["total_matches_percentage"],
)
logger.info(message)
dataframe_columns = [
"clade",
"scientific_name",
"num_assignments",
"num_exact_matches",
"matching_percentage",
"num_fuzzy_matches",
"fuzzy_percentage",
"num_total_matches",
"total_matches_percentage",
]
comparison_statistics = pd.DataFrame(
comparison_statistics_list,
columns=dataframe_columns,
)
clade_groups = comparison_statistics.groupby(["clade"])
clade_groups_statistics = []
for clade, group in clade_groups:
with pd.option_context("display.float_format", "{:.2f}".format):
group_string = group.to_string(index=False)
num_assignments_sum = group["num_assignments"].sum()
num_exact_matches_sum = group["num_exact_matches"].sum()
num_fuzzy_matches_sum = group["num_fuzzy_matches"].sum()
num_total_matches_sum = num_exact_matches_sum + num_fuzzy_matches_sum
matching_percentage_weighted_average = (
num_exact_matches_sum / num_assignments_sum
) * 100
fuzzy_percentage_weighted_average = (
num_fuzzy_matches_sum / num_assignments_sum
) * 100
total_percentage_weighted_average = (
num_total_matches_sum / num_assignments_sum
) * 100
averages_message = "{} weighted averages: {:.2f}% exact matches, {:.2f}% fuzzy matches, {:.2f}% total matches".format(
clade,
matching_percentage_weighted_average,
fuzzy_percentage_weighted_average,
total_percentage_weighted_average,
)
clade_statistics = f"{group_string}\n{averages_message}"
clade_groups_statistics.append(clade_statistics)
comparison_statistics_string = "comparison statistics:\n"
comparison_statistics_string += "\n\n".join(
clade_statistics for clade_statistics in clade_groups_statistics
)
logger.info(comparison_statistics_string)
def is_exact_match(symbol_a, symbol_b):
symbol_a = symbol_a.lower()
symbol_b = symbol_b.lower()
if symbol_a == symbol_b:
return "exact_match"
else:
return "no_exact_match"
def is_fuzzy_match(symbol_a, symbol_b):
symbol_a = symbol_a.lower()
symbol_b = symbol_b.lower()
if symbol_a == symbol_b:
return "no_fuzzy_match"
if (symbol_a in symbol_b) or (symbol_b in symbol_a):
return "fuzzy_match"
else:
return "no_fuzzy_match"
def is_known_symbol(symbol, symbols_set):
symbol = symbol.lower()
if symbol in symbols_set:
return "known"
else:
return "unknown"
def compare_with_database(
assignments_csv,
ensembl_database,
scientific_name=None,
symbols_set=None,
EntrezGene=False,
Uniprot_gn=False,
):
"""
Compare classifier assignments with the gene symbols in the genome assembly
ensembl_database core database on the public Ensembl MySQL server.
"""
assignments_csv_path = pathlib.Path(assignments_csv)
canonical_translations = get_xref_canonical_translations(
ensembl_database, EntrezGene=EntrezGene, Uniprot_gn=Uniprot_gn
)
if len(canonical_translations) == 0:
if scientific_name is None:
logger.info("0 canonical translations retrieved, nothing to compare")
else:
logger.info(
f"{scientific_name}: 0 canonical translations retrieved, nothing to compare"
)
return False
comparisons = []
with open(assignments_csv_path, "r", newline="") as assignments_file:
csv_reader = csv.reader(assignments_file, delimiter="\t")
_csv_field_names = next(csv_reader)
for csv_row in csv_reader:
csv_stable_id = csv_row[0]
classifier_symbol = csv_row[1]
probability = csv_row[2]
translation_stable_id = csv_stable_id.split(".")[0]
if (
translation_stable_id
in canonical_translations["translation.stable_id"].values
):
xref_symbol = canonical_translations.loc[
canonical_translations["translation.stable_id"]
== translation_stable_id,
"Xref_symbol",
].values[0]
comparisons.append(
(csv_stable_id, xref_symbol, classifier_symbol, probability)
)
dataframe_columns = [
"csv_stable_id",
"xref_symbol",
"classifier_symbol",
"probability",
]
compare_df = pd.DataFrame(comparisons, columns=dataframe_columns)
compare_df["exact_match"] = compare_df.apply(
lambda x: is_exact_match(x["classifier_symbol"], x["xref_symbol"]),
axis=1,
result_type="reduce",
)
compare_df["fuzzy_match"] = compare_df.apply(
lambda x: is_fuzzy_match(x["classifier_symbol"], x["xref_symbol"]),
axis=1,
result_type="reduce",
)
if symbols_set:
compare_df["known_symbol"] = compare_df.apply(
lambda x: is_known_symbol(x["xref_symbol"], symbols_set),
axis=1,
result_type="reduce",
)
comparisons_csv_path = pathlib.Path(
f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.csv"
)
compare_df.to_csv(comparisons_csv_path, sep="\t", index=False)
return True
def get_comparison_statistics(comparisons_csv_path):
compare_df = pd.read_csv(comparisons_csv_path, sep="\t", index_col=False)
num_assignments = len(compare_df)
if num_assignments > 0:
num_exact_matches = len(compare_df[compare_df["exact_match"] == "exact_match"])
num_fuzzy_matches = len(compare_df[compare_df["fuzzy_match"] == "fuzzy_match"])
matching_percentage = (num_exact_matches / num_assignments) * 100
fuzzy_percentage = (num_fuzzy_matches / num_assignments) * 100
num_total_matches = num_exact_matches + num_fuzzy_matches
total_matches_percentage = (num_total_matches / num_assignments) * 100
comparison_statistics = {
"num_assignments": num_assignments,
"num_exact_matches": num_exact_matches,
"matching_percentage": matching_percentage,
"num_fuzzy_matches": num_fuzzy_matches,
"fuzzy_percentage": fuzzy_percentage,
"num_total_matches": num_total_matches,
"total_matches_percentage": total_matches_percentage,
}
else:
comparison_statistics = {
"num_assignments": 0,
"num_exact_matches": 0,
"matching_percentage": 0,
"num_fuzzy_matches": 0,
"fuzzy_percentage": 0,
"num_total_matches": 0,
"total_matches_percentage": 0,
}
return comparison_statistics
def compare_assignments(
assignments_csv, ensembl_database, scientific_name, checkpoint=None
):
"""Compare assignments with the ones on the latest Ensembl release."""
assignments_csv_path = pathlib.Path(assignments_csv)
log_file_path = pathlib.Path(
f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.log"
)
logger.add(log_file_path, format=logging_format)
if checkpoint is None:
symbols_set = None
else:
experiment, _network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint)
symbols_set = set(
symbol.lower() for symbol in experiment.symbol_mapper.categories
)
comparisons_csv_path = pathlib.Path(
f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.csv"
)
if not comparisons_csv_path.exists():
compare_with_database(
assignments_csv_path, ensembl_database, scientific_name, symbols_set
)
comparison_statistics = get_comparison_statistics(comparisons_csv_path)
taxonomy_id = get_species_taxonomy_id(scientific_name)
clade = get_taxonomy_id_clade(taxonomy_id)
comparison_statistics["scientific_name"] = scientific_name
comparison_statistics["taxonomy_id"] = taxonomy_id
comparison_statistics["clade"] = clade
message = "{} assignments, {} exact matches ({:.2f}%), {} fuzzy matches ({:.2f}%), {} total matches ({:.2f}%)".format(
comparison_statistics["num_assignments"],
comparison_statistics["num_exact_matches"],
comparison_statistics["matching_percentage"],
comparison_statistics["num_fuzzy_matches"],
comparison_statistics["fuzzy_percentage"],
comparison_statistics["num_total_matches"],
comparison_statistics["total_matches_percentage"],
)
logger.info(message)
dataframe_columns = [
"clade",
"scientific_name",
"num_assignments",
"num_exact_matches",
"matching_percentage",
"num_fuzzy_matches",
"fuzzy_percentage",
"num_total_matches",
"total_matches_percentage",
]
comparison_statistics = pd.DataFrame(
[comparison_statistics],
columns=dataframe_columns,
)
with
|
pd.option_context("display.float_format", "{:.2f}".format)
|
pandas.option_context
|
import pandas as pd
import numpy as np
import re
from unidecode import unidecode
# Map district in Kraków to integers.
# For details see:
# https://en.wikipedia.org/wiki/Districts_of_Krak%C3%B3w
districts = {'stare miasto': 1,
'grzegórzki': 2,
'prądnik czerwony': 3,
'prądnik biały': 4,
'krowodrza': 5,
'bronowice': 6,
'zwierzyniec': 7,
'dębniki': 8,
'łagiewniki': 9,
'borek fałęcki': 9,
'swoszowice': 10,
'podgórze duchackie': 11,
'bieżanów': 12,
'prokocim': 12,
'podgórze': 13,
'czyżyny': 14,
'mistrzejowice': 15,
'bieńczyce': 16,
'wzgórza krzesławickie': 17,
'nowa huta': 18}
# Remove Polish characters from key names
for key in list(districts.keys()):
districts[unidecode(key)] = districts.pop(key)
# Translate data from Polish to English.
translation = {'Cena': 'Price',
'Lokalizacja': 'Location',
'Data dodania': 'Date',
'Na sprzedaż przez': 'Seller',
'Rodzaj nieruchomości': 'Property',
'Liczba pokoi': 'Rooms',
'Liczba łazienek': 'Bathrooms',
'Wielkość (m2)': 'Area',
'Parking': 'Parking',
'Tytuł': 'Title',
'Opis': 'Description',
'Link': 'Link'}
def remove_polish_characters(x):
"""
Remove Polish characters.
Examples
--------
>>> remove_polish_characters('ąćęłńóśźż')
'acelnoszz'
"""
if pd.isnull(x):
return x
else:
x = unidecode(x)
return x
def parse_price(x):
"""
Convert a price string to an integer value.
Parameters
----------
x : str
Row from price column.
Returns
-------
int :
Price of the property.
Example
-------
>>> parse_price('349\xa0000 zł')
349000
>>> parse_price('349 000 zł')
349000
>>> parse_price('349\xa0000')
349000
>>> parse_price('349000')
349000
>>> parse_price(349000)
349000
>>> parse_price(349000.1235)
349000
>>> parse_price(np.nan)
nan
>>> parse_price('Proszę o kontakt')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.replace('\xa0', '')
x = x.replace('zł', '')
x = x.replace(' ', '')
x = x.strip()
try:
x = int(x)
except ValueError:
x = np.nan
return x
elif isinstance(x, int):
return x
elif isinstance(x, float):
x = int(x)
return x
else:
return np.nan
def extract_currency(x):
"""
Extract the currency from the price column.
Examples
--------
>>> extract_currency('123000zł')
'pln'
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'zł' in x or 'zl' in x or 'pln' in x:
return 'pln'
else:
return np.nan
else:
return np.nan
def parse_bathrooms(x):
"""
Extract first digit from string
describing the number of bathrooms.
Parameters
----------
x : str
String describing the number of bathrooms.
Returns
-------
int :
The number of bathrooms or nan.
Examples
--------
>>> parse_bathrooms('1 łazienka')
1
>>> parse_bathrooms('2 łazienki')
2
>>> parse_bathrooms('4')
4
>>> parse_bathrooms(3)
3
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = [s for s in x if s.isdigit()]
if x:
return int(x[0])
else:
return np.nan
elif isinstance(x, int):
return x
elif isinstance(x, float):
return int(x)
else:
return np.nan
def parse_rooms(x):
"""
Extract the first digit in a string
describing the number of rooms.
Parameters
----------
x : str
Row of rooms column.
Returns
-------
int
The number of rooms in the property.
Examples
--------
>>> parse_rooms('2 pokoje')
2
>>> parse_rooms('5')
5
>>> parse_rooms('3')
3
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
# Check for special
# cases first
x = x.lower()
if 'kawalerka' in x:
return 1
elif 'garsoniera' in x:
return 1
else:
# If not special case extract
# first digit in string.
x = [s for s in x if s.isdigit()]
if x:
return int(x[0])
else:
return np.nan
elif isinstance(x, float):
return int(x)
elif isinstance(x, int):
return x
else:
return np.nan
def extract_city(x):
"""
Extract city from location column.
Parameters
----------
x : str
Row of location column.
Returns
-------
str :
Kraków if the property is
located in Kraków else nan.
Examples
--------
>>> extract_city('Piotra Stachiewicza, Kraków-Krowodrza, Kraków')
'kraków'
>>> extract_city('os. Na Stoku, Kraków-Nowa Huta, Kraków')
'kraków'
>>> extract_city('Modlniczka, Wielka Wieś, krakowski')
nan
>>> extract_city('random string')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.split(',')
x = [s.strip().lower() for s in x]
if 'kraków' in x or 'krakow' in x or 'cracow' in x:
return 'kraków'
else:
return np.nan
else:
return np.nan
def extract_district(x):
"""
Extract district from location column.
Parameters
----------
x : str
Row from location column.
Returns
-------
str :
The district where the property is located.
Examples
--------
>>> extract_district('Piotra Stachiewicza, Kraków-Krowodrza, Kraków')
'krowodrza'
>>> extract_district('os. Na Stoku, Kraków-Nowa Huta, Kraków')
'nowa huta'
>>> extract_district('Modlniczka, Wielka Wieś, krakowski')
nan
>>> extract_district('random string')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
x = x.replace('kraków', '')
x = x.replace(',', ' ')
x = x.replace('-', ' ')
x = x.replace('.', ' ')
x = x.split(' ')
x = [s.replace(' ', '') for s in x if s != '']
x = ' '.join(x)
if x == '':
return np.nan
else:
for key in districts:
if key in x:
return key
return np.nan
def parse_seller(x):
"""
Translate seller column to english.
"""
if
|
pd.isnull(x)
|
pandas.isnull
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 24 13:28:14 2021
@author: Raghavakrishna
Copy of the "post_processing.py" from 08th Aug 2021
"""
import pandas as pd
from datetime import timedelta
import datetime as dt
import statistics
import datetime
from sqlalchemy import create_engine
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.units as munits
from uncertainties import ufloat
from uncertainties import unumpy
from uncertainties import umath
import tabulate
def prRed(skk): print("\033[31;1;m {}\033[00m" .format(skk))
def prYellow(skk): print("\033[33;1;m {}\033[00m" .format(skk))
#%%
class ResidenceTimeMethodError(ValueError):
def __str__(self):
return 'You need to select a valid method: iso, trapez or simpson (default)'
#%%
class CBO_ESHL:
def __init__(self, experiment = "W_I_e0_Herdern",
testo_sensor = "2a_testo",
aperture_sensor = "2l",
column_name = 'hw_m/sec'):
"""
Takes 3 inputs some of them are not necessary for certain methods.
Input Parameters
----------
experiment : str
The name of the experiment available in the master_time_sheet.xlsx or in my thesis.
testo_sensor : str
The nomenclature used to identify Testo sensors. This
input is required to evaluate the Testo data, e.g. wind speed.
column_name : str
The column name used when saving Testo data. The column name
also indicates the units used for the measured parameter and
thus already describes what the sensor is measuring.
Imported Parameters
----------
t0 : datetime
The actual start of the experiment.
tn : datetime
The approximate end of the experiment.
tau_nom : float
The nominal time constant of the measurement obtained from
master_time_sheet.xlsx
"""
excel_sheet = "master_time_sheet.xlsx"
self.times = pd.read_excel(excel_sheet, sheet_name = "Sheet1")
self.input = pd.read_excel(excel_sheet, sheet_name = "inputs")
self.experiment = experiment
self.testo_sensor = testo_sensor
self.column_name = column_name
#self.engine = create_engine("mysql+pymysql://wojtek:Password#<EMAIL>/",pool_pre_ping=True)
self.engine = create_engine("mysql+pymysql://root:Password123@localhost/",pool_pre_ping=True)
self.aperture_sensor = aperture_sensor
self.database = self.times[self.times["experiment"] == self.experiment].iloc[0,3]
self.t0 = self.times[self.times["experiment"] == experiment].iloc[0,1]
self.tn = self.times[self.times["experiment"] == experiment].iloc[0,2]
self.exclude = self.times[self.times["experiment"] == experiment].iloc[0,4].split(",")
self.calibration = self.times[self.times["experiment"] == experiment].iloc[0,5]
# self.engine1 = create_engine("mysql+pymysql://wojtek:Password#<EMAIL>/{}".format(self.calibration),pool_pre_ping=True)
self.engine1 = create_engine("mysql+pymysql://root:Password123@localhost/{}".format(self.calibration),pool_pre_ping=True)
self.wall_database = self.times[self.times["experiment"] == experiment].iloc[0,6]
self.testos = ["1a_testo","2a_testo","3a_testo","4a_testo"]
self.t0_20 = self.t0 - timedelta(minutes = 20)
self.tn_20 = self.tn + timedelta(minutes = 20)
self.tau_nom = self.input.loc[self.input["experiment"] == self.experiment]["tau_nom"].iat[0]
def wind_velocity_indoor(self):
"""
Summarises the indoor air speed measured by the selected Testo sensor.
The sensor data is loaded from the experiment database, truncated to the
experiment period [t0, tn] and summarised.
Returns
-------
pandas.DataFrame
Summary statistics (count, mean, std, min, max) of the 'hw_m/sec' column,
together with the experiment metadata (experiment, sensor name,
column name, start and end time).
"""
self.df1 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, self.testo_sensor, self.t0_20, self.tn_20), con = self.engine)
self.df2 = self.df1.loc[:, ["datetime", "hw_m/sec"]]
self.df2 = self.df2.set_index("datetime")
self.df2 = self.df2.truncate(str(self.t0), str(self.tn) )
self.stats = self.df2.describe().iloc[[0,1,2,3,7],:]
self.stats.columns = ["values"]
self.data = {"values":[self.experiment, self.testo_sensor, "hw_m/sec", self.t0, self.tn]}
self.empty_df = pd.DataFrame(self.data, index =['experiment',
'sensor name',
'column name', "Start", "End"])
self.res = pd.concat([self.empty_df, self.stats], axis = 0)
return self.res
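# Illustrative call (a sketch, assuming the default sensor/database names above):
#   cbo = CBO_ESHL(experiment="W_I_e0_Herdern", testo_sensor="2a_testo")
#   summary = cbo.wind_velocity_indoor()  # metadata rows + count/mean/std/min/max of 'hw_m/sec'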
def wind_velocity_outdoor(self):
self.df1 = pd.read_sql_query("SELECT * FROM weather.weather_all WHERE datetime BETWEEN '{}' AND '{}'".format( self.t0_20, self.tn_20), con = self.engine)
self.df2 = self.df1.loc[:, ["datetime", "Wind Speed, m/s", "Gust Speed, m/s", "Wind Direction"]]
self.df2 = self.df2.set_index("datetime")
self.df2 = self.df2.truncate(str(self.t0), str(self.tn) )
self.stats = self.df2.describe().iloc[[0,1,2,3,7],:]
self.empty_df = pd.DataFrame(index =['experiment',
'table name', 'Start', 'End'],
columns =["Wind Speed, m/s", "Gust Speed, m/s", "Wind Direction"])
self.empty_df.loc["experiment", ["Wind Speed, m/s","Gust Speed, m/s", "Wind Direction"]] = self.experiment
self.empty_df.loc["table name", ["Wind Speed, m/s","Gust Speed, m/s", "Wind Direction"]] = "weather_all"
self.empty_df.loc["Start", ["Wind Speed, m/s","Gust Speed, m/s", "Wind Direction"]] = self.t0
self.empty_df.loc["End", ["Wind Speed, m/s","Gust Speed, m/s", "Wind Direction"]] = self.tn
self.res = pd.concat([self.empty_df, self.stats], axis = 0)
return self.res
def aussen(self, plot = False, save = False):
"""
This method calculates the outdoor CO2 concentration from the HOBO sensor.
It can also produce a graph of the outdoor CO2 data, smoothed with a 120-second rolling mean.
Parameters
----------
plot : BOOL, optional
if True displays a graph. The default is False.
save : BOOL, optional
If True saves in the current directory. The default is False.
You can also change the plot saving and rendering settings in the code
Returns
-------
dictionary
The dictionary contains the mean , std , max and min of CO2 for the
experimental period.
"""
if self.experiment == "S_I_e0_Herdern" or self.experiment == "S_I_e1_Herdern":
self.Cout = {'meanCO2': 445.1524174626867,
'sgm_CO2': 113.06109664245112,
'maxCO2': 514.3716999999999,
'minCO2': 373.21639999999996}
self.cout_mean, self.cout_max, self.cout_min = 445.1524174626867, 514.3716999999999, 373.21639999999996
if plot:
print("The outdoor plot for this experiment is missing due to lack of data")
return self.Cout
else:
accuracy1 = 50 # from the uncertainty equation for the Testo 450 XL
accuracy2 = 0.02 # ±(50 ppm CO2 ± 2% of mv) (0 to 5000 ppm CO2)
accuracy3 = 50 # the same equation for the second Testo 450 XL
accuracy4 = 0.02
accuracy5 = 75 # the same equation for the Testo 480
accuracy6 = 0.03 # Citavi Title: Testo AG
'''
The following if/else statement imports the right data for the
calibration offset equation.
There are two time periods where calibration was done.
'''
'''Standard syntax to import SQL data as a dataframe:
self.engine holds the measurement campaign data and engine1 the calibration data'''
'''Calibration data is imported'''
reg_result = pd.read_sql_table("reg_result", con = self.engine1).drop("index", axis = 1)
'''Calibration data for this particular sensor alone is filtered'''
res = reg_result[reg_result['sensor'].str.lower() == "außen"].reset_index(drop = True)
'''This filters the HOBOs from the Testos: the HOBOs have a res entry, the Testos do not,
because they have no experimental calibration offset'''
if res.shape[0] == 1:
''' The imported SQL data is cleaned and columns are renamed to suit our calculation'''
self.sensor_df3 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, "außen", self.t0, self.tn) , self.engine).drop('index', axis =1)
self.sensor_df3['CO2_ppm_reg'] = self.sensor_df3.eval(res.loc[0, "equation"])
self.sensor_df3_plot = self.sensor_df3.copy()
self.sensor_df3 = self.sensor_df3.rename(columns = {'CO2_ppm':'CO2_ppm_original', 'CO2_ppm_reg': 'C_CO2 in ppm'})
self.sensor_df3 = self.sensor_df3.drop_duplicates(subset=['datetime'])
self.sensor_df3 = self.sensor_df3.loc[:, ["datetime", "C_CO2 in ppm", "CO2_ppm_original"]]
self.sensor_df3 = self.sensor_df3.dropna()
'''This is the absolute uncertainty at each point of measurement, saved in the
dataframe at each timestamp. Ref: equation D2 in DIN ISO 16000-8:2008-12'''
'''For ESHL summer we would ideally take the mean of all three sensors and also propagate
the uncertainties of all three Testo sensors. This is not done here at the moment,
but to obtain the largest possible uncertainty we propagate the uncertainty first.'''
# Why RSE ? https://stats.stackexchange.com/questions/204238/why-divide-rss-by-n-2-to-get-rse
self.sensor_df3["s_meas"] = np.sqrt(np.square((self.sensor_df3["C_CO2 in ppm"] * accuracy2)) + np.square(accuracy1) + np.square((self.sensor_df3["C_CO2 in ppm"] * accuracy4)) + np.square(accuracy3) + np.square((self.sensor_df3["C_CO2 in ppm"] * accuracy6)) + np.square(accuracy5)+ np.square(res.loc[0, "rse"]))
# The measurement uncertainty certainly depends to some extent on the concentration range. DIN ISO 16000-8:2008-12 (page 36)
x = self.sensor_df3["datetime"][2] - self.sensor_df3["datetime"][1]
self.sec3 = int(x.total_seconds())
if plot:
self.sensor_df3_plot = self.sensor_df3_plot.loc[:,['datetime', 'temp_°C', 'RH_%rH', 'CO2_ppm_reg']]
self.sensor_df3_plot = self.sensor_df3_plot.set_index("datetime")
self.sensor_df3_plot = self.sensor_df3_plot.rolling(int(120/self.sec3)).mean()
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
fig, host = plt.subplots()
fig.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
par2.spines["right"].set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(par2)
# Second, show the right spine.
par2.spines["right"].set_visible(True)
p1, = host.plot(self.sensor_df3_plot.index, self.sensor_df3_plot['temp_°C'], "b-", label="Temperature (°C)", linewidth=1)
p2, = par1.plot(self.sensor_df3_plot.index, self.sensor_df3_plot['CO2_ppm_reg'], "r--", label="CO2 (ppm)", linewidth=1)
p3, = par2.plot(self.sensor_df3_plot.index, self.sensor_df3_plot['RH_%rH'], "g-.", label="RH (%)", linewidth=1)
# host.set_xlim(0, 2)
host.set_ylim(0, 30)
par1.set_ylim(0, 3000)
par2.set_ylim(0, 100)
host.set_xlabel("Time")
host.set_ylabel("Temperature (°C)")
par1.set_ylabel(r'$\mathrm{CO_2 (ppm)} $')
par2.set_ylabel("RH (%)")
host.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
host.tick_params(axis='x', **tkw)
import matplotlib.dates as mdates
locator = mdates.AutoDateLocator(minticks=3, maxticks=11)
formatter = mdates.ConciseDateFormatter(locator)
host.xaxis.set_major_locator(locator)
host.xaxis.set_major_formatter(formatter)
lines = [p1, p2, p3]
plt.title("Outdoor data for {}".format(self.experiment))
host.legend(lines, [l.get_label() for l in lines])
if save:
plt.savefig('{} outdoor data (HOBO)'.format(self.experiment), bbox_inches='tight', dpi=400)
plt.show()
"""
Creating a runtime column with t0 as 0 or centre of the time axes
"""
t0_cd = self.sensor_df3['datetime'].loc[0]
while not(self.t0 in self.sensor_df3["datetime"].to_list()):
self.t0 = self.t0 + dt.timedelta(seconds=1)
# print(self.t0)
dtl_t0 = (self.t0 - t0_cd)//dt.timedelta(seconds=1)
"""
Calculates the elapsed time stored in the array x as an integer number of seconds
"""
endpoint = len(self.sensor_df3) * self.sec3 - dtl_t0
"""
Creates an array starting at -dtl_t0 up to endpoint with step size sec3.
"""
x = np.arange(-dtl_t0,endpoint,self.sec3)
self.sensor_df3['runtime'] = x
self.sensor_df2 = self.sensor_df3.set_index('datetime')
self.rhg = pd.date_range(self.sensor_df2.index[0], self.sensor_df2.index[-1], freq=str(self.sec3)+'S')
self.au_mean = self.sensor_df2.reindex(self.rhg).interpolate()
self.au_mean['C_CO2 in ppm_out'] = self.au_mean['C_CO2 in ppm']
self.cout_max = self.au_mean['C_CO2 in ppm_out'].max()
self.cout_min = self.au_mean['C_CO2 in ppm_out'].min()
self.cout_mean = self.au_mean['C_CO2 in ppm_out'].mean()
"""
The default value (499±97) ppm (kp=2) has been calculated as the average CO2
concentration of the available outdoor measurement data in
...\CO2-concentration_outdoor\.
However, the value should be set as a list of data points of the natural
outdoor concentration for a time interval covering the measurement interval.
In the future it would be great to have a dataframe with CO2 concentrations for
corresponding time stamps.
"""
self.Cout = {'meanCO2':self.cout_mean,
'sgm_CO2':self.au_mean["s_meas"].mean(), # More clarification needed on the uncertainty
'maxCO2':self.cout_max,
'minCO2':self.cout_min}
return self.Cout
def mean_curve(self, plot = False, method='simpson'):
"""
method:
'iso' The method described in ISO 16000-8 will be applied;
however, this method has a weak uncertainty analysis.
'trapez' Corrected ISO 16000-8 method applying the trapezoidal rule
for the interval integration and considering this in the
uncertainty evaluation.
'simpson' (default) Applies the Simpson rule for the integration and
consequently considers this in the uncertainty evaluation.
"""
self.names = pd.read_sql_query('SHOW TABLES FROM {}'.format(self.database), con = self.engine)
self.names = self.names.iloc[:,0].to_list()
self.new_names = [x for x in self.names if (x not in self.exclude)]
accuracy1 = 50 # from the uncertainty equation for the Testo 450 XL
accuracy2 = 0.02 # ±(50 ppm CO2 ± 2% of mv) (0 to 5000 ppm CO2)
accuracy3 = 50 # the same equation for the second Testo 450 XL
accuracy4 = 0.02
accuracy5 = 75 # the same equation for the Testo 480
accuracy6 = 0.03 # Citavi Title: Testo AG
self.cdf_list, self.df_tau, self.tau_hr, self.s_total_abs_hr = [], [], [], []
for self.table in self.new_names:
self.cdf1 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, self.table, self.t0, self.tn), con = self.engine)
self.cdf2 = self.cdf1.loc[:,["datetime", "CO2_ppm"]]
self.reg_result = pd.read_sql_table("reg_result", con = self.engine1).drop("index", axis = 1)
'''Calibration data for the particular sensor alone is filtered '''
self.res = self.reg_result[self.reg_result['sensor'].str.lower() == self.table].reset_index(drop = True)
if "testo" not in self.table:
self.cdf2['CO2_ppm_reg'] = self.cdf2.eval(self.res.loc[0, "equation"])
self.cdf2 = self.cdf2.rename(columns = {'CO2_ppm':'CO2_ppm_original', 'CO2_ppm_reg': 'CO2_ppm'})
self.cdf2 = self.cdf2.drop_duplicates(subset=['datetime'])
self.cdf2 = self.cdf2.loc[:, ["datetime", "CO2_ppm"]]
self.cdf2 = self.cdf2.dropna()
if self.cdf2["CO2_ppm"].min() < self.aussen()["meanCO2"]:
self.cdf2.loc[:,"CO2_ppm"] = self.cdf2.loc[:,"CO2_ppm"] - (self.cdf2.loc[:,"CO2_ppm"].min() - 3)
else:
self.cdf2.loc[:,"CO2_ppm"] = self.cdf2.loc[:,"CO2_ppm"] - self.aussen()["meanCO2"]
self.cdf2 = self.cdf2.fillna(method="bfill", limit=2)
self.cdf2 = self.cdf2.fillna(method="pad", limit=2)
self.cdf2.columns = ["datetime", str(self.table)]
self.cdf2["log"] = np.log(self.cdf2[str(self.table)])
self.diff_sec = (self.cdf2["datetime"][1] - self.cdf2["datetime"][0]).seconds
self.cdf2["s_meas"] = np.sqrt(np.square((self.cdf2[str(self.table)] * accuracy2))
+ np.square(accuracy1) + np.square((self.cdf2[str(self.table)] * accuracy4))
+ np.square(accuracy3) + np.square((self.cdf2[str(self.table)] * accuracy6))
+ np.square(accuracy5))
self.ns_meas = self.cdf2['s_meas'].mean()
self.n = len(self.cdf2['s_meas'])
### ISO 16000-8 option to calculate the slope (defined to be calculated by spreadsheet/Excel)
self.cdf2["runtime"] = np.arange(0,len(self.cdf2) * self.diff_sec, self.diff_sec)
self.cdf2["t-te"] = self.cdf2["runtime"] - self.cdf2["runtime"][len(self.cdf2)-1]
self.cdf2["lnte/t"] = self.cdf2["log"] - self.cdf2["log"][len(self.cdf2)-1]
self.cdf2["slope"] = self.cdf2["lnte/t"] / self.cdf2["t-te"]
try:
if method=='iso':
self.slope = self.cdf2["slope"].mean()
self.sumconz = self.cdf2["CO2_ppm"].iloc[1:-1].sum()
self.area_sup = (self.diff_sec * (self.cdf2[str(self.table)][0]/2 + self.sumconz + self.cdf2[str(self.table)][len(self.cdf2)-1]/2))
self.cdf2.loc[[len(self.cdf2)-1], "slope"] = abs(self.slope)
self.s_phi_e = self.cdf2["slope"][:-1].std()/abs(self.slope)
self.s_lambda = self.cdf2["slope"][:-1].std()/abs(self.cdf2["slope"][:-1].mean())
print('ATTENTION: The ISO 16000-8 method has a weak uncertainty evaluation; consider using the trapezoidal method, which corrects this.')
elif method=='trapez':
### More accurate option to calculate the slope of each (sub-)curve
self.x1 = self.cdf2["runtime"].values
self.y1 = self.cdf2["log"].values
from scipy.stats import linregress
self.slope = -linregress(self.x1,self.y1)[0]
self.reg_slope = linregress(self.x1,self.y1)
self.cdf2.loc[[len(self.cdf2)-1], "slope"] = -self.reg_slope.slope
self.s_phi_e = (-self.cdf2["t-te"][len(self.cdf2)-1] * self.reg_slope.intercept_stderr)
self.s_lambda = (-self.cdf2["t-te"][len(self.cdf2)-1] * self.reg_slope.stderr)
from numpy import trapz
self.area_sup = np.trapz(self.cdf2[str(self.table)].values, dx=self.diff_sec) # proof that both methods have same answer: area_sup_2 = area_sup_1
print('ATTENTION: Trapezoidal method is used in ISO 16000-8 and here also considered in the uncertainty evaluation. However, more precise results are given by applying the Simpson-Rule.')
elif method=='simpson':
### More accurate option to calculate the slope of each (sub-)curve
self.x1 = self.cdf2["runtime"].values
self.y1 = self.cdf2["log"].values
from scipy.stats import linregress
self.slope = -linregress(self.x1,self.y1)[0]
self.reg_slope = linregress(self.x1,self.y1)
self.cdf2.loc[[len(self.cdf2)-1], "slope"] = -self.reg_slope.slope
self.s_phi_e = (-self.cdf2["t-te"][len(self.cdf2)-1] * self.reg_slope.intercept_stderr)
self.s_lambda = (-self.cdf2["t-te"][len(self.cdf2)-1] * self.reg_slope.stderr)
from scipy.integrate import simpson
self.area_sup = sc.integrate.simpson(self.cdf2[str(self.table)].values, dx=self.diff_sec, even='first') # proof that both methods have same answer: area_sup_2 = area_s
else:
raise ResidenceTimeMethodError
except ResidenceTimeMethodError as err:
print(err)
self.a_rest = self.cdf2[str(self.table)].iloc[-1]/abs(self.slope)
self.a_tot = self.area_sup + self.a_rest
self.tau = (self.area_sup + self.a_rest)/self.cdf2[str(self.table)][0]
try:
if method =='iso':
# Taken from DIN ISO 16000-8:2008-12, Equation D2 units are cm3.m-3.sec
self.sa_num = self.ns_meas * (self.diff_sec) * ((self.n - 1)/np.sqrt(self.n)) # Taken from DIN ISO 16000-8:2008-12, Equation D2 units are cm3.m-3.sec
# The uncertainty of the summed trapezoidal method itself is not covered by ISO 16000-8.
self.sa_tm = 0
elif method =='trapez':
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
self.sa_num = (self.diff_sec) * self.ns_meas * np.sqrt((2*self.n -1)/2*self.n )
# Additionally, the summed trapezoidal method itself has an uncertainty as well.
self.sa_tm = self.diff_sec**2/12*(self.cdf2["runtime"].loc[len(self.cdf2)-1]- self.cdf2["runtime"][0])*self.cdf2[str(self.table)][0]/self.tau**2
elif method =='simpson':
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
self.sa_num = 1/3*self.diff_sec*self.ns_meas*np.sqrt(2+20*round(self.n/2-0.5))
# Additionally, the Simpson integration itself has an uncertainty as well.
self.sa_tm = self.diff_sec**4/2880*(self.cdf2["runtime"].loc[len(self.cdf2)-1]-self.cdf2["runtime"][0])*self.cdf2[str(self.table)][0]/self.tau**4
else:
raise ResidenceTimeMethodError
except ResidenceTimeMethodError as err:
print(err)
self.s_rest = np.sqrt(pow(self.s_lambda,2) + pow(self.s_phi_e,2))
self.sa_rest = self.s_rest * self.a_rest
self.s_area = np.sqrt(pow(self.sa_num,2) + pow(self.sa_tm,2) + pow(self.sa_rest,2))/self.a_tot # s_area is a relative uncertainty in percent
self.s_total = np.sqrt(pow(self.s_area,2) + pow(0.05,2))
self.s_total_abs = self.s_total * self.tau
self.tau_hr.append(self.tau/3600)
self.cdf2["tau_hr"] = self.tau/3600
self.cdf2.loc[:, "s_total"] = self.s_total
self.cdf2.loc[:, "s_total_abs_hr"] = self.s_total_abs/3600
self.s_total_abs_hr.append(self.s_total_abs/3600)
self.df_tau.append(self.cdf2)
self.cdf3 = self.cdf2.loc[:, ["datetime", str(self.table)]]
self.cdf3 = self.cdf3.set_index("datetime")
self.cdf_list.append(self.cdf3)
self.mega_cdf = pd.concat(self.cdf_list,axis = 1).interpolate(method = "linear")
# self.mega_cdf.columns = self.new_names
self.mega_cdf["mean_delta"] = self.mega_cdf.mean(axis = 1)
self.mega_cdf["std mean_delta"] = self.mega_cdf.std(axis = 1)
# self.mega_cdf = self.mega_cdf.set_index("datetime")
self.mega_cdf = self.mega_cdf.fillna(method="bfill", limit=2)
self.mega_cdf = self.mega_cdf.fillna(method="pad", limit=2)
self.tau_hr_mean = np.mean(self.tau_hr)
self.s_tau_hr_mean = (np.sqrt(pow(np.array(self.s_total_abs_hr),2).sum())
+ statistics.variance(self.tau_hr))/(len(self.tau_hr) - 1)
if plot:
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib"
#######################################################################
pd.options.plotting.backend = "plotly"
import plotly.io as pio
pio.renderers.default='browser'
import plotly.express as px
fig = px.line(self.mega_cdf, x=self.mega_cdf.index, y=self.mega_cdf.columns, title="mean of {}".format(self.experiment))
fig.show()
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib"
#self.df_tau, self.mega_cdfv
return [self.tau_hr_mean, self.s_tau_hr_mean], self.df_tau, self.mega_cdf
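# Illustrative call (a sketch; the return structure matches the return statement above):
#   (tau_mean_hr, s_tau_hr), per_sensor_dfs, mean_df = CBO_ESHL().mean_curve(method='simpson')
#   # tau_mean_hr    - mean air age in hours, averaged over all sensors
#   # per_sensor_dfs - list of per-sensor DataFrames incl. runtime/slope/tau_hr columns
#   # mean_df        - concatenated per-sensor concentrations with 'mean_delta' column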
def decay_curve_comparison_plot(self, save = False):
"""
This method produces a plot that shows the decay curve of the selected
experiment and the corresponding curves that would result from fully
mixed ventilation or ideal plug-flow ventilation.
Run this method to see the graph; it will make more sense.
Parameters
----------
save : BOOL, optional
if True saves the plot to the default directory. The default is False.
Returns
-------
figure
returns a figure.
"""
self.d = self.mean_curve()[2].loc[:,["mean_delta"]]
self.d['mean_delta_norm'] = self.d["mean_delta"]/self.d["mean_delta"].iat[0]
self.d["runtime"] = np.arange(0,len(self.d) * self.diff_sec, self.diff_sec)
self.d["min"] = self.d["runtime"]/(np.mean(self.tau_nom) * 3600)
self.d["min"] = 1 - self.d["min"]
self.slope = 1/(np.mean(self.tau_hr) * 3600)
self.fig, ax = plt.subplots()
def func(x, a, b):
return a * np.exp(-b * x)
self.slope_50 = 1/(self.tau_nom * 3600)
y_50 = func(self.d["runtime"].values, 1, self.slope_50)
self.d["ea_50"] = y_50
self.d["ea_50_max"] = self.d[["min", "ea_50"]].max(axis = 1)
self.d["mean_delta_norm_max"] = self.d[["min", "mean_delta_norm"]].max(axis = 1)
ax.plot(self.d["runtime"], self.d["ea_50_max"].values, label = "50 % efficiency (estimated)")
ax.plot(self.d["runtime"], self.d["mean_delta_norm_max"].values, label = "{} % efficiency (measured)".format(round(self.tau_nom/(np.mean(self.tau_hr)*2) * 100) ))
ax.plot(self.d["runtime"], self.d["min"].values, label = "maximum effieiency (estimated)")
ax.set_xlabel("time (sec)")
ax.set_ylabel("CO2 (normalized)")
ax.set_title("Decay curves for {}".format(self.experiment))
ax.legend()
if save:
self.fig.savefig("{} decay curve comparison".format(self.experiment))
return self.fig
def outdoor_data(self):
"""
This method calculates the mean, std, max and min of the parameters measured
outdoors at the measurement site.
The outdoor data comes from two sources. 1) from the HOBO sensor
2) From the weather station
Returns
-------
dataframe
The dataframe contains the summary of the parameters for the selected
experiment period
"""
adf = pd.read_sql_query("SELECT * FROM weather.außen WHERE datetime BETWEEN '{}' AND '{}'".format(self.t0,self.tn), con = self.engine).drop("index", axis = 1).set_index("datetime")
wdf = pd.read_sql_query("SELECT * FROM weather.weather_all WHERE datetime BETWEEN '{}' AND '{}'".format(self.t0,self.tn), con = self.engine).set_index("datetime")
data = [
[adf['temp_°C'].mean(), adf['temp_°C'].std(), adf['temp_°C'].max(), adf['temp_°C'].min()],
[adf['RH_%rH'].mean(), adf['RH_%rH'].std(), adf['RH_%rH'].max(), adf['RH_%rH'].min()],
[self.aussen()["meanCO2"], self.Cout["sgm_CO2"], self.Cout["maxCO2"], self.Cout["minCO2"]],
[wdf["Wind Speed, m/s"].mean(), wdf["Wind Speed, m/s"].std(), wdf["Wind Speed, m/s"].max(), wdf["Wind Speed, m/s"].min()],
[wdf["Gust Speed, m/s"].mean(), wdf["Gust Speed, m/s"].std(), wdf["Gust Speed, m/s"].max(), wdf["Gust Speed, m/s"].min()],
[wdf["Wind Direction"].mean(), wdf["Wind Direction"].std(), wdf["Wind Direction"].max(), wdf["Wind Direction"].min()],
[wdf["Temperature °C"].mean(), wdf["Temperature °C"].std(), wdf["Temperature °C"].max(), wdf["Temperature °C"].min()],
[wdf["RH %"].mean(), wdf["RH %"].std(), wdf["RH %"].max(), wdf["RH %"].min()]
]
self.outdoor_summary = pd.DataFrame(data = data, index = ["temp_°C","RH_%rH", "CO2_ppm", "Wind Speed, m/s","Gust Speed, m/s","Wind Direction", "Temperature °C", "RH %" ], columns = ["mean", "std", "max", "min"] )
return self.outdoor_summary
def indoor_data(self):
self.names = pd.read_sql_query('SHOW TABLES FROM {}'.format(self.database), con = self.engine)
self.names = self.names.iloc[:,0].to_list()
self.new_names = [x for x in self.names if (x not in self.exclude)]
self.humidity = []
self.temp = []
for i in self.new_names:
# print(i)
self.hudf = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database,i,self.t0,self.tn), con = self.engine).set_index("datetime").dropna()
if 'RH_%rH' in self.hudf.columns:
self.humidity.append(self.hudf["RH_%rH"].mean())
if 'temp_°C' in self.hudf.columns:
self.temp.append(self.hudf["temp_°C"].mean())
self.humidity = [x for x in self.humidity if x == x]
self.temp = [x for x in self.temp if x == x] # to remove nans
self.indoor_list = [[statistics.mean(self.humidity), statistics.stdev(self.humidity), max(self.humidity), min(self.humidity)]]
self.indoor_list.append([statistics.mean(self.temp), statistics.stdev(self.temp), max(self.temp), min(self.temp)])
for i in self.testos:
sdf = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database.lower(),i,self.t0,self.tn), con = self.engine)
if not(sdf.empty):
self.sdf = sdf.drop_duplicates(subset="datetime").set_index("datetime")
self.sdf = self.sdf.loc[:,["hw_m/sec"]].dropna()
self.indoor_list.append([self.sdf["hw_m/sec"].mean(), self.sdf["hw_m/sec"].std(), self.sdf["hw_m/sec"].max(), self.sdf["hw_m/sec"].min()])
self.wadf = pd.read_sql_query("SELECT * FROM weather.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.wall_database,self.t0,self.tn), con = self.engine).set_index("datetime")
self.indoor_list.append([self.wadf.mean().mean(), self.wadf.values.std(ddof=1), self.wadf.values.max(), self.wadf.values.min()])
self.indoor_summary = pd.DataFrame(data = self.indoor_list, index = ["RH_%rH", "temp_°C", "hw_m/sec", "wall_temp_°C"], columns = ["mean", "std", "max", "min"] )
return self.indoor_summary
def outdoor_windspeed_plot(self, save = False):
"""
This method produces a plot for the outdoor wind speeds during the measurement
Parameters
----------
save : BOOL, optional
If True , the plot is saved. The default is False.
Returns
-------
Figure.
"""
global df1
df1 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND \
'{}'".format("weather", "weather_all", self.t0,\
self.tn), con = self.engine)
df1 = df1.loc[:,['datetime', 'Wind Speed, m/s', 'Gust Speed, m/s', 'Wind Direction']]
u = df1['Wind Direction'].to_numpy()
U = np.sin(np.radians(u))
V = np.cos(np.radians(u))
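# Sketch of the conversion above: a wind direction of theta degrees is mapped to
# the unit vector (U, V) = (sin theta, cos theta), so 0 degrees points straight
# up in the quiver plot, which the axis label below interprets as north.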
wdf_plot = df1.set_index("datetime")
wdf_plot['u'] = U
wdf_plot['v'] = V
wdf_plot['y'] = 0
converter = mdates.ConciseDateConverter()
munits.registry[np.datetime64] = converter
munits.registry[datetime.date] = converter
munits.registry[datetime.datetime] = converter
fig, ax1 = plt.subplots()
ax1.plot(wdf_plot['Gust Speed, m/s'],color = 'silver', label = 'Gust Speed', zorder=1)
ax1.set_ylabel('Gust speed (m/sec)')
ax1.set_xlabel('Time')
# ax2 = ax1.twinx()
ax1.plot(wdf_plot['Wind Speed, m/s'], label = 'Wind Speed', zorder=2)
ax1.quiver(wdf_plot.index, wdf_plot['Wind Speed, m/s'], U, V , width = 0.001, zorder=3)
ax1.set_ylabel('wind speed (m/sec) and direction (up is north)')
plt.ylim(bottom=-0.1)
title = "Wind and Gust speed during {}".format(self.experiment)
plt.legend( loc='upper right')
plt.title(title)
if save:
plt.savefig(title + '.png', bbox_inches='tight', dpi=400)
plt.show()
return fig
def residence_time_sup_exh(self, experimentno=16, deviceno=0, periodtime=120,
experimentname=False, plot=False,
export_sublist=False, method='simpson',
filter_maxTrel=0.25, logging=False):
"""
method:
'iso' The method described in ISO 16000-8 will be applied;
however, this method has a weak uncertainty analysis.
'trapez' Corrected ISO 16000-8 method applying the trapezoidal rule
for the interval integration and considering this in the
uncertainty evaluation.
'simpson' (default) Applies the Simpson rule for the integration and
consequently considers this in the uncertainty evaluation.
filter_maxTrel:
Relative (fractional) allowed deviation from the predefined
period time T of the devices. Only half-cycles which meet the
criterion ]T/2*(1-filter_maxTrel), T/2*(1+filter_maxTrel)[
are evaluated.
"""
#%% Function import
"""Syntax to import a function from any folder. Useful if the function.py file
is in another folder other than the working folder"""
# import sys
# import sys
# sys.path.append("C:/Users/Devineni/OneDrive - bwedu/4_Recirculation/python_files/")
self.alpha_mean, self.df_alpha, self.df_indoor = self.mean_curve()
#%% Function to find outliers
def find_outliers(col):
from scipy import stats
z = np.abs(stats.zscore(col))
idx_outliers = np.where(z>3,True,False)
return pd.Series(idx_outliers,index=col.index)
#%% Control plot properties"
"""This syntax controls the plot properties(default plot font, shape, etc),
more attributes can be added and removed depending on the requirement """
from pylab import rcParams
rcParams['figure.figsize'] = 7,4.5
plt.rcParams["font.family"] = "calibri"
plt.rcParams["font.weight"] = "normal"
plt.rcParams["font.size"] = 10
plt.close("all")
#%% Load relevant data
if periodtime is None:
T = 120
prYellow('ATTENTION: periodtime has not been defined. T=120 s was set instead')
else:
T = periodtime
# T in s; period time of the ventilation systems push-pull devices.
# time = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/4_Recirculation/Times_thesis.xlsx", sheet_name="Timeframes")
# The dataframe time comes from the excel sheet in the path above, to make -
# - changes go to this excel sheet, edit and upload it to mysql.
lb = T/2*(1-filter_maxTrel) # lower bound of considered cycles
ub = T/2*(1+filter_maxTrel) # upper bound of considered cycles
time = pd.read_sql_query("SELECT * FROM testdb.timeframes;", con = self.engine)
#standard syntax to fetch a table from Mysql; In this case a table with the
# short-names of the measurements, all the start and end times, the DB-name
# of the measurement and the required table-names of the DB/schema is loaded into a dataframe.
# start, end = self.t0_20, self.tn_20
fdelay = 2
self.t0_2T = time.loc[time['short_name']==self.experiment].iat[0,3] + dt.timedelta(seconds=fdelay*T)
# actual start of the experiment, out of the dataframe "time" + device periods,
# since the decay of the moving average curve of the subsystem 23 is at the
# beginning falsified by the drop from the accumulation level in subsystem 3.
# The drop is the response signal of the entire system 123. After this
# about 2*T the falsification due to the response of the entire system is
# negligible.
# table = time["tables"][t].split(",")[l] #Name of the ventilation device
# dum = [["Experiment",time["short_name"][t] ], ["Sensor", table]] # Creates a list of 2 rows filled with string tuples specifying the experiment and the sensor.
# if experimentname:
# print(tabulate(dum)) # Prints the input details in a table
# else:
# pass
database = self.database # Selects the name of the database as a string
#%%% Load data for the occupied space V3
#experimentglo = CBO_ESHL(experiment = dum[0][1], sensor_name = dum[1][1])
alpha_mean_u = ufloat(self.alpha_mean[0], self.alpha_mean[1])
self.dfin_dCmean = self.df_indoor.loc[:,['mean_delta', 'std mean_delta']]
while not(self.t0 in self.dfin_dCmean.index.to_list()):
self.t0 = self.t0 + dt.timedelta(seconds=1)
# print(self.t0)
mean_delta_0_room = self.dfin_dCmean.loc[self.t0]
dfin_dCmean = self.dfin_dCmean.copy()
mean_delta_0_room_u = ufloat(mean_delta_0_room[0],mean_delta_0_room[1])
#%%%%% Add mean and exhaust concentrations indoor (V3) to the dfin_dCmean
'''
mean concentrations:
Based on the calculated spatial and statistical mean air
age in the occupied space and the spatial average initial
concentration in the occupied space at t0glob.
'''
count = 0
dfin_dCmean['room_av'] = pd.Series(dtype='float64')
dfin_dCmean['std room_av'] =
|
pd.Series(dtype='float64')
|
pandas.Series
|
from rest_framework import status
from rest_framework.decorators import api_view
from api.models import Movie, Rating, User, Crew, Cast, UserCluster
from api.algorithms.kmeansClustering import U_Cluster
# from django.contrib.auth.models import User
from api.serializers import MovieSerializer,MovieAgeSerializer,MovieGenderSerializer
from rest_framework.response import Response
from django.http import JsonResponse
# Django Database Model
from django.db.models import F
import json
import operator
import re
import os
import math
import time
import csv
import datetime
from django.core.cache import cache
from django.db.models import F
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import sklearn.preprocessing as pp
import pandas as pd
import numpy as np
from numpy import dot
from numpy.linalg import norm
from django.conf import settings
from ast import literal_eval
from rake_nltk import Rake
from scipy import sparse
# Caching
from django.http import JsonResponse
from django.core.cache import cache
from scipy import sparse
import sklearn.preprocessing as pp
# Numba
from numba import jit, prange
from numba import types, typeof
from numba.typed import Dict
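# --- Illustrative sketch (added for clarity; not the project's actual pipeline) ---
# The view below loads precomputed keyword files (df_keys_*.csv) and recommends
# movies by content similarity. A minimal, self-contained version of that idea
# looks like the helper below; the 'keywords' column name and the DataFrame
# layout are assumptions, not the real preprocessing format.
def _example_content_similarity(df_keys):
    """Return a cosine-similarity matrix over a 'keywords' text column (sketch)."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import linear_kernel
    tfidf = TfidfVectorizer(stop_words='english')
    tfidf_matrix = tfidf.fit_transform(df_keys['keywords'].fillna(''))
    # For L2-normalised tf-idf rows the linear kernel equals cosine similarity.
    return linear_kernel(tfidf_matrix, tfidf_matrix)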
@api_view(['GET'])
def algorithm(request):
"""
Content-Based Algorithm
"""
if request.method == 'GET':
email = request.GET.get('email', None)
page = request.GET.get('page', 1)
feature = request.GET.get('feature', None)
if email is None:
return JsonResponse({'status': status.HTTP_400_BAD_REQUEST})
user = User.objects.get(email=email)
page = int(page)
ratings = Rating.objects.filter(user=user)
rating_count = len(ratings)
# If the user has not rated any movies, recommend the most recent movies.
if len(ratings) == 0:
movies = Movie.objects.all()
movies = movies.order_by(F('release_date').desc(nulls_first=False))
movies = movies[(50 * (page - 1)): (50 * page)]
else:
# Read preprocessing data
if feature == 'Director':
df_keys = pd.read_csv('df_keys_crew.csv')
elif feature == 'Actor':
df_keys = pd.read_csv('df_keys_cast.csv')
elif feature == 'Director/Actor':
df_keys =
|
pd.read_csv('df_keys_crew_cast.csv')
|
pandas.read_csv
|
import unittest
import copy
import numpy as np
import numpy.testing as np_test
import pandas as pd
import pandas.testing as pd_test
import warnings
from pyblackscholesanalytics.market.market import MarketEnvironment
from pyblackscholesanalytics.options.options import PlainVanillaOption, DigitalOption
from pyblackscholesanalytics.utils.utils import scalarize
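# --- Reference sketch (added for clarity; not part of the test suite) ---
# The expected values below come from PlainVanillaOption itself. For orientation,
# the standard Black-Scholes closed form for a European option looks as follows;
# the parameter handling in pyblackscholesanalytics (MarketEnvironment defaults,
# day counts, dividend yield) may differ, so this is only an illustrative
# cross-check, not the library's implementation.
def _bs_price_sketch(S, K, tau, r, sigma, option_type="call", q=0.0):
    from scipy.stats import norm
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * tau) / (sigma * np.sqrt(tau))
    d2 = d1 - sigma * np.sqrt(tau)
    if option_type == "call":
        return S * np.exp(-q * tau) * norm.cdf(d1) - K * np.exp(-r * tau) * norm.cdf(d2)
    return K * np.exp(-r * tau) * norm.cdf(-d2) - S * np.exp(-q * tau) * norm.cdf(-d1)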
class TestPlainVanillaOption(unittest.TestCase):
"""Class to test public methods of PlainVanillaOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = PlainVanillaOption(mkt_env)
self.put_opt = PlainVanillaOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 7.548381716811839
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 4.672730506407959
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]])
np_test.assert_allclose(test_put, expected_put)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 4.060979245868182
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -5.368600081057167
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]])
np_test.assert_allclose(test_put, expected_put)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_delta_scalar(self):
"""Test Delta - scalar case"""
# call
test_call = scalarize(self.call_opt.delta(**self.scalar_params))
expected_call = 0.6054075531684143
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.delta(**self.scalar_params))
expected_put = -0.3945924468315857
self.assertEqual(test_put, expected_put)
def test_delta_vector_np(self):
"""Test Delta - np.ndarray output case"""
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = np.array([[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = np.array([[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]])
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
def test_delta_vector_df(self):
"""Test Delta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_gamma_scalar(self):
"""Test Gamma - scalar case"""
# call
test_call = scalarize(self.call_opt.gamma(**self.scalar_params))
expected_call = 0.025194958512498786
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.gamma(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put gamma coincide
self.assertEqual(test_call, test_put)
def test_gamma_vector_np(self):
"""Test Gamma - np.ndarray output case"""
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = np.array([[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-6)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
# assert call and put gamma coincide
np_test.assert_allclose(test_call, test_put)
def test_gamma_vector_df(self):
"""Test Gamma - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put)
# assert call and put gamma coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_vega_scalar(self):
"""Test Vega - scalar case"""
# call
test_call = scalarize(self.call_opt.vega(**self.scalar_params))
expected_call = 0.29405622811847903
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.vega(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put vega coincide
self.assertEqual(test_call, test_put)
def test_vega_vector_np(self):
"""Test Vega - np.ndarray output case"""
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = np.array([[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
# assert call and put vega coincide
np_test.assert_allclose(test_call, test_put)
def test_vega_vector_df(self):
"""Test Vega - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
# assert call and put vega coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_theta_scalar(self):
"""Test Theta - scalar case"""
# call
test_call = scalarize(self.call_opt.theta(**self.scalar_params))
expected_call = -0.021064685979455443
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.theta(**self.scalar_params))
expected_put = -0.007759980665812141
self.assertEqual(test_put, expected_put)
def test_theta_vector_np(self):
"""Test Theta - np.ndarray output case"""
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = np.array([[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-4)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = np.array([[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_theta_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = pd.DataFrame(data=[[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_rho_scalar(self):
"""Test Rho - scalar case"""
# call
test_call = scalarize(self.call_opt.rho(**self.scalar_params))
expected_call = 0.309243166487844
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.rho(**self.scalar_params))
expected_put = -0.2575372798733608
self.assertEqual(test_put, expected_put)
def test_rho_vector_np(self):
"""Test Rho - np.ndarray output case"""
# call
test_call = self.call_opt.rho(**self.vector_params)
expected_call = np.array([[2.08128741e-01, 3.72449469e-01, 5.12209444e-01],
[1.39670999e-01, 2.81318986e-01, 4.02292404e-01],
[7.76651463e-02, 1.91809707e-01, 2.90026614e-01],
[2.49657984e-02, 1.01399432e-01, 1.68411513e-01],
[2.17415573e-05, 1.39508485e-02, 2.73093423e-02]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.rho(**self.vector_params)
expected_put = np.array([[-4.69071412e-01, -3.04750685e-01, -1.64990710e-01],
[-3.77896910e-01, -2.36248923e-01, -1.15275505e-01],
[-2.80139757e-01, -1.65995197e-01, -6.77782897e-02],
[-1.67672008e-01, -9.12383748e-02, -2.42262934e-02],
[-2.73380139e-02, -1.34089069e-02, -5.04131783e-05]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_rho_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.rho(**self.vector_params)
expected_call = pd.DataFrame(data=[[2.08128741e-01, 3.72449469e-01, 5.12209444e-01],
[1.39670999e-01, 2.81318986e-01, 4.02292404e-01],
[7.76651463e-02, 1.91809707e-01, 2.90026614e-01],
[2.49657984e-02, 1.01399432e-01, 1.68411513e-01],
[2.17415573e-05, 1.39508485e-02, 2.73093423e-02]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.rho(**self.vector_params)
expected_put = pd.DataFrame(data=[[-4.69071412e-01, -3.04750685e-01, -1.64990710e-01],
[-3.77896910e-01, -2.36248923e-01, -1.15275505e-01],
[-2.80139757e-01, -1.65995197e-01, -6.77782897e-02],
[-1.67672008e-01, -9.12383748e-02, -2.42262934e-02],
[-2.73380139e-02, -1.34089069e-02, -5.04131783e-05]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_Implied_Vol_scalar(self):
"""Test Implied Volatility - scalar case"""
# call
test_call = scalarize(self.call_opt.implied_volatility(**self.scalar_params))
expected_call = 0.2
self.assertAlmostEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.implied_volatility(**self.scalar_params))
expected_put = 0.2
self.assertAlmostEqual(test_put, expected_put)
def test_Implied_Vol_vector_np(self):
"""Test Implied Volatility - np.ndarray output case"""
# call
test_call = self.call_opt.implied_volatility(**self.vector_params)
expected_call = 0.2 + np.zeros_like(test_call)
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.implied_volatility(**self.vector_params)
expected_put = 0.2 + np.zeros_like(test_put)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_Implied_Vol_vector_df(self):
"""Test Implied Volatility - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.implied_volatility(**self.vector_params)
expected_call = pd.DataFrame(data=0.2 + np.zeros_like(test_call),
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.implied_volatility(**self.vector_params)
expected_put = pd.DataFrame(data=0.2 + np.zeros_like(test_put),
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_complex_parameters_setup(self):
"""
Test complex parameter setup:
(S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
"""
# call
test_call_price = self.call_opt.price(**self.complex_params)
test_call_PnL = self.call_opt.PnL(**self.complex_params)
test_call_delta = self.call_opt.delta(**self.complex_params)
test_call_gamma = self.call_opt.gamma(**self.complex_params)
test_call_vega = self.call_opt.vega(**self.complex_params)
test_call_theta = self.call_opt.theta(**self.complex_params)
test_call_rho = self.call_opt.rho(**self.complex_params)
test_call_iv = self.call_opt.implied_volatility(**self.complex_params)
expected_call_price = pd.DataFrame(data=[[15.55231058, 9.40714796, 9.87150919, 10.97983523],
[20.05777231, 16.15277891, 16.02977848, 16.27588191],
[15.81433361, 8.75227505, 6.65476799, 5.19785143]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_price.rename_axis("K", axis='columns', inplace=True)
expected_call_price.rename_axis("t", axis='rows', inplace=True)
expected_call_PnL = pd.DataFrame(data=[[12.06490811, 5.91974549, 6.38410672, 7.49243276],
[16.57036984, 12.66537644, 12.54237601, 12.78847944],
[12.32693114, 5.26487258, 3.16736552, 1.71044896]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_PnL.rename_axis("K", axis='columns', inplace=True)
expected_call_PnL.rename_axis("t", axis='rows', inplace=True)
expected_call_delta = pd.DataFrame(data=[[0.98935079, 0.69453583, 0.58292013, 0.53579465],
[0.79256302, 0.65515368, 0.60705014, 0.57529078],
[0.90573251, 0.6717088, 0.54283905, 0.43788167]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_delta.rename_axis("K", axis='columns', inplace=True)
expected_call_delta.rename_axis("t", axis='rows', inplace=True)
expected_call_gamma = pd.DataFrame(data=[[0.00373538, 0.02325203, 0.01726052, 0.01317896],
[0.01053321, 0.01130107, 0.01011038, 0.0090151],
[0.01253481, 0.0242596, 0.02420515, 0.02204576]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_gamma.rename_axis("K", axis='columns', inplace=True)
expected_call_gamma.rename_axis("t", axis='rows', inplace=True)
expected_call_vega = pd.DataFrame(data=[[0.02122104, 0.26419398, 0.29417607, 0.29948378],
[0.15544424, 0.20013116, 0.20888592, 0.2128651],
[0.02503527, 0.05383637, 0.05908709, 0.05870816]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_vega.rename_axis("K", axis='columns', inplace=True)
expected_call_vega.rename_axis("t", axis='rows', inplace=True)
expected_call_theta = pd.DataFrame(data=[[-0.00242788, -0.01322973, -0.02073753, -0.02747845],
[-0.03624253, -0.0521798, -0.06237363, -0.07180046],
[-0.12885912, -0.28334665, -0.33769702, -0.36349655]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_theta.rename_axis("K", axis='columns', inplace=True)
expected_call_theta.rename_axis("t", axis='rows', inplace=True)
expected_call_rho = pd.DataFrame(data=[[0.51543152, 0.37243495, 0.29872256, 0.26120194],
[0.18683002, 0.15599644, 0.14066931, 0.12935721],
[0.01800044, 0.0141648, 0.01156185, 0.00937301]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_rho.rename_axis("K", axis='columns', inplace=True)
expected_call_rho.rename_axis("t", axis='rows', inplace=True)
expected_call_iv = pd.DataFrame(data=self.complex_params["sigma"],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_iv.rename_axis("K", axis='columns', inplace=True)
expected_call_iv.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call_price, expected_call_price)
pd_test.assert_frame_equal(test_call_PnL, expected_call_PnL)
pd_test.assert_frame_equal(test_call_delta, expected_call_delta)
pd_test.assert_frame_equal(test_call_gamma, expected_call_gamma)
pd_test.assert_frame_equal(test_call_vega, expected_call_vega)
pd_test.assert_frame_equal(test_call_theta, expected_call_theta)
pd_test.assert_frame_equal(test_call_rho, expected_call_rho)
pd_test.assert_frame_equal(test_call_iv, expected_call_iv)
# put
test_put_price = self.put_opt.price(**self.complex_params)
test_put_PnL = self.put_opt.PnL(**self.complex_params)
test_put_delta = self.put_opt.delta(**self.complex_params)
test_put_gamma = self.put_opt.gamma(**self.complex_params)
test_put_vega = self.put_opt.vega(**self.complex_params)
test_put_theta = self.put_opt.theta(**self.complex_params)
test_put_rho = self.put_opt.rho(**self.complex_params)
test_put_iv = self.put_opt.implied_volatility(**self.complex_params)
expected_put_price = pd.DataFrame(data=[[0.02812357, 3.22314287, 7.9975943, 13.35166847],
[3.70370639, 9.31459014, 13.76319167, 18.54654119],
[0.62962992, 3.51971706, 6.38394341, 9.88603552]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_price.rename_axis("K", axis='columns', inplace=True)
expected_put_price.rename_axis("t", axis='rows', inplace=True)
expected_put_PnL = pd.DataFrame(data=[[-10.01320701, -6.81818772, -2.04373628, 3.31033788],
[-6.3376242, -0.72674045, 3.72186108, 8.5052106],
[-9.41170067, -6.52161353, -3.65738717, -0.15529507]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_PnL.rename_axis("K", axis='columns', inplace=True)
expected_put_PnL.rename_axis("t", axis='rows', inplace=True)
expected_put_delta = pd.DataFrame(data=[[-0.01064921, -0.30546417, -0.41707987, -0.46420535],
[-0.20743698, -0.34484632, -0.39294986, -0.42470922],
[-0.09426749, -0.3282912, -0.45716095, -0.56211833]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_delta.rename_axis("K", axis='columns', inplace=True)
expected_put_delta.rename_axis("t", axis='rows', inplace=True)
expected_put_gamma = copy.deepcopy(expected_call_gamma)
expected_put_vega = copy.deepcopy(expected_call_vega)
expected_put_theta = pd.DataFrame(data=[[-0.00038744, -0.00863707, -0.01349429, -0.01735551],
[-0.02615404, -0.03850937, -0.04554804, -0.05157676],
[-0.11041151, -0.26012269, -0.31065535, -0.33236619]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_theta.rename_axis("K", axis='columns', inplace=True)
expected_put_theta.rename_axis("t", axis='rows', inplace=True)
expected_put_rho = pd.DataFrame(data=[[-0.00691938, -0.21542518, -0.31936724, -0.38666626],
[-0.08152366, -0.14703153, -0.17901683, -0.2068619],
[-0.00249691, -0.00905916, -0.01302149, -0.01656895]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_rho.rename_axis("K", axis='columns', inplace=True)
expected_put_rho.rename_axis("t", axis='rows', inplace=True)
expected_put_iv = pd.DataFrame(data=self.complex_params["sigma"],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_iv.rename_axis("K", axis='columns', inplace=True)
expected_put_iv.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put_price, expected_put_price)
pd_test.assert_frame_equal(test_put_PnL, expected_put_PnL)
pd_test.assert_frame_equal(test_put_delta, expected_put_delta)
pd_test.assert_frame_equal(test_put_gamma, expected_put_gamma)
pd_test.assert_frame_equal(test_put_vega, expected_put_vega)
pd_test.assert_frame_equal(test_put_theta, expected_put_theta, check_less_precise=True)
pd_test.assert_frame_equal(test_put_rho, expected_put_rho)
pd_test.assert_frame_equal(test_put_iv, expected_put_iv)
# test gamma and vega consistency
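# (for European plain-vanilla options in the Black-Scholes model, Gamma and Vega
# of a call and of the corresponding put coincide, which is what is asserted here)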
pd_test.assert_frame_equal(test_call_gamma, test_put_gamma)
pd_test.assert_frame_equal(test_call_vega, test_put_vega)
class TestDigitalOption(unittest.TestCase):
"""Class to test public methods of DigitalOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = DigitalOption(mkt_env)
self.put_opt = DigitalOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
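# note: sigma_grid_K and r_grid_K have shape (n, mK) = (3, 4), i.e. rows follow
# the t vector and columns follow the K vector, matching the Kxt grid description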
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 0.529923736000296
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 0.4413197518956652
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[2.96746057e-01, 5.31031469e-01, 7.30298621e-01],
[2.62783065e-01, 5.29285722e-01, 7.56890348e-01],
[2.13141191e-01, 5.26395060e-01, 7.95937699e-01],
[1.28345302e-01, 5.21278768e-01, 8.65777496e-01],
[7.93566840e-04, 5.09205971e-01, 9.96790994e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[0.66879322, 0.43450781, 0.23524066],
[0.71099161, 0.44448895, 0.21688433],
[0.7688046, 0.45555073, 0.18600809],
[0.86197582, 0.46904235, 0.12454362],
[0.99783751, 0.4894251, 0.00184008]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-6)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[2.96746057e-01, 5.31031469e-01, 7.30298621e-01],
[2.62783065e-01, 5.29285722e-01, 7.56890348e-01],
[2.13141191e-01, 5.26395060e-01, 7.95937699e-01],
[1.28345302e-01, 5.21278768e-01, 8.65777496e-01],
[7.93566840e-04, 5.09205971e-01, 9.96790994e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[0.66879322, 0.43450781, 0.23524066],
[0.71099161, 0.44448895, 0.21688433],
[0.7688046, 0.45555073, 0.18600809],
[0.86197582, 0.46904235, 0.12454362],
[0.99783751, 0.4894251, 0.00184008]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 0.23317767915072352
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -0.22747347241997717
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 0.23428541, 0.43355256],
[-0.03396299, 0.23253966, 0.46014429],
[-0.08360487, 0.229649, 0.49919164],
[-0.16840076, 0.22453271, 0.56903144],
[-0.29595249, 0.21245991, 0.70004494]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -0.23428541, -0.43355256],
[0.04219839, -0.22430427, -0.4519089],
[0.10001137, -0.2132425, -0.48278514],
[0.19318259, -0.19975088, -0.5442496],
[0.32904428, -0.17936812, -0.66695314]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-6)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 0.23428541, 0.43355256],
[-0.03396299, 0.23253966, 0.46014429],
[-0.08360487, 0.229649, 0.49919164],
[-0.16840076, 0.22453271, 0.56903144],
[-0.29595249, 0.21245991, 0.70004494]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -0.23428541, -0.43355256],
[0.04219839, -0.22430427, -0.4519089],
[0.10001137, -0.2132425, -0.48278514],
[0.19318259, -0.19975088, -0.5442496],
[0.32904428, -0.17936812, -0.66695314]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_delta_scalar(self):
"""Test Delta - scalar case"""
# call
test_call = scalarize(self.call_opt.delta(**self.scalar_params))
expected_call = 0.025194958512498786
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.delta(**self.scalar_params))
expected_put = copy.deepcopy(-expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put delta consistency
self.assertEqual(test_call, -test_put)
def test_delta_vector_np(self):
"""Test Delta - np.ndarray output case"""
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = np.array([[0.02251146, 0.02281654, 0.01642484],
[0.0245291, 0.02648423, 0.01810373],
[0.02655219, 0.03231528, 0.02002786],
[0.02633276, 0.0446913, 0.02109933],
[0.00091364, 0.12030889, 0.00161394]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-6)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = copy.deepcopy(-expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
# assert call and put delta consistency
np_test.assert_allclose(test_call, -test_put)
def test_delta_vector_df(self):
"""Test Delta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.02251146, 0.02281654, 0.01642484],
[0.0245291, 0.02648423, 0.01810373],
[0.02655219, 0.03231528, 0.02002786],
[0.02633276, 0.0446913, 0.02109933],
[0.00091364, 0.12030889, 0.00161394]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = copy.deepcopy(-expected_call)
pd_test.assert_frame_equal(test_put, expected_put)
# assert call and put delta consistency
pd_test.assert_frame_equal(test_call, -test_put)
def test_gamma_scalar(self):
"""Test Gamma - scalar case"""
# call
test_call = scalarize(self.call_opt.gamma(**self.scalar_params))
expected_call = -0.0004409117739687288
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.gamma(**self.scalar_params))
expected_put = copy.deepcopy(-expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put gamma are opposite in sign
self.assertEqual(test_call, -test_put)
def test_gamma_vector_np(self):
"""Test Gamma - np.ndarray output case"""
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = np.array([[0.00050164, -0.00039929, -0.00076858],
[0.00087371, -0.00046347, -0.00102583],
[0.00161634, -0.00056552, -0.00150922],
[0.0034499, -0.0007821, -0.00268525],
[0.00095822, -0.00210541, -0.00130173]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(-expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
# assert call and put gamma are opposite in sign
np_test.assert_allclose(test_call, -test_put)
def test_gamma_vector_df(self):
"""Test Gamma - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.00050164, -0.00039929, -0.00076858],
[0.00087371, -0.00046347, -0.00102583],
[0.00161634, -0.00056552, -0.00150922],
[0.0034499, -0.0007821, -0.00268525],
[0.00095822, -0.00210541, -0.00130173]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(-expected_call)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
# assert call and put gamma are opposite in sign
pd_test.assert_frame_equal(test_call, -test_put)
def test_vega_scalar(self):
"""Test Vega - scalar case"""
# call
test_call = scalarize(self.call_opt.vega(**self.scalar_params))
expected_call = -0.005145983992073383
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.vega(**self.scalar_params))
expected_put = copy.deepcopy(-expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put vega are opposite in sign
self.assertEqual(test_call, -test_put)
def test_vega_vector_np(self):
"""Test Vega - np.ndarray output case"""
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = np.array([[0.00569969, -0.00560099, -0.01304515],
[0.00752302, -0.00492679, -0.01319465],
[0.0095413, -0.0041213, -0.01330838],
[0.01087143, -0.00304268, -0.01264053],
[0.00042529, -0.00115365, -0.00086306]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(-expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-5)
# assert call and put vega are opposite in sign
np_test.assert_allclose(test_call, -test_put)
def test_vega_vector_df(self):
"""Test Vega - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.00569969, -0.00560099, -0.01304515],
[0.00752302, -0.00492679, -0.01319465],
[0.0095413, -0.0041213, -0.01330838],
[0.01087143, -0.00304268, -0.01264053],
[0.00042529, -0.00115365, -0.00086306]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
|
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
|
pandas.testing.assert_frame_equal
|
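# --- illustrative sketch (added commentary, not part of the dataset entry) ---
# Minimal usage of pandas.testing.assert_frame_equal, the API labelled above.
# The check_less_precise flag used in the tests is deprecated in recent pandas;
# rtol/atol are assumed here as the replacement (pandas >= 1.1).
import pandas as pd
import pandas.testing as pd_test

left = pd.DataFrame({"a": [1.00000, 2.0]})
right = pd.DataFrame({"a": [1.00001, 2.0]})
pd_test.assert_frame_equal(left, right, rtol=1e-3)  # passes: equal within tolerance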
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2-D matrix with shape (2, 3) used as input; `empty` is a factory that
# makes sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
|
tm.assert_frame_equal(dm, self.frame)
|
pandas.util.testing.assert_frame_equal
|
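# --- illustrative sketch (added commentary, not part of the dataset entry) ---
# The test module above imports pandas.util.testing as tm, the pre-1.0 home of
# the testing helpers; modern pandas exposes the same assert_frame_equal under
# pandas.testing (assumed pandas >= 1.0):
import pandas as pd
from pandas import testing as tm

tm.assert_frame_equal(
    pd.DataFrame({"col1": [1.0], "col2": [2.0]}),
    pd.DataFrame({"col1": [1.0], "col2": [2.0]}),
)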
"""
Wall
====
This module holds the class for the Wall object.
"""
import os
from typing import Dict, List, Union
from uuid import uuid4
from datetime import datetime
import re
from matplotlib.backends.backend_pdf import PdfPages # type: ignore
import pandas as pd # type: ignore
from frewpy.utils import (
get_num_nodes,
get_num_stages,
get_stage_names,
get_titles,
get_design_case_names,
check_results_present,
)
from .plot import FrewMPL, FrewBokeh
from .exceptions import FrewError
class Wall:
""" A class used to contain any wall related functionality of frewpy.
"""
def __init__(self, json_data):
self.json_data = json_data
def get_node_levels(self) -> List[float]:
""" Method to get the levels of the nodes in a Frew model.
Returns
-------
node_levels : List[float]
The levels of each node in a Frew model.
"""
num_nodes = get_num_nodes(self.json_data)
try:
node_information = self.json_data["Stages"][0]["GeoFrewNodes"]
except (KeyError, IndexError):
raise FrewError("Unable to retrieve node information.")
if len(node_information) != num_nodes:
raise FrewError(
"""
Number of nodes does not equal the length of the node
information
"""
)
return [node_information[node]["Level"] for node in range(num_nodes)]
def get_results(self) -> Dict[int, dict]:
""" Method to get the shear, bending moment and displacement of the
wall for each stage, node, and design case.
Returns
-------
wall_results : Dict[int, dict]
The shear, bending and displacement of the wall.
"""
num_nodes = get_num_nodes(self.json_data)
num_stages = get_num_stages(self.json_data)
check_results_present(self.json_data)
wall_results: Dict[int, dict] = {}
for stage in range(num_stages):
wall_results[stage] = {}
for result_set in self.json_data["Frew Results"]:
result_set_name = result_set["GeoPartialFactorSet"]["Name"]
wall_results[stage][result_set_name] = {
"shear": [],
"bending": [],
"displacement": [],
}
for node in range(num_nodes):
stage_results = result_set["Stageresults"][stage][
"Noderesults"
]
wall_results[stage][result_set_name]["shear"].append(
stage_results[node]["Shear"] / 1000
)
wall_results[stage][result_set_name]["bending"].append(
stage_results[node]["Bending"] / 1000
)
wall_results[stage][result_set_name][
"displacement"
].append(stage_results[node]["Displacement"] * 1000)
return wall_results
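# example of the returned structure (hypothetical values and design case name):
# {0: {"ULS": {"shear": [12.3, ...], "bending": [45.6, ...], "displacement": [7.8, ...]}}}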
def get_envelopes(self) -> Dict[str, dict]:
""" Method to return the envelopes of max and min shear, bending and
displacements for each design case.
Returns
-------
envelopes : Dict[str, dict]
The maximum and minimum shear, bending and displacement for each
design case for all stages.
"""
check_results_present(self.json_data)
num_stages = get_num_stages(self.json_data)
num_nodes = get_num_nodes(self.json_data)
design_cases = get_design_case_names(self.json_data)
wall_results = self.get_results()
envelopes: Dict[str, dict] = {
design_case: {
"maximum": {"shear": [], "bending": [], "disp": []},
"minimum": {"shear": [], "bending": [], "disp": []},
}
for design_case in design_cases
}
for design_case in design_cases:
for node in range(num_nodes):
shear = []
bending = []
disp = []
for stage in range(num_stages):
shear.append(
wall_results[stage][design_case]["shear"][node]
)
bending.append(
wall_results[stage][design_case]["bending"][node]
)
disp.append(
wall_results[stage][design_case]["displacement"][node]
)
envelopes[design_case]["maximum"]["shear"].append(max(shear))
envelopes[design_case]["maximum"]["bending"].append(
max(bending)
)
envelopes[design_case]["maximum"]["disp"].append(max(disp))
envelopes[design_case]["minimum"]["shear"].append(min(shear))
envelopes[design_case]["minimum"]["bending"].append(
min(bending)
)
envelopes[design_case]["minimum"]["disp"].append(min(disp))
return envelopes
def results_to_excel(self, out_folder: str) -> None:
""" Method to exports the wall results to an excel file where each
sheet in the spreadsheet is a design case. The spreadsheet also
a title sheet and the envelopes.
Parameters
----------
out_folder : str
The folder path to save the results at.
Returns
-------
None
"""
if not os.path.exists(out_folder):
raise FrewError(f"Path {out_folder} does not exist.")
num_nodes: int = get_num_nodes(self.json_data)
num_stages: int = get_num_stages(self.json_data)
node_levels: List[float] = self.get_node_levels()
wall_results: Dict[int, dict] = self.get_results()
design_cases: List[str] = get_design_case_names(self.json_data)
envelopes: Dict[str, dict] = self.get_envelopes()
export_envelopes = pd.DataFrame(
self._format_envelope_data(
num_nodes, node_levels, envelopes, design_cases
)
)
titles: Dict[str, str] = get_titles(self.json_data)
export_titles = pd.DataFrame(self._format_titles_data(titles))
job_title: str = titles["JobTitle"]
uuid_str: str = str(uuid4()).split("-")[0]
file_name: str = f"{job_title}_{uuid_str}_results.xlsx"
export_data: Dict[str, dict] = {}
for design_case in design_cases:
export_data[design_case] = {
"Node #": [],
"Node levels (m)": [],
"Stage": [],
"Bending (kNm/m)": [],
"Shear (kN/m)": [],
"Displacement (mm)": [],
}
for stage in range(num_stages):
node_array = list(range(1, num_nodes + 1))
stage_array = [stage] * num_nodes
bending_results = wall_results[stage][design_case]["bending"]
shear_results = wall_results[stage][design_case]["shear"]
displacement_results = wall_results[stage][design_case][
"displacement"
]
export_data[design_case]["Node #"].extend(node_array)
export_data[design_case]["Node levels (m)"].extend(node_levels)
export_data[design_case]["Stage"].extend(stage_array)
export_data[design_case]["Bending (kNm/m)"].extend(
bending_results
)
export_data[design_case]["Shear (kN/m)"].extend(shear_results)
export_data[design_case]["Displacement (mm)"].extend(
displacement_results
)
try:
with pd.ExcelWriter(os.path.join(out_folder, file_name)) as writer:
export_titles.to_excel(
writer, sheet_name="Titles", index=False, header=False
)
export_envelopes.to_excel(
writer, sheet_name="Envelopes", index=False,
)
for design_case in design_cases:
export_data_df =
|
pd.DataFrame(export_data[design_case])
|
pandas.DataFrame
|
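# --- illustrative sketch (added commentary, not part of the dataset entry) ---
# results_to_excel above builds one dict of equal-length lists per design case,
# wraps each in pandas.DataFrame (the API labelled above) and writes it through
# pd.ExcelWriter. A minimal stand-alone version of that pattern; the file and
# sheet names are made up and an Excel engine such as openpyxl is assumed:
import pandas as pd

sheet = pd.DataFrame({
    "Node #": [1, 2, 3],
    "Bending (kNm/m)": [10.5, 12.0, 9.8],
    "Shear (kN/m)": [3.1, 2.7, 3.4],
})
with pd.ExcelWriter("example_results.xlsx") as writer:
    sheet.to_excel(writer, sheet_name="DesignCaseA", index=False)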
from xstac import xarray_to_stac, fix_attrs
import xarray as xr
import numpy as np
import pandas as pd
import pytest
import pystac
data = np.empty((40, 584, 284), dtype="float32")
x = xr.DataArray(
np.arange(-5802250.0, -5519250 + 1000, 1000),
name="x",
dims="x",
attrs={
"units": "m",
"long_name": "x coordinate of projection",
"standard_name": "projection_x_coordinate",
},
)
y = xr.DataArray(
np.arange(-39000.0, -622000.0 - 1000, -1000.0),
name="y",
dims="y",
attrs={
"units": "m",
"long_name": "y coordinate of projection",
"standard_name": "projection_y_coordinate",
},
)
time = xr.DataArray(
pd.date_range(start="1980-07-01", freq="A-JUL", periods=40),
name="time",
dims="time",
attrs={
"standard_name": "time",
"bounds": "time_bnds",
"long_name": "24-hour day based on local time",
},
)
lat = xr.DataArray(
np.empty((584, 284)),
coords={"y": y, "x": x},
dims=("y", "x"),
name="lat",
attrs={
"units": "degrees_north",
"long_name": "latitude coordinate",
"standard_name": "latitude",
},
)
lon = xr.DataArray(
np.empty((584, 284)),
coords={"y": y, "x": x},
dims=("y", "x"),
name="lon",
attrs={
"units": "degrees_east",
"long_name": "longitude coordinate",
"standard_name": "longitude",
},
)
coords = dict(time=time, y=y, x=x, lat=lat, lon=lon)
@pytest.fixture
def ds():
ds = xr.Dataset(
{
"prcp": xr.DataArray(
data,
coords=coords,
dims=("time", "y", "x"),
attrs={
"grid_mapping": "lambert_conformal_conic",
"cell_methods": "area: mean time: sum within days time: sum over days",
"units": "mm",
"long_name": "annual total precipitation",
},
),
"swe": xr.DataArray(data, coords=coords, dims=("time", "y", "x")),
"time_bnds": xr.DataArray(
np.empty((40, 2), dtype="datetime64[ns]"),
name="time_bnds",
coords={"time": time},
dims=("time", "nv"),
attrs={"time": "days since 1950-01-01 00:00:00"},
),
"lambert_conformal_conic": xr.DataArray(
np.array(-32767, dtype="int16"),
name="lambert_conformal_conic",
attrs={
"grid_mapping_name": "lambert_conformal_conic",
"longitude_of_central_meridian": -100.0,
"latitude_of_projection_origin": 42.5,
"false_easting": 0.0,
"false_northing": 0.0,
"standard_parallel": np.array([25.0, 60.0]),
"semi_major_axis": 6378137.0,
"inverse_flattening": 298.257223563,
},
),
},
attrs={
"Conventions": "CF-1.6",
"Version_data": "Daymet Data Version 4.0",
"Version_software": "Daymet Software Version 4.0",
"citation": "Please see http://daymet.ornl.gov/ for current Daymet data citation information",
"references": "Please see http://daymet.ornl.gov/ for current information on Daymet references",
"source": "Daymet Software Version 4.0",
"start_year": [1980],
},
)
return ds
def test_xarray_to_stac(ds):
ds = fix_attrs(ds)
template = {
"id": "id",
"type": "Collection",
"links": [],
"description": "description",
"license": "license",
"stac_version": "1.0.0",
}
result = xarray_to_stac(
ds,
template=template,
temporal_dimension="time",
x_dimension="x",
y_dimension="y",
)
assert result.id == "id"
assert isinstance(result, pystac.Collection)
assert result.description == "description"
assert result.license == "license"
dimensions = result.extra_fields["cube:dimensions"]
expected = {
"time": {
"type": "temporal",
"description": "24-hour day based on local time",
# "values": None,
"extent": ["1980-07-31T00:00:00Z", "2019-07-31T00:00:00Z"],
"step": None,
},
"x": {
"type": "spatial",
"axis": "x",
"description": "x coordinate of projection",
"extent": [-5802250.0, -5519250.0],
"values": None,
"step": 1000.0,
"reference_system": {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": "undefined",
"base_crs": {
"name": "undefined",
"datum": {
"type": "GeodeticReferenceFrame",
"name": "undefined",
"ellipsoid": {
"name": "undefined",
"semi_major_axis": 6378137,
"inverse_flattening": 298.257223563,
},
},
"coordinate_system": {
"subtype": "ellipsoidal",
"axis": [
{
"name": "Longitude",
"abbreviation": "lon",
"direction": "east",
"unit": "degree",
},
{
"name": "Latitude",
"abbreviation": "lat",
"direction": "north",
"unit": "degree",
},
],
},
},
"conversion": {
"name": "unknown",
"method": {
"name": "Lambert Conic Conformal (2SP)",
"id": {"authority": "EPSG", "code": 9802},
},
"parameters": [
{
"name": "Latitude of 1st standard parallel",
"value": 25,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8823},
},
{
"name": "Latitude of 2nd standard parallel",
"value": 60,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8824},
},
{
"name": "Latitude of false origin",
"value": 42.5,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8821},
},
{
"name": "Longitude of false origin",
"value": -100,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8822},
},
{
"name": "Easting at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8826},
},
{
"name": "Northing at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8827},
},
],
},
"coordinate_system": {
"subtype": "Cartesian",
"axis": [
{
"name": "Easting",
"abbreviation": "E",
"direction": "east",
"unit": "metre",
},
{
"name": "Northing",
"abbreviation": "N",
"direction": "north",
"unit": "metre",
},
],
},
},
},
"y": {
"type": "spatial",
"axis": "y",
"description": "y coordinate of projection",
"extent": [-622000.0, -39000.0],
"values": None,
"step": -1000.0,
"reference_system": {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": "undefined",
"base_crs": {
"name": "undefined",
"datum": {
"type": "GeodeticReferenceFrame",
"name": "undefined",
"ellipsoid": {
"name": "undefined",
"semi_major_axis": 6378137,
"inverse_flattening": 298.257223563,
},
},
"coordinate_system": {
"subtype": "ellipsoidal",
"axis": [
{
"name": "Longitude",
"abbreviation": "lon",
"direction": "east",
"unit": "degree",
},
{
"name": "Latitude",
"abbreviation": "lat",
"direction": "north",
"unit": "degree",
},
],
},
},
"conversion": {
"name": "unknown",
"method": {
"name": "Lambert Conic Conformal (2SP)",
"id": {"authority": "EPSG", "code": 9802},
},
"parameters": [
{
"name": "Latitude of 1st standard parallel",
"value": 25,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8823},
},
{
"name": "Latitude of 2nd standard parallel",
"value": 60,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8824},
},
{
"name": "Latitude of false origin",
"value": 42.5,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8821},
},
{
"name": "Longitude of false origin",
"value": -100,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8822},
},
{
"name": "Easting at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8826},
},
{
"name": "Northing at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8827},
},
],
},
"coordinate_system": {
"subtype": "Cartesian",
"axis": [
{
"name": "Easting",
"abbreviation": "E",
"direction": "east",
"unit": "metre",
},
{
"name": "Northing",
"abbreviation": "N",
"direction": "north",
"unit": "metre",
},
],
},
},
},
}
assert dimensions == expected
variables = result.extra_fields["cube:variables"]
expected = {
"lat": {
"type": "auxiliary",
"description": "latitude coordinate",
"dimensions": ["y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "degrees_north",
"shape": [584, 284],
"chunks": None,
"attrs": {
"units": "degrees_north",
"long_name": "latitude coordinate",
"standard_name": "latitude",
},
},
"lon": {
"type": "auxiliary",
"description": "longitude coordinate",
"dimensions": ["y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "degrees_east",
"shape": [584, 284],
"chunks": None,
"attrs": {
"units": "degrees_east",
"long_name": "longitude coordinate",
"standard_name": "longitude",
},
},
"prcp": {
"type": "data",
"description": "annual total precipitation",
"dimensions": ["time", "y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "mm",
"shape": [40, 584, 284],
"chunks": None,
"attrs": {
"grid_mapping": "lambert_conformal_conic",
"cell_methods": "area: mean time: sum within days time: sum over days",
"units": "mm",
"long_name": "annual total precipitation",
},
},
"swe": {
"type": "data",
"description": None,
"dimensions": ["time", "y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [40, 584, 284],
"chunks": None,
"attrs": {},
},
"time_bnds": {
"type": "data",
"description": None,
"dimensions": ["time", "nv"],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [40, 2],
"chunks": None,
"attrs": {"time": "days since 1950-01-01 00:00:00"},
},
"lambert_conformal_conic": {
"type": "data",
"description": None,
"dimensions": [],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [],
"chunks": None,
"attrs": {
"grid_mapping_name": "lambert_conformal_conic",
"longitude_of_central_meridian": -100.0,
"latitude_of_projection_origin": 42.5,
"false_easting": 0.0,
"false_northing": 0.0,
"standard_parallel": [25.0, 60.0],
"semi_major_axis": 6378137.0,
"inverse_flattening": 298.257223563,
},
},
}
assert result.extra_fields["cube:variables"] == expected
def test_validation_with_none():
# https://github.com/TomAugspurger/xstac/issues/9
template = {
"type": "Collection",
"id": "cesm2-lens",
"stac_version": "1.0.0",
"description": "desc",
"stac_extensions": [
"https://stac-extensions.github.io/datacube/v1.0.0/schema.json"
],
"extent": {
"spatial": {"bbox": [[-180, -90, 180, 90]]},
"temporal": {
"interval": [["1851-01-01T00:00:00Z", "1851-01-01T00:00:00Z"]]
},
},
"providers": [],
"license": "CC0-1.0",
"links": [],
}
ds = xr.Dataset(
{
"data": xr.DataArray(
[1, 2],
dims=("time",),
coords={"time":
|
pd.to_datetime(["2021-01-01", "2021-01-02"])
|
pandas.to_datetime
|
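A minimal sketch of the pd.to_datetime call completed above, using the same two toy dates; the printed dtype is what ends up backing the time coordinate:
import pandas as pd
# pd.to_datetime parses ISO-8601 strings into a DatetimeIndex
times = pd.to_datetime(["2021-01-01", "2021-01-02"])
print(times.dtype)           # datetime64[ns]
print(times[1] - times[0])   # 1 days 00:00:00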
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 11:29:46 2018
@author: kazuki.onodera
parameters:
classes: [42, 52, 62, 67, 90]
days: [10, 20, 30]
aggfunc: [mean, median]
detected: [0, 1]
specz
date_from: 10 days before the peak
template augment: True
train, test augment: True
"""
import numpy as np
import pandas as pd
import os
from glob import glob
from multiprocessing import cpu_count, Pool
from itertools import combinations
import sys
argvs = sys.argv
import utils
PREF = 'f022'
os.system(f'rm ../data/t*_{PREF}*')
os.system(f'rm ../feature/t*_{PREF}*')
DAYS_FROM = 10
DAYS_TO = 10
DATE_AUGMENT = 2
class_SN = [42, 52, 62, 67, 90]
tr = pd.read_pickle('../data/train.pkl')
# =============================================================================
# template
# =============================================================================
tr_log = pd.read_pickle('../data/train_log.pkl')
tr_log = pd.merge(tr_log, tr[['object_id', 'hostgal_photoz', 'target']],
on='object_id', how='left')
tr_log = tr_log[(tr_log.target.isin(class_SN))].reset_index(drop=True)
# -DAYS_FROM ~ peak + DAYS_TO
idxmax = tr_log.groupby('object_id').flux.idxmax()
base = tr_log.iloc[idxmax][['object_id', 'date']]
li = []
for i in range(-DAYS_FROM, 0):
tmp = base.copy()
tmp['date'] += i
li.append(tmp)
lag = pd.concat(li)
lag = pd.merge(lag, tr_log, on=['object_id', 'date'], how='left')
lag = lag.sort_values(['object_id', 'date']).reset_index(drop=True)
li = []
for i in range(0, DAYS_TO):
tmp = base.copy()
tmp['date'] += i
li.append(tmp)
lead = pd.concat(li)
lead = pd.merge(lead, tr_log, on=['object_id', 'date'], how='left')
lead = lead[lead.object_id.isin(lag.object_id)].sort_values(['object_id', 'date']).reset_index(drop=True)
tr_log = pd.concat([lag, lead], ignore_index=True).sort_values(['object_id', 'date']).reset_index(drop=True)
# TODO: specz bin
# remove specz 2.0
#tr_log = tr_log[tr_log['hostgal_specz']>2.0]
template_log = tr_log.copy()
# used oid for template
oid_target = {}
for k,v in tr_log[['object_id' , 'target']].values:
oid_target[k] = v
target_oids = {}
for t in class_SN:
target_oids[t] = tr_log[tr_log.target==t].object_id.unique().tolist()
# =============================================================================
# def
# =============================================================================
def norm_flux_date(df):
# df.flux -= df.groupby(['object_id']).flux.transform('min')
df.flux /= df.groupby('object_id').flux.transform('max')
df.date -= df.groupby('object_id').date.transform('min')
norm_flux_date(template_log)
# augment
def augment(df, n):
if n > 0:
li = []
for i in range(1, n+1):
tmp = df.copy()
tmp['date'] += i
li.append(tmp)
tmp = df.copy()
tmp['date'] -= i
li.append(tmp)
tmp =
|
pd.concat(li)
|
pandas.concat
|
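A small self-contained sketch (toy light-curve frame, hypothetical values) of the date-shift augmentation pattern above, where shifted copies are stacked with pd.concat:
import pandas as pd
# toy stand-in for the per-object log; shift the date column and stack the copies
base = pd.DataFrame({"object_id": [1, 1], "date": [10, 11], "flux": [0.5, 1.0]})
shifted = [base.assign(date=base["date"] + i) for i in (-1, 0, 1)]
aug = pd.concat(shifted, ignore_index=True)
print(aug.shape)  # (6, 3)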
__author__ = 'lucabasa'
__version__ = '1.4.0'
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from source.torch_utils import seed_everything, MoADataset, TestDataset, train_fn, valid_fn, inference_fn
from source.process import add_pca, var_tr, process_data, scale_data
from source.analyze import plot_learning
from source.torch_utils import SmoothBCEwLogits
class Model(nn.Module):
def __init__(self, num_features, num_targets, hidden_size, dropout, lay_4=False):
super().__init__()
self.dropout = dropout
self.lay_4 = lay_4
self.batch_norm1 = nn.BatchNorm1d(num_features)
self.dropout1 = nn.Dropout(self.dropout)
self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))
self.batch_norm2 = nn.BatchNorm1d(hidden_size)
self.dropout2 = nn.Dropout(self.dropout)
self.dense2 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))
if self.lay_4:
self.batch_norm3 = nn.BatchNorm1d(hidden_size)
self.dropout3 = nn.Dropout(self.dropout)
self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))
self.batch_norm_end = nn.BatchNorm1d(hidden_size)
self.dropout_end = nn.Dropout(self.dropout)
self.dense_end = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets))
def forward(self, x):
x = self.batch_norm1(x)
x = self.dropout1(x)
x = F.leaky_relu(self.dense1(x), 1e-3)
x = self.batch_norm2(x)
x = self.dropout2(x)
x = F.leaky_relu(self.dense2(x), 1e-3)
if self.lay_4:
x = self.batch_norm3(x)
x = self.dropout3(x)
x = F.leaky_relu(self.dense3(x), 1e-3)
x = self.batch_norm_end(x)
x = self.dropout_end(x)
x = self.dense_end(x)
return x
def prepare_data(train_df, valid_df, test_df, target_cols, scaling, n_quantiles,
g_comp, c_comp, g_feat, c_feat, pca_add, thr):
train_df, valid_df, test_df = add_pca(train_df=train_df,
valid_df=valid_df,
test_df=test_df,
scaling=scaling, n_quantiles=n_quantiles,
g_comp=g_comp, c_comp=c_comp,
g_feat=g_feat, c_feat=c_feat, add=pca_add)
if pca_add:
train_df = process_data(data=train_df, features_g=g_feat, features_c=c_feat)
valid_df = process_data(data=valid_df, features_g=g_feat, features_c=c_feat)
test_df = process_data(data=test_df, features_g=g_feat, features_c=c_feat)
train_df, valid_df, test_df = var_tr(train_df=train_df,
valid_df=valid_df,
test_df=test_df,
thr=thr,
cat_cols=['sig_id','cp_type','cp_time','cp_dose'])
train_df = train_df.drop('cp_type', axis=1)
valid_df = valid_df.drop('cp_type', axis=1)
test_df = test_df.drop('cp_type', axis=1)
train_df['time_dose'] = train_df['cp_time'].astype(str)+train_df['cp_dose']
valid_df['time_dose'] = valid_df['cp_time'].astype(str)+valid_df['cp_dose']
test_df['time_dose'] = test_df['cp_time'].astype(str)+test_df['cp_dose']
train_df =
|
pd.get_dummies(train_df, columns=['cp_time','cp_dose','time_dose'])
|
pandas.get_dummies
|
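A minimal sketch (hypothetical two-row frame) of the pd.get_dummies call completed above, one-hot encoding the dose/time columns and their concatenation:
import pandas as pd
df = pd.DataFrame({"cp_time": [24, 48], "cp_dose": ["D1", "D2"]})
df["time_dose"] = df["cp_time"].astype(str) + df["cp_dose"]
encoded = pd.get_dummies(df, columns=["cp_time", "cp_dose", "time_dose"])
print(list(encoded.columns))
# ['cp_time_24', 'cp_time_48', 'cp_dose_D1', 'cp_dose_D2', 'time_dose_24D1', 'time_dose_48D2']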
import numpy as np
import pytest
from pandas._libs import join as _join
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = _join.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
def test_left_join_indexer_unique():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = _join.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_left_outer_join_bug():
left = np.array(
[
0,
1,
0,
1,
1,
2,
3,
1,
0,
2,
1,
2,
0,
1,
1,
2,
3,
2,
3,
2,
1,
1,
3,
0,
3,
2,
3,
0,
0,
2,
3,
2,
0,
3,
1,
3,
0,
1,
3,
0,
0,
1,
0,
3,
1,
0,
1,
0,
1,
1,
0,
2,
2,
2,
2,
2,
0,
3,
1,
2,
0,
0,
3,
1,
3,
2,
2,
0,
1,
3,
0,
2,
3,
2,
3,
3,
2,
3,
3,
1,
3,
2,
0,
0,
3,
1,
1,
1,
0,
2,
3,
3,
1,
2,
0,
3,
1,
2,
0,
2,
],
dtype=np.int64,
)
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left), dtype=np.int64)
exp_ridx = -np.ones(len(left), dtype=np.int64)
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.left_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_outer_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.outer_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
|
tm.assert_almost_equal(ridx, exp_ridx)
|
pandas._testing.assert_almost_equal
|
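A short sketch of the completed assertion helper; note that pandas._testing is an internal module aimed at pandas' own test suite, so its behaviour may change between releases:
import numpy as np
import pandas._testing as tm
# passes: the arrays agree within the default rtol/atol
tm.assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-9]))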
import glob
from functools import partial
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import albumentations as albu
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import scipy
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, ListConfig, OmegaConf
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader
from src.dataset.dataset import WaveformDataset
from src.dataset.utils import calc_triangle_center, get_groundtruth
from src.postprocess.postporcess import apply_gauss_smoothing, apply_kf_smoothing
from src.postprocess.visualize import add_distance_diff
IMG_MEAN = (0.485, 0.456, 0.406, 0.485, 0.456, 0.406, 0.485, 0.456, 0.406)
IMG_STD = (0.229, 0.224, 0.225, 0.229, 0.224, 0.225, 0.485, 0.456, 0.406)
class GsdcDatamodule(pl.LightningDataModule):
def __init__(
self,
conf: DictConfig,
val_fold: int = 0,
batch_size: int = 64,
num_workers: int = 16,
aug_mode: int = 0,
is_debug: bool = False,
) -> None:
super().__init__()
self.conf = conf
self.batch_size = batch_size
self.aug_mode = aug_mode
self.num_workers = num_workers
self.is_debug = is_debug
self.val_fold = val_fold
self.input_width = conf["input_width"]
self.num_inchannels = len(conf["stft_targets"]) * 3
self.img_mean = np.array(IMG_MEAN[: self.num_inchannels])
self.img_std = np.array(IMG_STD[: self.num_inchannels])
def prepare_data(self):
# check
assert Path(get_original_cwd(), self.conf["data_dir"]).is_dir()
def _onehot_to_set(self, onehot: np.ndarray):
return set(np.where(onehot == 1)[0].astype(str).tolist())
def _use_cached_kalman(self, df: pd.DataFrame, is_test=False) -> pd.DataFrame:
print("apply kalman filttering")
processed_kf_path = (
"../input/kf_test.csv" if is_test else "../input/kf_train.csv"
)
processed_kf_path = Path(get_original_cwd(), processed_kf_path)
try:
df = pd.read_csv(processed_kf_path)
except Exception:
df = apply_kf_smoothing(df=df)  # smoothing leaves NaN in each phone's first/last row
df.to_csv(processed_kf_path, index=False)
return df
def setup(self, stage: Optional[str] = None):
# Assign Train/val split(s) for use in Dataloaders
conf = self.conf
if stage == "fit" or stage is None:
# read data
data_dir = Path(get_original_cwd(), self.conf["data_dir"])
self.train_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
df_path = pd.read_csv(
Path(get_original_cwd(), "./src/meta_data/path_meta_info.csv")
)
# merge ground truth
self.train_df = self.train_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
if self.conf.apply_kalman_filtering:
self.train_df = self._use_cached_kalman(df=self.train_df, is_test=False)
# there are NaNs at the beginning and end...
if self.conf.stft_targets[0].find("center") > -1:
self.train_df = calc_triangle_center(
df=self.train_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
self.train_df = add_distance_diff(df=self.train_df, is_test=False)
# train/val split
df_path = make_split(df=df_path, n_splits=3)
self.train_df = merge_split_info(data_df=self.train_df, split_df=df_path)
self.train_df = choose_paths(df=self.train_df, target=self.conf.target_path)
train_df = self.train_df.loc[self.train_df["fold"] != self.val_fold, :]
val_df = self.train_df.loc[self.train_df["fold"] == self.val_fold, :]
if self.conf.data_aug_with_kf:
train_phone = train_df.phone.unique()
if self.conf.apply_kalman_filtering:
orig_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
orig_df = orig_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
else:
orig_df = self._use_cached_kalman(df=train_df, is_test=False)
orig_df = orig_df.loc[orig_df.phone.isin(train_phone)]
if self.conf.stft_targets[0].find("center") > -1:
orig_df = calc_triangle_center(
df=orig_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
orig_df = add_distance_diff(df=orig_df, is_test=False)
split_info_df = train_df.loc[
:, ["phone", "millisSinceGpsEpoch", "location", "fold", "length"]
]
orig_df = pd.merge(
left=orig_df,
right=split_info_df,
on=["phone", "millisSinceGpsEpoch"],
)
orig_df["phone"] = orig_df["phone"] + "_kf_aug"
train_df = pd.concat([train_df, orig_df], axis=0).reset_index(drop=True)
if self.conf.data_aug_with_gaussian:
train_phone = train_df.phone.unique()
orig_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
orig_df = orig_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
orig_df = orig_df.loc[orig_df.phone.isin(train_phone)]
orig_df = apply_gauss_smoothing(
df=orig_df, params={"sz_1": 0.85, "sz_2": 5.65, "sz_crit": 1.5}
)
if self.conf.stft_targets[0].find("center") > -1:
orig_df = calc_triangle_center(
df=orig_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
orig_df = add_distance_diff(df=orig_df, is_test=False)
split_info_df = train_df.loc[
:, ["phone", "millisSinceGpsEpoch", "location", "fold", "length"]
]
orig_df = pd.merge(
left=orig_df,
right=split_info_df,
on=["phone", "millisSinceGpsEpoch"],
)
orig_df["phone"] = orig_df["phone"] + "_gauss"
train_df = pd.concat([train_df, orig_df], axis=0).reset_index(drop=True)
train_df, train_list = make_sampling_list(
df=train_df,
input_width=conf["input_width"],
sampling_delta=conf["train_sampling_delta"],
stft_targets=conf["stft_targets"],
is_test=False,
remove_starts=True,
remove_ends=False
if self.conf.stft_targets[0].find("prev") > -1
else True,
)
train_sequences = get_phone_sequences(
df=train_df, targets=conf["stft_targets"], is_test=False
)
val_df, val_list = make_sampling_list(
df=val_df,
input_width=conf["input_width"],
sampling_delta=conf["val_sampling_delta"],
stft_targets=conf["stft_targets"],
is_test=False,
remove_starts=True,
remove_ends=False
if self.conf.stft_targets[0].find("prev") > -1
else True,
)
val_df.to_csv("./val.csv")
val_sequences = get_phone_sequences(
df=val_df, targets=conf["stft_targets"], is_test=False
)
self.train_dataset = WaveformDataset(
sampling_list=train_list,
phone_sequences=train_sequences,
stft_targets=conf["stft_targets"],
stft_params=conf["stft_params"],
input_width=conf["input_width"],
image_transforms=self.train_transform(),
is_test=False,
gt_as_mask=self.conf.gt_as_mask,
rand_freq=self.conf.rand_freq,
rand_ratio=self.conf.rand_ratio,
sigma=self.conf.sigma,
)
self.val_dataset = WaveformDataset(
sampling_list=val_list,
phone_sequences=val_sequences,
stft_targets=conf["stft_targets"],
stft_params=conf["stft_params"],
input_width=conf["input_width"],
image_transforms=self.val_transform(),
is_test=False,
gt_as_mask=self.conf.gt_as_mask,
)
self.plot_dataset(self.train_dataset)
self.train_df = train_df
self.val_df = val_df
# Assign Test split(s) for use in Dataloaders
if stage == "test" or stage is None:
# read data
data_dir = Path(get_original_cwd(), self.conf["data_dir"])
if self.conf.test_with_val:
self.train_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
df_path = pd.read_csv(
Path(get_original_cwd(), "../input/path_meta_info.csv")
)
if self.conf.apply_kalman_filtering:
self.train_df = self._use_cached_kalman(
df=self.train_df, is_test=False
)
# train/val split
df_path = make_split(df=df_path, n_splits=3)
self.train_df = merge_split_info(
data_df=self.train_df, split_df=df_path
)
self.test_df = self.train_df.loc[
self.train_df["fold"] == self.val_fold, :
]
else:
self.test_df = pd.read_csv(data_dir / "baseline_locations_test.csv")
if self.conf.apply_kalman_filtering:
self.test_df = self._use_cached_kalman(
df=self.test_df, is_test=True
)
# there are NaNs at the beginning and end...
if self.conf.stft_targets[0].find("center") > -1:
self.test_df = calc_triangle_center(
df=self.test_df, targets=["latDeg", "lngDeg"],
)
else:
self.test_df = add_distance_diff(df=self.test_df, is_test=True)
if self.conf.tta_with_kf:
test_phone = self.test_df.phone.unique()
if self.conf.apply_kalman_filtering:
orig_df = pd.read_csv(data_dir / "baseline_locations_test.csv")
orig_df = orig_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
else:
orig_df = self._use_cached_kalman(df=self.test_df, is_test=True)
orig_df = orig_df.loc[orig_df.phone.isin(test_phone)]
if self.conf.stft_targets[0].find("center") > -1:
orig_df = calc_triangle_center(
df=orig_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
orig_df = add_distance_diff(df=orig_df, is_test=True)
split_info_df = self.test_df.loc[
:, ["phone", "millisSinceGpsEpoch", "location", "fold", "length"]
]
orig_df = pd.merge(
left=orig_df,
right=split_info_df,
on=["phone", "millisSinceGpsEpoch"],
)
orig_df["phone"] = orig_df["phone"] + "_kf_aug"
self.test_df = pd.concat([self.test_df, orig_df], axis=0).reset_index(
drop=True
)
self.test_df, test_list = make_sampling_list(
df=self.test_df,
input_width=conf["input_width"],
sampling_delta=conf["test_sampling_delta"],
stft_targets=conf["stft_targets"],
is_test=True,
remove_starts=True,
remove_ends=False
if self.conf.stft_targets[0].find("prev") > -1
else True,
)
self.test_df.to_csv("./test_input.csv", index=False)
test_sequences = get_phone_sequences(
df=self.test_df, targets=conf["stft_targets"], is_test=True
)
self.test_dataset = WaveformDataset(
sampling_list=test_list,
phone_sequences=test_sequences,
stft_targets=conf["stft_targets"],
stft_params=conf["stft_params"],
input_width=conf["input_width"],
image_transforms=self.test_transform(),
is_test=True,
gt_as_mask=self.conf.gt_as_mask,
)
self.plot_dataset(self.test_dataset)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
shuffle=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def train_transform(self):
return self.get_transforms(mode=self.aug_mode)
def val_transform(self):
return self.get_transforms(mode=0)
def test_transform(self):
return self.get_transforms(mode=0)
def get_transforms(self, mode: int = 0) -> albu.Compose:
self.input_size = WaveformDataset.calc_stft_resize(
input_width=self.conf.input_width, n_fft=self.conf.stft_params.n_fft
)
def pad_image(
image: np.ndarray,
input_size: List[int],
constant_values: float = 255.0,
**kwargs,
):
pad_size = (input_size[0] - image.shape[0], input_size[1] - image.shape[1])
if np.any(np.array(pad_size) > 0):
image = np.pad(
image, [[0, pad_size[0]], [0, pad_size[1]], [0, 0]], mode="reflect",
)
# image[:, :, orig_width:] = constant_values
return image
add_pad_img = partial(
pad_image, input_size=self.input_size, constant_values=255.0
)
add_pad_mask = partial(
pad_image, input_size=self.input_size, constant_values=1.0
)
if mode == 0:
transforms = [
albu.Lambda(image=add_pad_img, mask=add_pad_mask, name="padding"),
albu.Normalize(mean=self.img_mean, std=self.img_std),
]
elif mode == 1:
transforms = [
albu.HorizontalFlip(p=0.5),
albu.Lambda(image=add_pad_img, mask=add_pad_mask, name="padding"),
albu.Normalize(mean=self.img_mean, std=self.img_std),
]
else:
raise NotImplementedError
if self.conf.gt_as_mask:
additional_targets = {"target_image": "mask"}
else:
additional_targets = {"target_image": "image"}
composed = albu.Compose(transforms, additional_targets=additional_targets)
return composed
def plot_dataset(
self, dataset, plot_num: int = 3, df: Optional[pd.DataFrame] = None,
) -> None:
inds = np.random.choice(len(dataset), plot_num)
h_, w_ = get_input_size_wo_pad(
n_fft=self.conf.stft_params.n_fft, input_width=self.conf.input_width
)
for i in inds:
plt.figure(figsize=(16, 8))
data = dataset[i]
im = data["image"].numpy().transpose(1, 2, 0)
im = im[:h_, :w_]
# === PLOT ===
nrows = 3
ncols = 3
fig, ax = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(12, 6), sharey=True, sharex=True,
)
fig.suptitle(
"_".join(
[
data["phone"],
str(data["millisSinceGpsEpoch"]),
str(data["phone_time"]),
]
)
)
cnum = len(self.conf["stft_targets"])
D_abs, D_cos, D_sin = WaveformDataset.handle_stft_normalize(
img=im,
cnum=cnum,
is_encode=False,
is_db=self.conf["stft_params"]["is_db"],
img_mean=self.img_mean,
img_std=self.img_std,
)
for stft_ind, stft_name in enumerate(self.conf["stft_targets"]):
show_stft(
conf=self.conf,
D_abs=D_abs[..., stft_ind],
D_cos=D_cos[..., stft_ind],
D_sin=D_sin[..., stft_ind],
ax=ax,
stft_ind=stft_ind,
stft_name=stft_name,
)
if data["target_image"].shape[0] != 0:
im = data["target_image"].numpy().transpose(1, 2, 0)
im = im[:h_, :w_]
# === PLOT ===
nrows = 3
ncols = 3
fig, ax = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(12, 6), sharey=True, sharex=True,
)
fig.suptitle(
"_".join(
[
data["phone"],
str(data["millisSinceGpsEpoch"]),
str(data["phone_time"]),
]
)
)
cnum = len(self.conf["stft_targets"])
D_abs, D_cos, D_sin = WaveformDataset.handle_stft_normalize(
img=im,
cnum=cnum,
is_encode=False,
is_db=self.conf["stft_params"]["is_db"],
img_mean=self.img_mean,
img_std=self.img_std,
gt_as_mask=self.conf.gt_as_mask,
)
for stft_ind, stft_name in enumerate(self.conf["stft_targets"]):
show_stft(
conf=self.conf,
D_abs=D_abs[..., stft_ind],
D_cos=D_cos[..., stft_ind],
D_sin=D_sin[..., stft_ind],
ax=ax,
stft_ind=stft_ind,
stft_name=stft_name.replace("_diff", "_gt_diff"),
)
def get_input_size_wo_pad(n_fft: int = 256, input_width: int = 128) -> Tuple[int, int]:
input_height = n_fft // 2 + 1
input_width = input_width + 1
return input_height, input_width
def show_stft(
conf: DictConfig,
D_abs: np.ndarray,
D_cos: np.ndarray,
D_sin: np.ndarray,
ax: plt.axes,
stft_ind: int,
stft_name: str = None,
) -> None:
for nrow, mat in enumerate([D_abs, D_cos, D_sin]):
img = librosa.display.specshow(
mat,
sr=1,
hop_length=conf["stft_params"]["hop_length"],
x_axis="time",
y_axis="hz",
cmap="cool",
ax=ax[nrow][stft_ind],
)
plt.colorbar(img, ax=ax[nrow][stft_ind])
ax[0][stft_ind].set_title(stft_name)
def choose_paths(df: pd.DataFrame, target: str = "short") -> pd.DataFrame:
if target is not None:
return df.loc[df["length"].apply(lambda x: x.split("-")[0]) == target, :]
else:
return df
def make_split(df: pd.DataFrame, n_splits: int = 3) -> pd.DataFrame:
df["fold"] = -1
df["groups"] = df["location"].apply(lambda x: x.split("-")[0])
df["groups"] = df["groups"] + "_" + df["length"]
# gkf = GroupKFold(n_splits=n_splits)
gkf = StratifiedKFold(n_splits=n_splits)
for i, (train_idx, valid_idx) in enumerate(gkf.split(df, df["groups"])):
df.loc[valid_idx, "fold"] = i
return df
def merge_split_info(data_df: pd.DataFrame, split_df: pd.DataFrame) -> pd.DataFrame:
split_col = ["collectionName", "location", "length", "fold"]
df = pd.merge(data_df, split_df.loc[:, split_col], on="collectionName")
return df
def interpolate_vel(
velocity: np.ndarray,
base_time: np.ndarray,
ref_time: np.ndarray,
drop_first_vel: bool = True,
) -> np.ndarray:
if velocity.ndim == 1:
raise NotImplementedError
if ref_time.max() > base_time.max():
assert ref_time.max() - base_time.max() <= 1000
base_time = np.pad(
base_time, [0, 1], mode="constant", constant_values=base_time.max() + 1000
)
velocity = np.pad(velocity, [[0, 1], [0, 0]], mode="edge")
if drop_first_vel:
assert np.all(np.isnan(velocity[0])) or np.all(velocity[0] == 0.0)
velocity = velocity[
1:,
]
# (sequence, feats)
rel_posi = np.cumsum(velocity, axis=0)
rel_posi = np.pad(rel_posi, [[1, 0], [0, 0]], mode="constant", constant_values=0.0)
rel_posi_ref = scipy.interpolate.interp1d(base_time, rel_posi, axis=0)(ref_time)
vel_ref = np.diff(rel_posi_ref, axis=0)
if drop_first_vel:
vel_ref = np.pad(
vel_ref, [[1, 0], [0, 0]], mode="constant", constant_values=np.nan
)
return vel_ref
def make_sampling_list(
df: pd.DataFrame,
input_width: int = 256,
sampling_delta: int = 1,
remove_starts: bool = True,
remove_ends: bool = False,
stft_targets: List[str] = ["latDeg_diff_prev", "lngDeg_diff_prev"],
is_test: bool = False,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
sampling_list = []
initial_time_offset = 0
dfs = []
if isinstance(stft_targets, ListConfig):
stft_targets = OmegaConf.to_container(stft_targets)
if not is_test:
gt_targets = [target.replace("_diff", "_gt_diff") for target in stft_targets]
stft_targets = stft_targets + gt_targets
if remove_starts:
# skip the initial sample
initial_time_offset += 1
for phone, df_ in df.groupby("phone"):
# length includes the min value, so we need "+1"
second_length = (
np.ceil(
(df_["millisSinceGpsEpoch"].max() - df_["millisSinceGpsEpoch"].min())
/ 1000,
).astype(np.int64)
+ 1
)
inter_gps_epochs = (
np.arange(0, second_length, dtype=np.int64,) * 1000
+ df_["millisSinceGpsEpoch"].min()
)
assert inter_gps_epochs[-1] // 1000 >= df_["millisSinceGpsEpoch"].max() // 1000
inter_targets = interpolate_vel(
velocity=df_.loc[:, stft_targets].fillna(0.0).values,
base_time=df_.loc[:, "millisSinceGpsEpoch"].values,
ref_time=inter_gps_epochs,
drop_first_vel=True,
)
for_df = {
"phone": np.repeat(phone, inter_gps_epochs.shape[0]),
"millisSinceGpsEpoch": inter_gps_epochs,
}
for_df.update({key: inter_targets[:, i] for i, key in enumerate(stft_targets)})
inter_df = pd.DataFrame(for_df)
end_point = (
second_length - input_width * 2
if remove_ends
else second_length - input_width
)
samplings = np.linspace(
initial_time_offset,
end_point,
np.ceil((end_point - initial_time_offset) / sampling_delta + 1).astype(
np.int64
),
dtype=np.int64,
endpoint=True,
)
inter_df["phone_time"] = inter_df.reset_index().index.values
assert inter_df.iloc[samplings[-1] :].shape[0] == input_width
sampling_list.append(
inter_df.loc[:, ["phone", "millisSinceGpsEpoch", "phone_time"]].iloc[
samplings
]
)
if inter_gps_epochs[-1] > df_["millisSinceGpsEpoch"].max():
pass
else:
if np.any(np.diff(df_["millisSinceGpsEpoch"]) != 1000):
if np.all(np.diff(df_["millisSinceGpsEpoch"]) % 1000 == 0):
assert np.all(
inter_df.loc[:, stft_targets].values[-1]
== df_.loc[:, stft_targets].values[-1]
)
dfs.append(inter_df)
sampling_list = pd.concat(sampling_list)
df =
|
pd.concat(dfs)
|
pandas.concat
|
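A minimal sketch of the final pd.concat above: the per-phone interpolated frames collected in dfs are stacked row-wise into one frame (toy data, hypothetical phone ids):
import pandas as pd
dfs = [
    pd.DataFrame({"phone": "Pixel4_a", "millisSinceGpsEpoch": [0, 1000]}),
    pd.DataFrame({"phone": "Pixel4_b", "millisSinceGpsEpoch": [0, 1000]}),
]
combined = pd.concat(dfs).reset_index(drop=True)
print(len(combined))  # 4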
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 21:25:44 2021
@author: laukkara
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.cluster import DBSCAN
import scipy.stats as ss
replacements = {'USP': 'PRP',
'UST': 'PRT',
'BSWE': 'BSW',
'PICB': 'EHR',
'TRC': 'BSR',
'SW': 'PVP',
'USH': 'USH',
'YP1': 'YP',
'YP3': 'YP'}
def get_new_names(old_names):
new_names = []
for old_name in old_names:
items_list = old_name.split('_')
first = items_list[0]
if 'north' in first:
first = first[0:-5]
elif 'south' in first:
first = first[0:-5]
else:
print(first)
str_dummy = '{}, {}, {}'.format(replacements[first],
int(items_list[-2].replace('df','')) + 1,
items_list[-1])
new_names.append(str_dummy)
return(new_names)
#############################################
def create_X(data, case_filters, y_yes_filters, y_not_filters):
# This function filters specific case+mp pairs
list_values = []
list_names = []
for idx_case, case in enumerate(data):
# loop through cases
# Check that location, climate and year columns are identical
if idx_case == 0:
ids1 = data[case].loc[:, ['location', 'climate', 'year']].copy()
else:
ids2 = data[case].loc[:, ['location', 'climate', 'year']].copy()
if not ids1.equals(ids2):
print('NOT EQUAL:', case)
for idx_column, column in enumerate(data[case].columns):
# loop through columns
cond_case_names = all(x in case for x in case_filters)
cond_yes_column_names = all(x in column for x in y_yes_filters)
cond_not_column_names = all(x not in column for x in y_not_filters)
cond_all = cond_case_names and cond_yes_column_names and cond_not_column_names
if cond_all:
column_str = '{}__{}'.format(case, column)
list_values.append(data[case].loc[:, column])
list_names.append(column_str)
df_X = pd.concat(list_values, axis=1, keys=list_names)
df_X =
|
pd.concat([df_X, ids1], axis=1)
|
pandas.concat
|
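A small sketch of the axis=1 concatenation completed above: the id columns are appended alongside the stacked measurement columns (toy values, hypothetical names):
import pandas as pd
df_X = pd.DataFrame({"case__col": [0.1, 0.2]})
ids1 = pd.DataFrame({"location": ["A", "B"], "climate": ["c1", "c2"], "year": [2020, 2021]})
out = pd.concat([df_X, ids1], axis=1)
print(out.columns.tolist())  # ['case__col', 'location', 'climate', 'year']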
#! /usr/bin/env python
#
import os
import warnings
from glob import glob
import pandas
import numpy as np
import sncosmo
from . import base
_INDIR = "saltparam"
def read_saltresult_directory(directory):
files = glob(os.path.join(directory,"*.pkl"))
target_data = {}
for f_ in files:
name = os.path.basename(f_).split(".")[0]
try:
target_data[name] = pandas.read_pickle(f_)
except:
warnings.warn(f"ERROR loading {f_}")
return
|
pandas.DataFrame(target_data)
|
pandas.DataFrame
|
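A minimal sketch (hypothetical target names and SALT parameter Series) of the pandas.DataFrame(target_data) call completed above: each dict key becomes a column, each Series index becomes a row:
import pandas as pd
target_data = {
    "SN2020abc": pd.Series({"z": 0.05, "x1": 0.3}),
    "SN2020xyz": pd.Series({"z": 0.12, "x1": -1.1}),
}
df = pd.DataFrame(target_data)
print(df.shape)  # (2, 2): parameters as rows, targets as columns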
from datetime import datetime, timedelta
import os
import pandas as pd
THESHOLDS = [timedelta(seconds=120), timedelta(hours=3), timedelta(hours=7), timedelta(hours=24), timedelta(days=2), timedelta(
days=4), timedelta(days=8), timedelta(days=16), timedelta(days=28), timedelta(days=90), timedelta(days=180)]
class Card:
def __init__(self, id, question, answer, num=0, due_date=datetime.now(), active=False):
self.id = id
self.question = question
self.answer = answer
self.num = num
self.due_date = due_date
self.active = active
# self.no_incorrect = 0
# self.no_of_tries = 0
def increment(self):
# self.no_of_tries += 1
if self.num < len(THESHOLDS):
self.num = self.num + 1
else:
self.num = len(THESHOLDS)
def decrement(self):
# self.no_of_tries += 1
# punish if wrong after 28 days
if self.num >= 8:
self.num -= 6
elif self.num >= 0:
self.num = self.num - 1
# self.no_incorrect += 1
else:
self.num = 0
# self.no_incorrect += 1
def update_due_date(self):
try:
self.due_date = datetime.now() + THESHOLDS[self.num]
except Exception as ex:
self.due_date = datetime.now() + THESHOLDS[self.num-1]
def toggle_active(self):
self.active = not self.active
def reset_date(self, seconds=600):
self.due_date = datetime.now()+timedelta(seconds=seconds)
def __repr__(self):
return "{0} {1} {2} {3} {4}".format(self.id, self.question, self.num, self.active, self.due_date)
class Deck():
"""
Deck will be a file , with all the cards.
"""
def __init__(self, fname = "words.csv"):
self.fname = fname
self.cards = None
self.nextid = 0
self._get_all_cards()
def _get_words(self):
nextid = 0
if os.path.exists(self.fname):
df = pd.read_csv(self.fname, infer_datetime_format=True,
parse_dates=["due_date"], index_col=0)
df = df.sort_values(by="due_date", ascending=False)
wordlists = [Card(index, row.question, row.answer, num=row.num,
due_date=row.due_date, active=row.active) for index, row in df.iterrows()]
self._get_nextid(df)
else:
wordlists = []
return wordlists
def _get_nextid(self, df):
df.sort_values(by="id", inplace=True)
self.nextid = df.index[-1]+1
def _get_all_cards(self):
if self.cards is None:
self.cards = self._get_words()
def is_time_to_add_words(self):
next_due_date = self.get_next_review_day()
seconds_to_next_review = next_due_date-datetime.now()
# check whether the next review is at least 5 hours away
return seconds_to_next_review.seconds >= 60*60*5
def check_next_active(self):
num=2 # add 5 words at a time
if not self.is_time_to_add_words():
return
selected_word = self.get_inactive_cards()
if len(selected_word) > num:
selected_word = selected_word[:num]
for word in selected_word:
word.toggle_active()
word.due_date = datetime.now()+timedelta(seconds=600)
self.save_words(selected_word)
def save(self):
if self.cards:
df = pd.DataFrame(data=[(word.id, word.question, word.answer, word.due_date, word.num, word.active)
for word in self.cards], columns=["id","question", "answer", "due_date", "num", "active"])
df.sort_values(by="id", inplace=True)
df.to_csv(self.fname, index=False)
self._get_nextid(df)
def save_words(self, wordslist):
for word in wordslist:
for aword in self.cards:
if aword.id == word.id:
aword.num = word.num
aword.update_due_date()
self.save()
def get_due_cards(self):
self._get_all_cards()
now = datetime.now()
selected_word = [
word for word in self.cards if word.due_date < now and word.active]
if len(selected_word) < 5: # if less than five,check next update
self.check_next_active()
return selected_word
def get_inactive_cards(self):
self._get_all_cards()
selected_word = [word for word in self.cards if not word.active]
return selected_word
def get_active_cards(self):
self._get_all_cards()
selected_word = [word for word in self.cards if word.active]
return selected_word
def reload_cards(self):
self.cards = self._get_words()
def __repr__(self):
return "{} deck has {} cards".format(self.fname, len(self.cards))
def _add_card(self, question, answer, active=False):
card = Card(id = self.nextid, question=question, answer=answer, active=active)
self.nextid = self.nextid + 1
self.cards.append(card)
def add_card(self, question, answer, active=False, save=True):
self._get_all_cards()
self._add_card(question, answer,active)
if save:
self.save()
def get_next_review_day(self):
df =
|
pd.read_csv(self.fname, infer_datetime_format=True, parse_dates=["due_date"], index_col=0)
|
pandas.read_csv
|
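A self-contained sketch of the pd.read_csv call completed above, using an in-memory CSV with the columns the Deck class expects; infer_datetime_format is deprecated in recent pandas, so it is omitted here:
import io
import pandas as pd
csv = io.StringIO(
    "id,question,answer,due_date,num,active\n"
    "0,hola,hello,2024-01-01 10:00:00,0,True\n"
)
df = pd.read_csv(csv, parse_dates=["due_date"], index_col=0)
print(df.dtypes["due_date"])  # datetime64[ns]
print(df.index.name)          # id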
import pandas as pd
import numpy as np
from copy import deepcopy
from collections import Counter
from sklearn.metrics import calinski_harabasz_score
from sklearn.cluster import (
KMeans,
AgglomerativeClustering,
MiniBatchKMeans
)
from minisom import MiniSom
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import (
StratifiedKFold,
StratifiedShuffleSplit
)
from imblearn.under_sampling.base import BaseCleaningSampler
from .utils import get_2Dcoordinates_matrix
from sklearn.ensemble import IsolationForest
################################################################################
# iForest
################################################################################
class PerClassiForest(BaseCleaningSampler):
def __init__(self,
n_estimators=100,
max_samples='auto',
contamination=0.1,
max_features=1.0,
bootstrap=False,
n_jobs=None,
behaviour='new',
random_state=None,
verbose=0,
warm_start=False
):
self.n_estimators = n_estimators
self.max_samples = max_samples
self.contamination = contamination
self.max_features = max_features
self.bootstrap = bootstrap
self.n_jobs = n_jobs
self.behaviour = behaviour
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.iForest_ = IsolationForest(
n_estimators = self.n_estimators,
max_samples = self.max_samples,
contamination = self.contamination,
max_features = self.max_features,
bootstrap = self.bootstrap,
n_jobs = self.n_jobs,
behaviour = self.behaviour,
random_state = self.random_state,
verbose = self.verbose,
warm_start = self.warm_start
)
def fit(self, X, y):
self.iforests = {}
#outcome = np.zeros(X.shape[0])
for label in np.unique(y):
iforest = deepcopy(self.iForest_)
#outcome[y==label] = iforest.fit_predict(X[y==label])
self.iforests[label] = iforest.fit(X[y==label])
return self
def resample(self, X, y):
outcome = np.zeros(X.shape[0])
for label in np.unique(y):
outcome[y==label] = self.iforests[label].predict(X[y==label])
return X[outcome==1], y[outcome==1]
def _fit_resample(self, X, y):
self.iforests = {}
outcome = np.zeros(X.shape[0])
for label in np.unique(y):
iforest = deepcopy(self.iForest_)
outcome[y==label] = iforest.fit_predict(X[y==label])
self.iforests[label] = iforest.fit(X[y==label])
return X[outcome==1], y[outcome==1]
def fit_resample(self, X, y):
return self._fit_resample(X, y)
################################################################################
# Paris new
################################################################################
class ParisDataFiltering(BaseCleaningSampler):
def __init__(self, k_max=6, random_state=None):
self.k_max = k_max
self.random_state = random_state
def fit(self, X, y, ids=None):
return self
def resample(self, X, y, ids=None):
if ids is None:
ids=y
status = np.zeros(y.shape)*np.nan
cluster = np.zeros(y.shape)*np.nan
for pol_id in np.unique(ids):
_labels = _find_optimal_k_and_cluster(X=X[ids==pol_id], k_max=self.k_max, random_state=self.random_state)
cluster[ids==pol_id] = _labels
status[ids==pol_id] = get_dominant_pixels(_labels)
final_status = np.zeros(y.shape).astype(bool)
for label in np.unique(y):
_final_status = final_status[y==label]
_clu = cluster[y==label][status[y==label].astype(bool)]
_ids = ids[y==label][status[y==label].astype(bool)]
_ban = X[y==label][status[y==label].astype(bool)]
unique_ids = np.unique(_ids)
b_dist = np.zeros(unique_ids.shape)*np.nan
for i, polygon_cluster_id in enumerate(unique_ids):
b = _ban[_ids==polygon_cluster_id]
b_dist[i] = Bhattacharyya(_ban, b)
ranks = b_dist.argsort().argsort()
accepted = unique_ids[ranks<int(np.ceil(ranks.shape[0]*.65))]
_final_status[status[y==label].astype(bool)] = np.isin(_ids, accepted)
final_status[y==label] = _final_status
return X[final_status]
def _fit_resample(self, X, y, ids=None):
return self.resample(X, y, ids)
def fit_resample(self, X, y, ids=None):
return self.resample(X, y, ids)
def _find_optimal_k_and_cluster(X, k_max=12, random_state=None):
label_list = []
CH_score = []
for k in range(2,k_max+1):
if X.shape[0] > k:
labels = KMeans(n_clusters=k, n_init=10, max_iter=300, random_state=random_state, n_jobs=None).fit_predict(X)
score = calinski_harabasz_score(X, labels)
label_list.append(labels)
CH_score.append(score)
return label_list[np.argmax(CH_score)]
def get_dominant_pixels(labels):
return labels==Counter(labels).most_common(1)[0][0]
def Bhattacharyya(a, b):
a_mean = np.expand_dims(a.mean(axis=0), 1)
a_cov = np.cov(a.T)
b_mean = np.expand_dims(b.mean(axis=0), 1)
b_cov = np.cov(b.T)
sigma = (a_cov + b_cov)/2
sigma_inv = np.linalg.inv(sigma)
term_1 = (1/8)*np.dot(np.dot((a_mean-b_mean).T,sigma_inv),(a_mean-b_mean))
#term_2 = (1/2)*np.log(np.linalg.det(sigma)/np.sqrt(np.linalg.det(a_cov)*np.linalg.det(b_cov)))
#return float(np.squeeze(term_1+term_2))
return term_1
################################################################################
# Filter based methods
################################################################################
class MBKMeansFilter(BaseCleaningSampler):
"""My own method"""
def __init__(self, n_splits=5, granularity=5, method='obs_percent', threshold=0.5, random_state=None):
assert method in ['obs_percent', 'mislabel_rate'], 'method must be either \'obs_percent\' or \'mislabel_rate\''
super().__init__(sampling_strategy='all')
self.n_splits = n_splits
self.granularity = granularity
self.method = method
self.threshold = threshold
self.random_state = random_state
def _fit_resample(self, X, y, filters):
#assert X.shape[0]==y.shape[0], 'X and y must have the same length.'
## cluster data
#print('n_splits:', self.n_splits, ', granularity:', self.granularity, ', method:', self.method, ', threshold:', self.threshold, ', random_state:', self.random_state)
self.filters = deepcopy(filters)
index = np.arange(len(y))
clusters_list = []
index_list = []
self.kmeans = {}
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters, kmeans = self._KMeans_clustering(X_label)
self.kmeans[analysis_label] = kmeans
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
self.stratifiedkfold = StratifiedKFold(n_splits = self.n_splits, shuffle=True, random_state=self.random_state)
self.filter_list = {}
filter_outputs = {}
for n, (_, split) in enumerate(self.stratifiedkfold.split(X, y_)):
for name, clf in self.filters:
classifier = deepcopy(clf)
classifier.fit(X[split], y_[split])
filter_outputs[f'filter_{n}_{name}'] = classifier.predict(X)
self.filter_list[f'{n}_{name}'] = classifier
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## crunch data
mislabel_col =
|
pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
|
pandas.Series
|
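A minimal sketch of the pd.Series construction completed above: per-observation mislabel rates keyed by the integer index and given an explicit name (toy values):
import numpy as np
import pandas as pd
index = np.arange(4)
mislabel_rate = np.array([0.0, 0.25, 0.5, 1.0])
mislabel_col = pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
print(mislabel_col.name, mislabel_col.loc[2])  # mislabel_rate 0.5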
from photutils import SkyCircularAperture,aperture_photometry
from astropy.io import fits, ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
import pandas as pd
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from astropy.stats import SigmaClip
from photutils import Background2D, MedianBackground
from astropy.io import fits
class ImageData():
def __init__(self,objectname=None,image_file=None,se_file=None):
self.objectname = objectname
self.image_file = image_file
self.se_file = se_file
def photometry(self,target,references,method='sextracter',**kwargs):
if method.startswith('se'):
p = Sextracter(self.se_file)
elif method.startswith('ap'):
p = AperturePhotometry(self.image_file)
res = p.run(target,references,**kwargs)
self.result = res
def get_image_info(self):
hdul = fits.open(self.image_file)
image_info = hdul[1].header
self.info = image_info
def to_file(self,fname,mwebv_corr=0.):
self.get_image_info()
mjd = self.info['MJD-OBS']
flt = self.info['FILTER'][0]
self.result.update({'filter':flt,'mjd':mjd,'name':self.objectname})
self.result['mag'] = self.result['mag'] - mwebv_corr
df =
|
pd.DataFrame(self.result,index=[0])
|
pandas.DataFrame
|
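A short sketch of the completed one-row DataFrame construction: a dict of scalars needs an explicit index, which is what index=[0] provides (hypothetical photometry values):
import pandas as pd
result = {"mag": 18.3, "filter": "g", "mjd": 59000.5, "name": "SN2021abc"}
df = pd.DataFrame(result, index=[0])  # without index=[0], pandas refuses all-scalar input
print(df.shape)  # (1, 4)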
import numpy as np
import pandas as pd
from operator import itemgetter
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from util import keystrokes2events
N_CLUSTERS = 10
def stratified_kfold(df, n_folds):
"""
Create stratified k-folds from an indexed dataframe
"""
sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
sessions.apply(lambda x: np.random.shuffle(x))
folds = []
for i in range(n_folds):
idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) // n_folds):(i + 1) * (len(x) // n_folds)]))
idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
folds.append(df.loc[idx])
return folds
def user_folds(df, target):
users = df.index.get_level_values(0).unique()
return [df.loc[u].reset_index().set_index([target, 'session']) for u in users]
class RandomForest():
def __init__(self, keystroke_feature_fn):
self.keystroke_feature_fn = keystroke_feature_fn
self.keystroke_model = RandomForestClassifier(n_estimators=100)
def fit(self, samples, labels):
assert len(samples) == len(labels)
features = []
for sample in samples:
features.append(self.keystroke_feature_fn(sample))
features = pd.concat(features).values
self.keystroke_model.fit(features, labels)
return self
def predict(self, sample):
features = self.keystroke_feature_fn(sample)
probas = self.keystroke_model.predict_proba(features)
scores = dict(zip(self.keystroke_model.classes_, probas.squeeze()))
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores
def classification_acc(df, target, n_folds):
if target == 'user':
folds = stratified_kfold(df, n_folds)
else:
folds = user_folds(df, target)
predictions = []
for i in range(n_folds):
print('Fold %d of %d' % (i + 1, n_folds))
test, train = folds[i], pd.concat(folds[:i] + folds[i + 1:])
test_labels = test.index.get_level_values(0).values
train_labels = train.index.get_level_values(0).values
test_features = pd.concat(test['features'].values).values
train_features = pd.concat(train['features'].values).values
cl = RandomForestClassifier(n_estimators=200)
cl.fit(train_features, train_labels)
results = cl.predict(test_features)
predictions.extend(zip([i] * len(test_labels), test_labels, results))
predictions = pd.DataFrame(predictions, columns=['fold', 'label', 'prediction'])
summary = predictions.groupby('fold').apply(lambda x: (x['label'] == x['prediction']).sum() / len(x)).describe()
print('Results')
print(summary)
return summary['mean']
def SMAPE(ground_truth, predictions):
return np.abs((ground_truth - predictions) / (ground_truth + predictions))
def predictions_smape2(df):
def process_sample(x):
x = keystrokes2events(x)
tau = x['time'].diff()
predictions = pd.expanding_mean(tau).shift()
return SMAPE(tau, predictions).dropna().mean()
return df.groupby(level=[0, 1]).apply(process_sample).mean()
def predictions_smape(df):
def pp_smape(x):
tau = x['timepress'].diff()
predictions =
|
pd.expanding_mean(tau)
|
pandas.expanding_mean
|
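The completed pd.expanding_mean call is a legacy API that was removed in pandas 1.0; a sketch of the modern equivalent, Series.expanding().mean(), applied to toy inter-key latencies:
import pandas as pd
tau = pd.Series([100.0, 120.0, 110.0, 130.0])
# mean of all values seen so far, shifted so each prediction uses only prior latencies
predictions = tau.expanding().mean().shift()
print(predictions.tolist())  # [nan, 100.0, 110.0, 110.0]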
from sklearn.metrics import f1_score,recall_score,precision_score,confusion_matrix,accuracy_score
from pylab import *
import torch
import torch.nn as nn
import copy
import random
import pandas as pd
import numpy as np
from tqdm import trange
import pickle
import json
import sys
import time
import shap
from sklearn.model_selection import train_test_split
sys.path.append("classes")
sys.path.append("/home/matilda/PycharmProjects/log_level_estimation/TorchLRP")
from loss_functions import NuLogsyLossCompute
from model import *
from networks import *
from tokenizer import *
from data_loader import *
from prototype import get_prototypes
from collections import defaultdict
import torch.nn.functional as F
import pickle
import spacy
from collections import defaultdict
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
class Baseline(nn.Module):
def __init__(self, n_dimension, n_targets, max_size, d_model):
super(Baseline, self).__init__()
self.layer0 = nn.ModuleList([nn.Linear(d_model, d_model) for i in range(max_size)])
self.l1 = nn.Linear(n_dimension, n_dimension)
self.l2 = nn.Linear(n_dimension, n_dimension)
self.l3 = nn.Linear(n_dimension, n_targets)
self.max_size = max_size
self.activation = torch.tanh
def forward(self, input):
input = input.reshape(-1, 50, 16)
out = []
for idx in range(self.max_size):
out.append(self.layer0[idx](input[:, idx, :]))
input = torch.cat(out, dim=1)
input = self.activation(self.l1(input))
input = self.activation(self.l2(input))
input = self.l3(input)
return input
def run_train_baseline(dataloader, model, optimizer, f_loss, epoch, device="cpu"):
model.train()
total_loss = 0
start = time.time()
for i, batch in enumerate(dataloader):
load, y = batch
# print("device")
if device == "cuda":
out = model.forward(load.cuda())
else:
out = model.forward(load)
if device == "cuda":
loss = f_loss(out, y.cuda().long())
else:
loss = f_loss(out, y.long())
loss.backward()
optimizer.step()
optimizer.zero_grad()
total_loss += loss
elapsed = time.time() - start
if i % 5 == 0:
print("Epoch %d Train Step: %d / %d Loss: %f" % (epoch, i, len(dataloader), loss), end='\r')
print("Epoch %d Train Step: %d / %d Loss: %f" % (epoch, i, len(dataloader), loss), end='\r')
return total_loss / len(dataloader)
def run_test_baseline(dataloader, model, optimizer, f_loss, epoch, device="cpu"):
model.eval()
preds = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
load, y = batch
if device=="cuda":
out = model.forward(load.cuda())
else:
out = model.forward(load)
if device=="cuda":
tmp = out.detach().cpu().numpy()
else:
tmp = out.detach().cpu().numpy()
preds += list(np.argmax(tmp, axis=1))
return preds
def run_optimizer_baseline(model, train_dataloader, test_dataloader_good_repos, test_dataloader_bad_repos, load_test_good_repos_labels, load_test_bad_repos_labels, optimizer, n_epochs,cross_entropoy_loss,class_weights, device):
conf_matrix_good = []
conf_matrix_bad = []
preds = []
best_f1_score = 0
best_conf_matrix = []
best_model = []
best_preds = []
for epoch in range(1, 1 + n_epochs):
loss = run_train_baseline(train_dataloader, model, optimizer, cross_entropoy_loss, epoch, device=device)
print("Epoch %d Train Loss: %f" % (epoch, loss), " " * 30)
start_time = time.time()
print("----------GOOD REPOS----------")
preds1 = run_test_baseline(test_dataloader_good_repos, model, optimizer, cross_entropoy_loss, epoch, device=device)
print(f"Accuracy:{round(accuracy_score(preds1, load_test_good_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"confusion matrix: ", confusion_matrix(preds1, load_test_good_repos_labels))
conf_matrix_good.append(confusion_matrix(preds1, load_test_good_repos_labels))
calc_f1_score = f1_score(preds1, load_test_good_repos_labels, average='binary')
if best_f1_score < calc_f1_score:
best_f1_score = calc_f1_score
best_conf_matrix = confusion_matrix(preds1, load_test_good_repos_labels)
best_model = model
best_preds = preds1
# print("----------BAD REPOS----------")
#
# preds = run_test_baseline(test_dataloader_bad_repos, model, optimizer, cross_entropoy_loss, epoch, device=device)
# print(f"Accuracy:{round(accuracy_score(preds, load_test_bad_repos_labels), 2)}")
# print(f"f1_score:{round(f1_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
# print(f"recall_score:{round(recall_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
# print(f"precision_score:{round(precision_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
#
# conf_matrix_bad.append(confusion_matrix(preds, load_test_bad_repos_labels))
return best_model, best_preds, best_f1_score, best_conf_matrix
def extract_load(df):
print("Split descriptive and target data into numpay arrays.")
load = df['log_message'].values
labels = df['log_level'].values
return load, labels
def tokenization_dataset(df, load, labels, label_mapper):
tokenizer = LogTokenizer()
tokenized = []
for i in trange(0, len(df)):
tokenized.append(np.array(tokenizer.tokenize(df['log_message'][i])))
labels_tokenized = [label_mapper[label] for label in labels]
return tokenized, labels_tokenized, tokenizer
def word2_vec_representation(df, load, labels, label_mapper, nlp):
tokenizer = LogTokenizer()
tokenized = []
for i in trange(0, len(df)):
tokenized.append(nlp(df['log_message'][i]).vector)
labels_tokenized = [label_mapper[label] for label in labels]
return tokenized, labels_tokenized, tokenizer
def convert_normal_anomaly(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'normal'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'anomaly'
def convert_error_info(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'log'
elif x == 'warning':
return 'log'
elif x == 'info':
return 'info'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'error'
def convert_error_warning(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'debug'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'error'
def convert_info_warning(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'info'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'log'
def convert_error_info_warning(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'info'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'error'
def read_data(path):
print("Reading data at path ", path)
return pd.read_csv(path).drop(columns=["Unnamed: 0"])
def preprocess_data(df, scenario, verbose=True):
if verbose:
print("Filtering the special characters in the dataframe!")
df['log_message'] = df['log_message'].str.replace("\<\*\>", " ")
df['log_message'] = df['log_message'].str.replace("\[STR\]", " ")
df['log_message'] = df['log_message'].str.replace("\[NUM\]", " ")
if verbose:
print("Converting the classes into required categories. Pair or triplet of (INFO, ERROR, WARNING). ")
if scenario=="error_warning":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_error_warning(x))
elif scenario == "info_warning":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_info_warning(x))
elif scenario == "info_error":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_error_info(x))
elif scenario=="info_error_warning":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_error_info_warning(x))
else:
print("Insert a valid scenario, one in error_warning, info_warning, info_error")
exit(-1)
if verbose:
print("Prior removing (DEBUG, LOG and TRACE) ", df.shape)
df = df[df['log_level'] != 'debug']
df = df[df['log_level'] != 'log']
df = df[df['log_level'] != 'trace']
if verbose:
print("Size after removal ", df.shape)
indecies_to_preserve = df.index
df = df.reset_index()
df = df.drop("index", axis=1)
return df, indecies_to_preserve
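# A minimal usage sketch of preprocess_data (hypothetical rows), assuming pandas is imported as pd:
# toy = pd.DataFrame({"log_message": ["disk <*> failure", "started [STR] ok"],
#                     "log_level": ["error", "info"]})
# toy_clean, kept_idx = preprocess_data(toy, scenario="info_error", verbose=False)
# print(toy_clean["log_level"].tolist())   # expected: ['error', 'info']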
def run_train(dataloader, model, optimizer, f_loss, epoch, polars=None, device="cpu"):
model.train()
total_loss = 0
start = time.time()
for i, batch in enumerate(dataloader):
load, y = batch
if polars is not None:
y = polars[y.numpy()]
y = torch.autograd.Variable(y).cuda()
if device == "gpu":
out = model.forward(load.cuda().long())
else:
out = model.forward(load.long())
if isinstance(f_loss, nn.CosineSimilarity):
loss = (1 - f_loss(out, y)).pow(2).sum()
else:
if device=="gpu":
loss = f_loss(out, y.cuda().long())
else:
loss = f_loss(out, y.long())
loss.backward()
optimizer.step()
optimizer.zero_grad()
total_loss += loss
elapsed = time.time() - start
if i % 5 == 0:
print("Epoch %d Train Step: %d / %d Loss: %f" %
(epoch, i, len(dataloader), loss), end='\r')
print("Epoch %d Train Step: %d / %d Loss: %f" %
(epoch, i, len(dataloader), loss), end='\r')
return total_loss / len(dataloader)
def run_test(dataloader, model, optimizer, f_loss, epoch, polars=None, device="cpu"):
model.eval()
preds = []
tmps = []
scores_head1 = []
scores_head2 = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
load, y = batch
if device=="gpu":
out = model.forward(load.cuda().long())
else:
out = model.forward(load.long())
if isinstance(f_loss, nn.CosineSimilarity):
x = F.normalize(out, p=2, dim=1)
x = torch.mm(x, polars.t().cuda())
pred = x.max(1, keepdim=True)[1].reshape(1, -1)[0]
preds += list(pred.detach().cpu().numpy())
else:
tmp = out.detach().cpu().numpy()
preds += list(np.argmax(tmp, axis=1))
tmps += list(tmp)
scores_head1 += model.encoder.layers[0].self_attn.attn[:, 0, :, :].detach().cpu()
scores_head2 += model.encoder.layers[0].self_attn.attn[:, 1, :, :].detach().cpu()
return preds, scores_head1, scores_head2
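# run_test returns per-sample class predictions (argmax over the model outputs, or the nearest
# polar prototype when a CosineSimilarity loss is used) together with the attention weights of
# heads 0 and 1 of the first encoder layer, which are kept for later inspection.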
def run_optimizer(model, train_dataloader, test_dataloader, test_dataloader_bad_repos, labels_test, labels_test_bad_repos, optimizer, n_epochs, f_loss, polars, class_weights, device):
conf_matrix_good = []
conf_matrix_bad = []
best_f1_good = 0
best_f1_bad = 0
idx_good = 0
idx_bad = 0
best_model = 0
best_preds = 0
for epoch in range(1, 1 + n_epochs):
print("Epoch", epoch)
loss = run_train(train_dataloader, model, optimizer, f_loss, epoch, polars, device)
print("Epoch %d Train Loss: %f" % (epoch, loss), " " * 30)
start_time = time.time()
print("----------GOOD REPOS----------")
preds1, scores11, scores12 = run_test(test_dataloader, model, optimizer, f_loss, epoch, polars, device)
print(f"Accuracy:{round(accuracy_score(preds1, labels_test), 2)}")
print(f"f1_score:{round(f1_score(preds1, labels_test, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, labels_test, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, labels_test, average='binary'), 2)}")
conf_matrix_good.append(confusion_matrix(preds1, labels_test))
pp = confusion_matrix(preds1, labels_test)
print(pp)
if pp.shape[0]<3:
if best_f1_good < f1_score(preds1, labels_test, average='binary') and pp[0][0] >0 and pp[1][1] > 0:
best_f1_good = f1_score(preds1, labels_test, average='binary')
idx_good = epoch-1
best_model = model
# torch.save(model,
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + ".pth")
# with open(
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + "_label_mapper.pickle",
# "wb") as file:
# pickle.dump(label_mapper, file)
else:
if best_f1_good < f1_score(preds1, labels_test, average='binary') and pp[0][0] >0 and pp[1][1] > 0 and pp[2][2]:
best_f1_good = f1_score(preds1, labels_test, average='binary')
idx_good = epoch-1
best_model = model
# torch.save(model,
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + ".pth")
# with open(
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + "_label_mapper.pickle",
# "wb") as file:
# pickle.dump(label_mapper, file)
print("----------BAD REPOS----------")
preds, scores21, scores22 = run_test(test_dataloader_bad_repos, model, optimizer, f_loss, epoch, polars, device)
print(f"Accuracy:{round(accuracy_score(preds, labels_test_bad_repos), 2)}")
print(f"f1_score:{round(f1_score(preds, labels_test_bad_repos, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds, labels_test_bad_repos, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds, labels_test_bad_repos, average='binary'), 2)}")
conf_matrix_bad.append(confusion_matrix(preds, labels_test_bad_repos))
pp = confusion_matrix(preds, labels_test_bad_repos)
if pp.shape[0] < 3:
if best_f1_bad < f1_score(preds, labels_test_bad_repos, average='binary') and pp[0][0] > 0 and pp[1][1] > 0:
best_f1_bad = f1_score(preds, labels_test_bad_repos, average='binary')
idx_bad = epoch - 1
else:
if best_f1_bad < f1_score(preds, labels_test_bad_repos, average='binary') and pp[0][0] > 0 and pp[1][1] > 0 and pp[2][2]:
best_f1_bad = f1_score(preds, labels_test_bad_repos, average='binary')
idx_bad = epoch - 1
return best_model, preds1, preds, conf_matrix_good, conf_matrix_bad, scores11, scores12, scores21, scores22, best_f1_good, best_f1_bad, idx_good, idx_bad
def top_ranked_repos(repositories, star_repos, number_repos_good, number_bad_repos, number_validation_repos, good_bad_hypo):
repositories= repositories.drop('index', axis=1)
repositories = repositories.reset_index()
repositories.columns = ["id", "repo_link"]
if good_bad_hypo:
top_repos = star_repos.iloc[:number_repos_good, :].repo_name
bottom_repos = star_repos.iloc[(-1)*number_bad_repos:,:].repo_name # THIS TRAINS ON TOP repositories
else:
top_repos = star_repos.iloc[(-1)*number_repos_good:, :].repo_name.values
bottom_repos = star_repos.iloc[:number_bad_repos,:].repo_name # THIS TRAINS ON BOTTOM repos
grepos = np.arange(number_repos_good).tolist()
validation_repos = set(random.sample(grepos, number_validation_repos))
train_repos = set(grepos).difference(validation_repos)
top_ranked_indecies = []
top_ranked_validation_indecies = []
bottom_ranked_indecies = []
joint = []
for good_repos in top_repos[list(train_repos)]:
top_ranked_indecies.append(repositories[repositories.repo_link==good_repos].id.values)
joint.append(repositories[repositories.repo_link==good_repos].id.values)
for good_repos in top_repos[list(validation_repos)]:
top_ranked_validation_indecies.append(repositories[repositories.repo_link==good_repos].id.values)
joint.append(repositories[repositories.repo_link==good_repos].id.values)
for bad_repos in bottom_repos:
bottom_ranked_indecies.append(repositories[repositories.repo_link==bad_repos].id.values)
joint.append(repositories[repositories.repo_link==bad_repos].id.values)
return np.hstack(top_ranked_indecies), np.hstack(top_ranked_validation_indecies), np.hstack(bottom_ranked_indecies), np.hstack(joint)
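# top_ranked_repos returns four arrays of row ids into the log dataframe: training rows from the
# top-ranked repositories, validation rows from a random subset of those repositories, rows from
# the bottom-ranked repositories, and their union (joint). When good_bad_hypo is False the roles
# of top and bottom repositories are swapped.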
def create_data_loaders_baselines(load_train, labels_train, load_test, labels_test, batch_size):
train_data = TensorDataset(torch.tensor(load_train, dtype=torch.float32), torch.tensor(labels_train.astype(np.int32), dtype=torch.int32))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
test_data = TensorDataset(
torch.tensor(load_test, dtype=torch.float32),
torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return train_dataloader, test_dataloader
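# Minimal usage sketch (hypothetical shapes), assuming dense feature vectors such as the spacy
# vectors built by word2_vec_representation:
# train_dl, test_dl = create_data_loaders_baselines(
#     np.random.rand(100, 96), np.random.randint(0, 2, 100),
#     np.random.rand(20, 96), np.random.randint(0, 2, 20), batch_size=16)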
def evaluate(preds1, load_test_good_repos_labels, preds, load_test_bad_repos_labels, good_bad_hypo):
fin_results = defaultdict(dict)
print("********"*10)
print("----------GOOD REPOS----------")
print(f"Accuracy:{round(accuracy_score(preds1, load_test_good_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
d = {}
d["Accuracy"] = accuracy_score(preds1, load_test_good_repos_labels)
d['F1_score'] = f1_score(preds1, load_test_good_repos_labels, average='binary')
d["recall_score"] = recall_score(preds1, load_test_good_repos_labels, average='binary')
d["precision_score"] = precision_score(preds1, load_test_good_repos_labels, average='binary')
d["confusion_matrix"] = confusion_matrix(preds1, load_test_good_repos_labels)
if good_bad_hypo == True:
fin_results["good_repos"] = d
else:
fin_results["bad_repos"] = d
print("----------BAD REPOS----------")
print(f"Accuracy:{round(accuracy_score(preds, load_test_bad_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
d = {}
d["Accuracy"] = accuracy_score(preds, load_test_bad_repos_labels)
d['F1_score'] = f1_score(preds, load_test_bad_repos_labels, average='binary')
d["recall_score"] = recall_score(preds, load_test_bad_repos_labels, average='binary')
d["precision_score"] = precision_score(preds, load_test_bad_repos_labels, average='binary')
d["confusion_matrix"] = confusion_matrix(preds, load_test_bad_repos_labels)
if good_bad_hypo == True:
fin_results["bad_repos"] = d
else:
fin_results["good_repos"] = d
return fin_results
def create_data_loaders_baselines_test( load_test, labels_test, batch_size):
test_data = TensorDataset(
torch.tensor(load_test, dtype=torch.float32),
torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return test_dataloader
all_results = defaultdict(dict)
all_results_m1 = defaultdict(dict)
all_results_m2 = defaultdict(dict)
all_results_m3 = defaultdict(dict)
#
#
# good_bad_hypo = True
# scenario = "info_error_warning"
# store_path = "../../5_results/models/learning_scenario1/"
# results_name = store_path + scenario + "/10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# label_mapper_name = store_path + scenario + "/label_mapper_bin_" + str(good_bad_hypo) + "_.pickle"
#
# with open(results_name, "rb") as file:
# all_results = pickle.load(file)
#
# with open(label_mapper_name, "rb") as file:
# label_mapper_name = pickle.load(file)
#
# store_path = "../../5_results/models/baseline/"
# results_name_m1 = store_path + scenario + "/model1_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# results_name_m2 = store_path + scenario + "/model2_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# results_name_m3 = store_path + scenario + "/model3_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# label_mapper_name = store_path + scenario + "/label_mapper_bin_" + str(good_bad_hypo) + "_.pickle"
#
# with open(results_name_m1, "rb") as file:
# all_results_m1 = pickle.load(file)
#
# with open(results_name_m2, "rb") as file:
# all_results_m2 = pickle.load(file)
#
# with open(results_name_m3, "rb") as file:
# all_results_m3 = pickle.load(file)
#
# with open(label_mapper_name, "rb") as file:
# label_mapper_name = pickle.load(file)
#
# print(all_results_m3.keys())
for seed in np.arange(1):
print("CURRENTLY PROCESSING SEED {}".format(seed))
PATH = "../../3_preprocessed_data/filtered_log_df_reduced.csv"
PATH_COUNTS = "../../3_preprocessed_data/stars_repos.csv"
learning_rate = 0.0001
decay = 0.001
betas = (0.9, 0.999)
momentum = 0.9
number_repos_good = 700
number_bad_repos = 1
number_validation_repos = 100
batch_size = 2048
pad_len = 50
n_layers=2
in_features=16
out_features=16
num_heads=2
dropout=0.05
max_len=50
n_targets = 2
device = "gpu"
random_seed = seed
torch.manual_seed(random_seed)
np.random.seed(random_seed)
scenario = "info_error" # ONE IN: "info_warning", "info_error", "error_warning", "info_error_warning"
n_epochs = 50
good_bad_hypo = True
df = read_data(PATH)
repositories = df['repo_link']
df, indecies_to_preserve = preprocess_data(df, scenario)
repositories = repositories.loc[indecies_to_preserve]
repositories = repositories.reset_index()
star_repos = pd.read_csv(PATH_COUNTS)
train_good_repos, validation_good_repos, bad_repos, good_bad_repos = top_ranked_repos(repositories, star_repos, number_repos_good, number_bad_repos, number_validation_repos, good_bad_hypo=good_bad_hypo)
df = df.loc[good_bad_repos]
df = df.reset_index()
df1 = copy.copy(df)
df1.columns = ["original_index",'log_message', 'repo_topic', 'repo_link', 'file_name', 'log_level']
df1 = df1.reset_index().iloc[:, :2]
df1.index = df1.original_index
df = df.drop('index', axis=1)
load, labels = extract_load(df)
class_count = df.groupby("log_level").count()['log_message']
label_mapper = {class_count.index[i]:i for i in range(len(class_count))}
tokenized, labels_tokenized, tokenizer = tokenization_dataset(df, load, labels, label_mapper)
assert len(tokenized) == df.shape[0], "Some data samples have been lost during tokenization. Take care of this."
load_train = np.array(tokenized, dtype=object)[df1.loc[train_good_repos].iloc[:, 0].values]
load_train_labels = np.array(labels_tokenized)[df1.loc[train_good_repos].iloc[:, 0].values]
load_test_good_repos = np.array(tokenized, dtype=object)[df1.loc[validation_good_repos].iloc[:, 0].values]
load_test_good_repos_labels = np.array(labels_tokenized)[df1.loc[validation_good_repos].iloc[:, 0].values]
load_test_bad_repos = np.array(tokenized, dtype=object)[df1.loc[bad_repos].iloc[:, 0].values]
load_test_bad_repos_labels = np.array(labels_tokenized)[df1.loc[bad_repos].iloc[:, 0].values]
train_dataloader, test_dataloader_good_repos = create_data_loaders(load_train, load_train_labels, load_test_good_repos, load_test_good_repos_labels, pad_len, batch_size)
test_dataloader_bad_repos = create_test_data_loaders(load_test_bad_repos, load_test_bad_repos_labels, pad_len, batch_size)
if device =="gpu":
torch.cuda.empty_cache()
src_vocab = tokenizer.n_words
calculate_weights = lambda x, i: x.sum() / (len(x)*x[i])
    weights = np.array([calculate_weights(class_count, i) for i in range(len(class_count))])
    weights /= weights.max()
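    # Inverse-frequency class weights, normalized to a max of 1. Worked toy example (hypothetical counts):
    # if class_count = {'error': 100, 'info': 300}, then weights = [400/(2*100), 400/(2*300)] = [2.0, 0.667],
    # and after dividing by the max the error class gets weight 1.0 and info gets roughly 0.33.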
if device =="gpu":
class_weights=torch.FloatTensor(weights).cuda()
cross_entropoy_loss = nn.CrossEntropyLoss(weight=class_weights).cuda()
else:
class_weights = torch.FloatTensor(weights)
cross_entropoy_loss = nn.CrossEntropyLoss(weight=class_weights)
loss_f = cross_entropoy_loss
model = NuLogsyModel(src_vocab=src_vocab, tgt_vocab=n_targets,
n_layers=n_layers, in_features=in_features,
out_features=out_features,num_heads=num_heads,
dropout=dropout, max_len=max_len).get_model()
if device == "gpu":
torch.cuda.set_device(0)
model.cuda()
sgd_opt = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=decay)
adam_opt = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=betas, weight_decay=decay)
optimizers = {"adam":adam_opt,"sgd":sgd_opt}
optimizer = optimizers['adam']
model, preds1, preds, conf_matrix_good, conf_matrix_bad, scores11, scores12, scores21, scores22, best_f1_good, best_f1_bad, idx_good, idx_bad = run_optimizer(model, train_dataloader, test_dataloader_good_repos, test_dataloader_bad_repos, load_test_good_repos_labels, load_test_bad_repos_labels, optimizer, n_epochs,cross_entropoy_loss,polars=None,class_weights=weights, device=device)
fin_results = defaultdict(dict)
print("*******"*10)
print("----------GOOD REPOS----------")
print(f"Accuracy:{round(accuracy_score(preds1, load_test_good_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
d = {}
d["Accuracy"] = accuracy_score(preds1, load_test_good_repos_labels)
d['F1_score_last'] = f1_score(preds1, load_test_good_repos_labels, average='binary')
d["recall_score"] = recall_score(preds1, load_test_good_repos_labels, average='binary')
d["precision_score"] = precision_score(preds1, load_test_good_repos_labels, average='binary')
d["confusion_matrix"] = conf_matrix_good
d["F1_best"] = best_f1_good
d["F1_epoch_good"] = idx_good
if good_bad_hypo==True:
fin_results["good_repos"] = d
else:
fin_results["bad_repos"] = d
print("----------BAD REPOS----------")
print(f"Accuracy:{round(accuracy_score(preds, load_test_bad_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
d = {}
d["Accuracy"] = accuracy_score(preds, load_test_bad_repos_labels)
d['F1_score'] = f1_score(preds, load_test_bad_repos_labels, average='binary')
d["recall_score"] = recall_score(preds, load_test_bad_repos_labels, average='binary')
d["precision_score"] = precision_score(preds, load_test_bad_repos_labels, average='binary')
d["confusion_matrix"] = conf_matrix_bad
d["F1_best"] = best_f1_bad
d["F1_epoch_best"] = idx_bad
if good_bad_hypo==True:
fin_results["bad_repos"] = d
else:
fin_results["good_repos"] = d
all_results[seed] = fin_results
# nlp = spacy.load("en_core_web_sm")
#
# tokenized, labels_tokenized, tokenizer = word2_vec_representation(df, load, labels, label_mapper, nlp)
#
# assert len(tokenized) == df.shape[0], "Some data samples have been lost during tokenization. Take care of this."
#
# load_train = np.array(tokenized, dtype=np.float32)[df1.loc[train_good_repos].iloc[:, 0].values]
# load_train_labels = np.array(labels_tokenized)[df1.loc[train_good_repos].iloc[:, 0].values]
#
# load_test_good_repos = np.array(tokenized, dtype=np.float32)[df1.loc[validation_good_repos].iloc[:, 0].values]
# load_test_good_repos_labels = np.array(labels_tokenized)[df1.loc[validation_good_repos].iloc[:, 0].values]
#
# load_test_bad_repos = np.array(tokenized, dtype=np.float32)[df1.loc[bad_repos].iloc[:, 0].values]
# load_test_bad_repos_labels = np.array(labels_tokenized)[df1.loc[bad_repos].iloc[:, 0].values]
#
# train_dataloader, test_dataloader_good_repos = create_data_loaders_baselines(load_train, load_train_labels,
# load_test_good_repos,
# load_test_good_repos_labels,
# batch_size)
# test_dataloader_bad_repos = create_data_loaders_baselines_test(load_test_bad_repos, load_test_bad_repos_labels,
# batch_size)
#
# src_vocab = tokenized[0].shape[0]
# model1 = Baseline(n_dimension=src_vocab, n_targets=n_targets)
#
# if device == "gpu":
# torch.cuda.set_device(0)
# model1.cuda()
#
# sgd_opt = torch.optim.SGD(model1.parameters(), lr=learning_rate, momentum=momentum, weight_decay=decay)
# adam_opt = torch.optim.Adam(model1.parameters(), lr=learning_rate, betas=betas, weight_decay=decay)
# optimizers = {"adam":adam_opt,"sgd":sgd_opt}
# optimizer = optimizers['adam']
#
# n_epochs = 200
#
# model1, preds1, preds, conf_matrix_good, conf_matrix_bad = run_optimizer_baseline(model1, train_dataloader, test_dataloader_good_repos, test_dataloader_bad_repos, load_test_good_repos_labels, load_test_bad_repos_labels, optimizer, n_epochs,cross_entropoy_loss,class_weights=weights, device=device)
#
# print("STARTED TRAINING RANDOM FOREST!")
#
# model2 = make_pipeline(StandardScaler(), RandomForestClassifier(n_estimators=100, max_depth=None, max_features=int(src_vocab/3)))
# model2.fit(load_train, load_train_labels)
# preds_good_m2 = model2.predict(load_test_good_repos)
# preds_bad_m2 = model2.predict(load_test_bad_repos)
#
# print("STARTED TRAINING SUPPORT VECTOR MACHINE!")
# model3 = make_pipeline(StandardScaler(), SVC(gamma="auto", kernel="rbf"))
# model3.fit(load_train, load_train_labels)
# preds_good_m3 = model3.predict(load_test_good_repos)
# preds_bad_m3 = model3.predict(load_test_bad_repos)
#
#
# all_results_m1[seed] = evaluate(preds1, load_test_good_repos_labels, preds, load_test_bad_repos_labels, good_bad_hypo)
# all_results_m2[seed] = evaluate(preds_good_m2, load_test_good_repos_labels, preds_bad_m2, load_test_bad_repos_labels, good_bad_hypo)
# all_results_m3[seed] = evaluate(preds_good_m3, load_test_good_repos_labels, preds_bad_m3,
# load_test_bad_repos_labels, good_bad_hypo)
#
#
# store_path = "../../5_results/models/learning_scenario1/"
# results_name = store_path + scenario + "/10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# label_mapper_name = store_path + scenario + "/label_mapper_bin_" + str(good_bad_hypo) + "_.pickle"
#
# with open(results_name, "wb") as file:
# pickle.dump(all_results, file)
#
# with open(label_mapper_name, "wb") as file:
# pickle.dump(label_mapper_name, file)
#
# store_path = "../../5_results/models/baseline/"
# results_name_m1 = store_path + scenario + "/model1_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# results_name_m2 = store_path + scenario + "/model2_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# results_name_m3 = store_path + scenario + "/model3_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# label_mapper_name = store_path + scenario + "/label_mapper_bin_" + str(good_bad_hypo) + "_.pickle"
#
# with open(results_name_m1, "wb") as file:
# pickle.dump(all_results_m1, file)
#
# with open(results_name_m2, "wb") as file:
# pickle.dump(all_results_m2, file)
#
# with open(results_name_m3, "wb") as file:
# pickle.dump(all_results_m3, file)
#
# with open(label_mapper_name, "wb") as file:
# pickle.dump(label_mapper_name, file)
from scipy.linalg import norm
def extract_gradients_input_output(model, original_test_data, ground_truth_labels, predictions_model):
#
# if device =="gpu":
# class_weights=torch.FloatTensor(weights).cuda()
# cross_entropoy_loss = nn.CrossEntropyLoss(weight=class_weights).cuda()
# else:
# class_weights = torch.FloatTensor(weights)
# cross_entropoy_loss = nn.CrossEntropyLoss(weight=class_weights)
#
#
# loss_f = cross_entropoy_loss
# adam_opt = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=betas, weight_decay=decay)
df =
|
pd.DataFrame([ground_truth_labels, predictions_model])
|
pandas.DataFrame
|
import os, cx_Oracle
from datetime import *
import requests
import MySQLdb
import numpy as np
import pandas as pd
from fn import *
from oDT import *
livedb = os.getcwd () + "\\robi_live.csv"
db = os.getcwd () + "\\OMDB.csv"
semcol = os.getcwd () + "\\semcols.txt"
CAT = os.getcwd () + "\\CATdef.txt"
try:
mysqlconn = MySQLdb.connect ("localhost", "root", "admin", "om2")
except:
mysqlconn = ""
n = datetime.now ()
tm = n.strftime("%H:%M") + " on " + n.strftime ("%m-%d-%Y")
def hr_minus(diff):
x = datetime.now ()
d = x - timedelta (hours=diff)
str_d = d.strftime ("%m-%d-%Y %H:%M:%S")
return str_d
def timedelt(diff):
x = datetime.now ()
d = x + timedelta (hours=diff)
str_d = d.strftime ("%d-%m-%Y %H:%M:%S")
return str_d
def text2list(pth):
    ls = []
    with open(pth, 'r') as f:
        for i in f.readlines():
            ls.append(i.replace('\n', ''))
    return ls
def text2dic(pth):
    dc = {}
    with open(pth, 'r') as f:
        for i in f.readlines():
            a1 = i.replace('\n', '')
            a2 = a1.split(':')
            dc[a2[0]] = a2[1]
    return dc
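# text2list expects one value per line; text2dic expects lines of the form "key:value"
# (e.g. a hypothetical CATdef.txt line such as "LINK FAIL:transmission" would map "LINK FAIL" -> "transmission").
# The exact file contents are an assumption here; only the "key:value" layout is implied by the parsing above.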
def getkey(my_dict, ky):
    if ky is not None:
        for key, value in my_dict.items():
            if key in str(ky):
                return value
    return "other"
DRCAT = lambda x: '2H' if (x < 120) \
else ('4H' if (x < 240)\
else ('6H' if (x < 360)\
else ('12H' if (x < 720)\
else ('24H' if (x < 1440)\
else ('48H' if (x < 2880)\
else ('72H'))))))
TS = lambda x: '2G' if ('2G' in x) \
    else ('3G' if ('3G' in x) \
    else ('4G' if ('4G' in x) \
    else ('OML' if ('OML' in x) \
    else "other")))
def extrafeat(xdf, tmdelta = 0):
xdf = xdf.rename (columns=str.upper)
df = xdf.assign (DURCAT='0')
df = df.assign (LO='0')
df = df.assign (CDLO='0')
df = df.assign (CDLOTECH='0')
df['DURCAT'] = df.apply (lambda x: DRCAT (x.DUR), axis=1)
df['LO'] = df.apply (lambda x: pd.to_datetime (x['LASTOCCURRENCE'], errors='coerce', cache=True).strftime("%d%m%y%H%M"), axis=1)
df['CDLO'] = df['CUSTOMATTR15'].str.cat (df['LO'])
df['CDLOTECH'] = df['CDLO'].str.cat (df['CATX'])
print('done duration')
return df
def catmap_mod(df):
print("strart operation..............")
dfdb1 = pd.read_csv (db)
dfdb = dfdb1[['Code', 'Zone']]
df0 = df.rename (columns=str.upper)
ls = text2list (semcol)
df1 = df0[ls]
dc = text2dic (CAT)
df1 = df1.assign (CAT='0')
df1 = df1.assign (CATX='0')
df1 = df1.assign (Code='0')
df1['CAT'] = df1.apply (lambda x: getkey (dc, x.SUMMARY), axis=1)
df1['CATX'] = df1.apply (lambda x: TS (x.SUMMARY), axis=1)
df1['Code'] = df1.apply (lambda x: x.CUSTOMATTR15[0:5], axis=1)
df2 = df1.merge (dfdb, on='Code')
try:
df3 = DateDiff(df2, "DUR", "LASTOCCURRENCE")
except:
df3 = datediff_ondf(df2, "DUR", 'LASTOCCURRENCE')
df4 = extrafeat(df3)
xdf = df4.replace (np.nan, 0)
ndf = countifs(xdf, xdf['CUSTOMATTR15'], xdf['CUSTOMATTR15'], xdf['DURCAT'], xdf['DURCAT'])
odf = countifs(ndf, xdf['EQUIPMENTKEY'], xdf['EQUIPMENTKEY'], xdf['DURCAT'], xdf['DURCAT'])
odf.to_csv (os.getcwd () + "\\FINAL12.csv", index=False)
return odf
def sort_rvmdup(df):
df1 = df[~df['CATX'].isin(['other']) & ~df['CAT'].isin(['other'])]
df1 = df1.sort_values(by=['CAT','CDLO'], ascending=True)
df1 = df1.drop_duplicates(subset=['CDLOTECH'], inplace=False, ignore_index=True)
#df2 = df1.groupby(['DURCAT','EQUIPMENTKEY','CAT'])['CUSTOMATTR15'].count()
pvt = df1.pivot_table(index=['CUSTOMATTR15','CAT'], columns='DURCAT', values='cnt_x', aggfunc='sum').reset_index()
ndf = pvt[(pvt['72H'] > 10) & (pvt['48H'] > 2)]
return ndf
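# sort_rvmdup drops 'other' categories and duplicate CDLOTECH keys, then pivots the per-duration
# counts (cnt_x) per (CUSTOMATTR15, CAT) across the duration buckets; only rows with more than
# 10 alarms in the 72H bucket and more than 2 in the 48H bucket are kept.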
def fmtmsg_techwise(df, name_thread_col, ls_datacol, name_catcol, cat_text):
lss = []
heap = ''
hp = ""
hpx = ""
for n in range(len(df)):
code = df.loc[n, name_thread_col]
cat = df.loc[n, name_catcol]
if str(cat) == str(cat_text):
for i in range(len(ls_datacol)):
if hp == "":
hp = df.loc[n, ls_datacol[i]]
else:
hp = hp + " | " + df.loc[n, ls_datacol[i]]
hpx = code + ": " + hp
if heap == "":
heap = hpx
hp = ""
else:
heap = heap + chr(10) + hpx
hp = ""
return heap
def tmsg(chatid,msg):
TOK = "1176189570:AAEfPi9TIZIbnhWi4Ko6KQev2Iv7UbMw5js"
url = "https://api.telegram.org/bot" + TOK + "/sendMessage?chat_id=" + str(chatid) + "&text=" + msg
requests.get(url)
return ""
def semqry():
conn = cx_Oracle.connect ('SOC_READ','soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print (conn.version)
agent = ['U2000 TX','Ericsson OSS','EricssonOSS','Huawei U2000 vEPC','Huawei U2020','LTE_BR1_5','MV36-PFM3-MIB','BusinessRule14','BusinessRule14_ERI_ABIP']
cols = "SERIAL,NODE,AGENT,ALERTGROUP,SEVERITY,LOCALSECOBJ,X733EVENTTYPE,X733SPECIFICPROB,MANAGEDOBJCLASS,GEOINFO,CUSTOMATTR3,CUSTOMATTR5,CUSTOMATTR25,TTSEQUENCE,TTSTATUS,SRCDOMAIN,CUSTOMATTR26,OUTAGEDURATION,TALLY,ALARMDETAILS,EQUIPMENTKEY,CUSTOMATTR15,SUMMARY,LASTOCCURRENCE,CLEARTIMESTAMP"
q1 = "SELECT " + cols + " FROM SEMHEDB.ALERTS_STATUS WHERE "
STDT = timedelt(-22)
ENDT = timedelt(1)
q2 = "LASTOCCURRENCE BETWEEN TO_DATE('" + STDT + "','DD-MM-YYYY HH24:MI:SS') AND TO_DATE('" + ENDT + "','DD-MM-YYYY HH24:MI:SS')"
q3 = q1 + q2
print(q3)
print('starts: ', datetime.now())
df =
|
pd.read_sql(q3, con=conn)
|
pandas.read_sql
|
import pandas
from matplotlib import pyplot
import numpy
def division(n, d):
return n / d if d else 0
early_voting = pandas.read_csv('~/Downloads/fl_early_polling_statewide_latsandlongs.csv', sep='\t')
early_voting = early_voting.sort_values(by=['county'])
current_county = ''
current_county_found = 0
current_county_missing = 0
county_stats = []
for index, row in early_voting.iterrows():
if (str(row['county']) != current_county):
county_stats.append((current_county, current_county_found, current_county_missing, (division(current_county_missing, current_county_found) * 100)))
current_county = str(row['county'])
current_county_found = 0
current_county_missing = 0
current_county_found += 1
if (
|
pandas.isna(row['latitude'])
|
pandas.isna
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %%
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from IPython.display import display
from tqdm import tqdm
import plotly.express as px
from sklearn import config_context
from lib_spotify_app.model import (
dbcv,
validity_score,
dbcv_validity_score,
abs_dbcv_validity_score,
make_processing,
make_processing_parallel,
make_search,
make_optimization,
make_default_search_param_spaces,
make_default_optim_param_spaces,
make_default_optim_param_spaces_parallel,
analysis_plot_pipe,
)
from lib_spotify_app.api_adapter import (
make_spotify_playlist,
setup_spotipy,
get_credential,
query_liked_songs,
enrich_audiofeature,
normalize_request
)
from lib_spotify_app.enrich_artist_genre import add_genres
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams['figure.figsize'] = (16, 10)
sns.set_context('notebook')
sns.set_style('whitegrid')
# %%
df = pd.read_json('liked_songs.json')
# %%
n_samples = df.shape[0]/4
prior = 'uniform'
param_space = make_default_optim_param_spaces(n_samples, prior)
param_space_par = make_default_optim_param_spaces_parallel(n_samples, prior)
# %%
n_iter = 100
n_best = 5
# %% [markdown]
# # Run different metrics and scores
# %%
proc = make_processing(w_dist=False)
with config_context(display='diagram'):
display(proc)
optimizer_serial = make_optimization(proc, param_space, n_iter=n_iter, scoring=dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_serial, df, n_best=n_best)
# %%
proc_mah = make_processing(w_dist=True)
with config_context(display='diagram'):
display(proc_mah)
optimizer_serial_mah = make_optimization(proc_mah, param_space, n_iter=n_iter, scoring=dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_serial_mah, df, n_best=n_best)
# %%
proc_par = make_processing_parallel(w_dist=False, kwargs_umap={"metric":"euclidean"})
with config_context(display='diagram'):
display(proc_par)
optimizer_par = make_optimization(proc_par, param_space_par, n_iter=n_iter, scoring=dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_par, df, n_best=n_best)
# %% [markdown]
# Because of issues with HDBSCAN when combined with precomputed distances, I use DBCV instead.
# `Mahalanobis` does not work directly, or at least not all properties do, such as `relative_validity` or `validity_index()`.
# %%
proc_mah = make_processing(w_dist=True)
with config_context(display='diagram'):
display(proc_mah)
optimizer_serial_mah = make_optimization(proc_mah, param_space, n_iter=n_iter, scoring=dbcv).fit(df)
# %%
analysis_plot_pipe(optimizer_serial_mah, df, n_best=n_best)
# %%
proc_par_mah = make_processing_parallel(w_dist=True)#, kwargs_hdbscan={"metric":"precomputed"})
with config_context(display='diagram'):
display(proc_par_mah)
optimizer_par_mah = make_optimization(proc_par_mah, param_space_par, n_iter=n_iter, scoring=dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_par_mah, df, n_best=n_best)
# %%
proc_mah = make_processing(w_dist=True)
with config_context(display='diagram'):
display(proc_mah)
def dbcv_validity_score2(pipe, X, y=None):
return dbcv(pipe, X, y=None) + validity_score(pipe, X, y=None)
optimizer_serial_mah2 = make_optimization(proc_mah, param_space, n_iter=n_iter, scoring=dbcv_validity_score2).fit(df)
# %%
analysis_plot_pipe(optimizer_serial_mah2, df, n_best=n_best)
# %% [markdown]
# Up until now we used DBCV * validity index, but it wasn't as satisfying.
# Below we try with the absolute validity index.
# %%
proc_mah = make_processing(w_dist=True)
with config_context(display='diagram'):
display(proc_mah)
optimizer_serial_mah3 = make_optimization(proc_mah, param_space, n_iter=n_iter, scoring=abs_dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_serial_mah3, df, n_best=n_best)
# %%
proc_mah = make_processing(w_dist=True)
with config_context(display='diagram'):
display(proc_mah)
def neg_dbcv_validity_score(pipe, X, y=None):
return dbcv(pipe, X, y=None) * (-validity_score(pipe, X, y=None))
optimizer_serial_mah_neg = make_optimization(proc_mah, param_space, n_iter=n_iter, scoring=neg_dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_serial_mah_neg, df, n_best=10, figsize=(12, 100))
# %%
def inv_dbcv_validity_score(pipe, X, y=None):
return dbcv(pipe, X, y=None) * (1- abs(validity_score(pipe, X, y=None)))
optimizer_serial_mah4 = make_optimization(proc_mah, param_space, n_iter=n_iter, scoring=inv_dbcv_validity_score).fit(df)
# %%
analysis_plot_pipe(optimizer_serial_mah4, df, n_best=10, figsize=(12, 100))
# %%
from hdbscan.validity import validity_index
def find_clusterer(pipe):
if "clusterer" in pipe.named_steps.keys():
clusterer = pipe["clusterer"]
else: #parallel
named_steps = {t[0]:t[1] for t in pipe['transf'].transformer_list}
clusterer = named_steps["clusterer"]
return clusterer
def weighted_validity_score(pipe, X, y=None):
clusterer = find_clusterer(pipe)
X_map = X
for name, estimator in pipe.steps[:-1]:
if isinstance(estimator, str) or estimator is None:
continue
X_map = estimator.transform(X_map)
if isinstance(X_map, pd.DataFrame):
X_map = X_map.to_numpy()
else:
X_map = np.float64(X_map)
vi = validity_index(
X_map,
clusterer.labels_,
metric=clusterer.metric,
per_cluster_scores=True
)
if vi[0] == 0:
return 0
vc =
|
pd.Series(clusterer.labels_)
|
pandas.Series
|
#https://machinelearningmastery.com/time-series-forecasting-with-prophet-in-python/
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.offline as py
py.init_notebook_mode()
df=pd.read_csv('tous.csv')
df.head()
indexNames = df[ df['annee'] == 2021 ].index
# Delete these row indexes from dataFrame
df.drop(indexNames , inplace=True)
df.head()
df=df[df['Région']== 'Occitanie']
df.head()
df['Date_datetime'] = pd.to_datetime(df['Date_datetime'])
df.head()
df.info()
df=df[['Date_datetime','Consommation (MW)']]
df.head()
df = df.sort_values(by = 'Date_datetime')
plt.figure(figsize=(20,10))
plt.plot(df['Date_datetime'] , df['Consommation (MW)'])
df.set_index('Date_datetime', inplace = True)
rolling_mean = df.rolling(window = 365).mean()
rolling_std = df.rolling(window = 12).std()
plt.figure( figsize=(8,5))
plt.plot(df, color = 'blue', label = 'Original')
plt.plot(rolling_mean, color = 'red', label = 'Rolling mean')
plt.plot(rolling_std, color = 'black', label = 'Rolling std')
plt.legend(loc = 'best')
plt.title('Rolling mean and rolling std')
plt.show()
from statsmodels.tsa.stattools import adfuller
result = adfuller(df['Consommation (MW)'])
print('ADF statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical values:')
for key, value in result[4].items():
print('\t{}: {}'.format(key, value))
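# Interpretation: if the ADF statistic is below the critical values (equivalently, p-value < 0.05),
# the null hypothesis of a unit root is rejected and the series can be treated as stationary.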
df.reset_index('Date_datetime', inplace=True)
print(df[df['Date_datetime']== '2020-02-01'])
df.columns = ['ds', 'y']
model = Prophet()
model.fit(df)
from pandas import DataFrame
from datetime import datetime
# define the period for which we want a prediction
future = list()
for i in range(1, 13):
date = '2020-%01d-01' % i
future.append([date])
future = DataFrame(future)
future.columns = ['ds']
future['ds']= pd.to_datetime(future['ds'])
# summarize the forecast
forecast = model.predict(future)
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].head())
model.plot(forecast)
plt.show()
df2 =
|
pd.DataFrame({'ymanuelle': [2014559,1867936,1897048,1782988,1348716,1266016,1569453,1395306,1540000,1686389,1517011,2302956]})
|
pandas.DataFrame
|
#!/usr/bin/env python
###GENERIC
import pandas as pd
import numpy as np
import os,sys
from pathlib import Path
import logging
import json
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(message)s',
stream=sys.stderr, level=logging.DEBUG)
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
###SKLEARN
from sklearn.model_selection import train_test_split
###TENSORFLOW
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.utils import plot_model, to_categorical
from tensorflow.keras import backend as K
from tensorflow.keras import layers,losses
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense, LeakyReLU, Dropout, Flatten
from tensorflow.keras.layers import LSTM, RepeatVector, TimeDistributed, Input, ZeroPadding2D, ZeroPadding1D
from tensorflow.keras.layers import GlobalAveragePooling1D, BatchNormalization, UpSampling1D
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
from tensorflow.keras.regularizers import l1,l2,l1_l2
from tensorflow.keras.losses import CategoricalCrossentropy
### Generic Helpers
def trainSupervisedAE(params,model_config,topology,X_train,y_train,verbose=0,freeze=False,stacked=False):
#DL MODELS
reconstructedModel = LoadModel(model_config,topology)
if params['optimizer'] == 'adam':
opt = optimizers.Adam(params['learning_rate'])
elif params['optimizer'] == 'adadelta':
opt = optimizers.Adadelta(params['learning_rate'])
elif params['optimizer'] == 'sgd':
opt = optimizers.SGD(params['learning_rate'], momentum=0.9)
METRICS = [
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.CategoricalAccuracy(name='acc'),
tf.keras.metrics.AUC(name='auc'),
]
if freeze:
logging.info("Freeze active")
if not stacked:
for layer in reconstructedModel.layers:
if layer.name != 'code':
layer.trainable = False
elif layer.name == 'code':
layer.trainable = False
break
else:
for layer in reconstructedModel.layers:
print(layer.name)
if layer.name != 'supervised_output':
layer.trainable = False
else:
break
print(reconstructedModel.summary())
reconstructedModel.compile(loss="categorical_crossentropy",
#loss = tf.keras.losses.CosineSimilarity(axis=1),
optimizer=opt,
metrics=METRICS)
y_train = to_categorical(y_train)
model_history = reconstructedModel.fit(
X_train,
y_train,
epochs=params['epochs'],
shuffle=True,
callbacks=[
#es,
],
#validation_split = 0.1,
verbose=verbose)
#plot_supervised_metrics(model_history)
reconstructedModel.save(model_config['model_dir'] / 'supervised-dae')
return reconstructedModel
def buildBinaryAE(params,model_config,topology,model_name,stacked=False):
"""Classify anomalous vs. normal"""
#It means loading a full autoencoder
if not stacked:
reconstructedModel = LoadModel(model_config,topology)
pretrainedEncoder = reconstructedModel.encoder
else:
pretrainedEncoder = LoadModel(model_config,topology)
    if params['dense_neuron'] is not None:
pretrainedEncoder.add(Dense(params['dense_neuron'],name="supervised_dense"))
pretrainedEncoder.add(Dense(2,activation='softmax',name="supervised_output"))
pretrainedEncoder._name = model_name
print(pretrainedEncoder.summary())
logging.info("Created binary AE")
pretrainedEncoder.save(model_config['model_dir'] / model_name)
return pretrainedEncoder
def buildMulticlassAE(params,model_config,topology,num_class,model_name,stacked=False):
"""Classify anomalous vs. normal"""
#It means loading a full autoencoder
if not stacked:
reconstructedModel = LoadModel(model_config,topology)
pretrainedEncoder = reconstructedModel.encoder
else:
pretrainedEncoder = LoadModel(model_config,topology)
    if params['dense_neuron'] is not None:
pretrainedEncoder.add(Dense(params['dense_neuron'],name="supervised_dense"))
pretrainedEncoder.add(Dense(num_class,activation='softmax',name="supervised_output"))
pretrainedEncoder._name = model_name
print(pretrainedEncoder.summary())
logging.info("Created multiclass AE")
pretrainedEncoder.save(model_config['model_dir'] / model_name)
return pretrainedEncoder
def prepareFinetuningDataset(percentage,train_data,train_label,system,rs=42):
#Decide normal and anomalous counts
train_anom_label = train_label[train_label['anom'] != 0]
num_unique_anoms = len(train_anom_label['anom'].unique())
train_normal_label = train_label[train_label['anom'] == 0]
total_anom_count = len(train_anom_label)
average_class_anom_count = int(total_anom_count / num_unique_anoms)
total_normal_count = len(train_normal_label)
total_count = total_anom_count + total_normal_count
#Find how many instance needed for each class
num_instance_class = int((total_count * percentage) / (num_unique_anoms + 1))
print(num_instance_class)
print(average_class_anom_count)
_, labeled_normal_labels = train_test_split(train_normal_label,
test_size = num_instance_class/total_normal_count,
random_state=42)
if num_instance_class/average_class_anom_count >= 1:
labeled_anom_labels = train_anom_label
else:
_, labeled_anom_labels = train_test_split(train_anom_label,
test_size = num_instance_class/average_class_anom_count,
stratify = train_anom_label[['anom']],
random_state=rs)
#Prepare semi-supervised data and labels
train_semisup_label = pd.concat([labeled_normal_labels,labeled_anom_labels])
#Select semisup_training data
train_semisup_data = train_data[train_data.index.get_level_values('node_id').isin(train_semisup_label.index)]
train_semisup_label = train_semisup_label.reindex(train_semisup_data.index.get_level_values('node_id'))
assert list(train_semisup_label.index) == list(train_semisup_data.index.get_level_values('node_id'))
#logging.info("######PERCENTAGE: %s ########",percentage)
logging.info("\nSemi-supervised labeled data class distributions\n%s\n",train_semisup_label['anom'].value_counts())
#If the order is protected, convert data to array format
train_semisup_data = train_semisup_data.values
train_semisup_label = train_semisup_label['anom'].values
return train_semisup_data, train_semisup_label
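# Minimal usage sketch (hypothetical fraction and system name), assuming train_data is indexed by
# 'node_id' and train_label has an 'anom' column as the function expects:
# X_small, y_small = prepareFinetuningDataset(0.05, train_data, train_label, system="eclipse")
# A percentage of 0.05 keeps roughly 5% of the windows, aiming for equal counts per class.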
def plot_supervised_metrics(history):
metrics = ['loss', 'accuracy']
for n, metric in enumerate(metrics):
name = metric.replace("_"," ").capitalize()
plt.subplot(2,2,n+1)
plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train')
#plt.plot(history.epoch, history.history['val_'+metric],color=colors[1], linestyle="--", label='Val')
plt.xlabel('Epoch')
plt.ylabel(name)
if metric == 'loss':
plt.ylim([0, plt.ylim()[1]])
elif metric == 'auc':
plt.ylim([0,1])
else:
plt.ylim([0,1])
plt.legend()
plt.show()
def filteredTestingProctor(binary_model,multiclass_model,X_test):
logging.info("Double layer testing started")
test_pred_label = []
test_pred_label = np.argmax(binary_model.predict(X_test),1)
pred_as_anom_index = np.where(test_pred_label != 0)
pred_as_anom_data = X_test[pred_as_anom_index]
pred_as_anom_label = np.argmax(multiclass_model.predict(pred_as_anom_data),1)
test_pred_label[pred_as_anom_index] = pred_as_anom_label
return test_pred_label
def filteredTestingProctorScikit(binary_model,multiclass_model,X_test):
logging.info("Double layer testing started")
test_pred_label = []
test_pred_label = binary_model.predict(X_test)
pred_as_anom_index = np.where(test_pred_label != 0)
pred_as_anom_data = X_test[pred_as_anom_index]
pred_as_anom_label = multiclass_model.predict(pred_as_anom_data)
test_pred_label[pred_as_anom_index] = pred_as_anom_label
return test_pred_label
###Model Loaders
def LoadModel(config,model_name):
"""Loads model with all the weights and necessary architecture"""
logging.info("Loading model!")
loaded_model = tf.keras.models.load_model(str(config['model_dir'] / (model_name)))
return loaded_model
def LoadEncoder(config,model_name):
"""Loads model with all the weights and necessary architecture"""
logging.info("Loading encoder model!")
loaded_model = tf.keras.models.load_model(str(config['model_dir'] / (model_name + '_encoder')))
return loaded_model
def LoadModelAndWeights(config,model_name):
"""Loads model and set model weights saved by Checkpoint callback"""
logging.info("Loading model with checkpoint weights!")
loaded_model = tf.keras.models.load_model(str(config['model_dir'] / (model_name)))
loaded_model.load_weights(config['model_dir'] / (model_name + "_weights.h5"))
return loaded_model
###Evaluation
def supervisedTPDSEvaluation(train_data, train_label, test_data, test_label, conf, anom_ratio, cv_index, name, plot_cm=False):
######################## RAW DATA ########################
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
clf = Pipeline([
('clf', RandomForestClassifier(n_estimators=100))
])
clf.fit(train_data, train_label)
pipelineAnalysis(clf,
train_data,train_label,
test_data, test_label,
conf=conf,
cv_index=cv_index,
size=anom_ratio,
save_name=name,
name_cm = name,
plot_cm = plot_cm)
def supervisedEvaluation(encoder, train_data, train_label, test_data, test_label, conf, label_ratio, cv_index, plot_cm=False,name_suffix=''):
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
#stacked_encoder.summary()
hidden_train = encoder.predict(train_data)
hidden_test = encoder.predict(test_data)
######################## ENCODER OUTPUT ########################
from sklearn.multiclass import OneVsRestClassifier
#Aksar LR
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression(random_state=0,max_iter=10000,multi_class='ovr')
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_lr' + name_suffix,
# name_cm = 'aksar_lr' + name_suffix,
# plot_cm = plot_cm)
# #Aksar SVM - OVR
# clf = svm.SVC(kernel='rbf')
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_svm-OVR'+ name_suffix,
# name_cm = 'aksar_svm-OVR'+ name_suffix,
# plot_cm = plot_cm)
#Aksar LINEAR SVM - OVR
clf = svm.LinearSVC(max_iter=5000)
clf.fit(hidden_train, train_label)
pipelineAnalysis(clf,
hidden_train,train_label,
hidden_test, test_label,
conf=conf,
cv_index=cv_index,
size=label_ratio,
save_name='aksar_l-svm-OVR'+ name_suffix,
name_cm = 'aksar_l-svm-OVR'+ name_suffix,
plot_cm = plot_cm)
# #Aksar LINEAR SVM - OVR
# clf = svm.LinearSVC(max_iter=5000,
# loss = 'hinge')
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_l-svm-OVR-2',
# name_cm = 'aksar_l-svm-OVR-2',
# plot_cm = plot_cm)
# clf = svm.LinearSVC(max_iter=5000,
# #loss = 'hinge'
# penalty='l1')
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_l-svm-OVR-3',
# name_cm = 'aksar_l-svm-OVR-3',
# plot_cm = plot_cm)
# clf = svm.LinearSVC(max_iter=5000,
# #loss = 'hinge'
# penalty='l2',
# C=0.5)
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_l-svm-OVR-3',
# name_cm = 'aksar_l-svm-OVR-3',
# plot_cm = plot_cm)
#Aksar XGboost
# from xgboost import XGBClassifier
# clf = OneVsRestClassifier(XGBClassifier())
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_xgb',
# name_cm = 'aksar_xgb',
# plot_cm = plot_cm)
# #Aksar RF OVR
# from sklearn.ensemble import RandomForestClassifier
# clf = OneVsRestClassifier(RandomForestClassifier())
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_rf-OVR'+ name_suffix,
# name_cm = 'aksar_rf-OVR'+ name_suffix,
# plot_cm = plot_cm)
# #Aksar DNN
# from sklearn.neural_network import MLPClassifier
# clf = OneVsRestClassifier(MLPClassifier(
# hidden_layer_sizes = [32],
# solver = 'lbfgs',
# max_iter = 600))
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_mlp-lbfgs'+ name_suffix,
# name_cm = 'aksar_mlp-lbfgs'+ name_suffix,
# plot_cm = plot_cm)
# #Aksar DNN-Adam
# from sklearn.neural_network import MLPClassifier
# clf = OneVsRestClassifier(MLPClassifier(
# hidden_layer_sizes = [32],
# solver = 'adam',
# max_iter = 600))
# clf.fit(hidden_train, train_label)
# pipelineAnalysis(clf,
# hidden_train,train_label,
# hidden_test, test_label,
# conf=conf,
# cv_index=cv_index,
# size=label_ratio,
# save_name='aksar_mlp-adam'+ name_suffix,
# name_cm = 'aksar_mlp-adam'+ name_suffix,
# plot_cm = plot_cm)
def FAR_AMR_Calculate(true_label,pred_label):
"""
Calculates false alarm rate and anomaly miss rate
Assumes 0 is normal label and other labels are anomalies
Args:
true_label: Array composed of integer labels, e.g., [0,0,4,2]
pred_label: Array composed of integer labels, e.g., [0,0,4,2]
"""
# • False alarm rate: The percentage of the healthy windows that are identified as anomalous (any anomaly type).
# • Anomaly miss rate: The percentage of the anomalous windows that are identified as healthy
alarm_dict = {}
normal_true_idx = np.where(true_label==0)[0]
anom_true_idx = np.where(true_label!=0)[0]
#Find number of normal samples labeled as anomalous
fp_deploy = pred_label[normal_true_idx][pred_label[normal_true_idx] != 0]
false_alarm_rate = len(fp_deploy) / len(normal_true_idx)
logging.info("Total misclassified normal runs: %s, Total normal runs %s ",str(len(fp_deploy)),str(len(normal_true_idx)))
logging.info("FAR: %s",false_alarm_rate)
#Find number of anomalous samples labeled as normal
fn_deploy = pred_label[anom_true_idx][pred_label[anom_true_idx] == 0]
anom_miss_rate = len(fn_deploy) / len(anom_true_idx)
logging.info("Total misclassified anom runs: %s, Total anom runs %s ",str(len(fn_deploy)),str(len(anom_true_idx)))
logging.info("AMR: %s",anom_miss_rate)
return false_alarm_rate, anom_miss_rate
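# Worked toy example (hypothetical labels): with true = [0,0,0,1,2] and pred = [0,3,0,0,2],
# one of three normal windows is flagged as anomalous (FAR = 1/3) and one of two anomalous
# windows is predicted as healthy (AMR = 1/2).
# far, amr = FAR_AMR_Calculate(np.array([0,0,0,1,2]), np.array([0,3,0,0,2]))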
def analysis_wrapper_multiclass(true_labels, pred_labels,conf,cv_index,name,name_cm='Deployment Data',save=True,plot=True):
"""
true_labels: it should be in the format of an array [0,2,1,3,...]
pred_labels: it should be in the format of an array [0,1,1,4,...]
"""
from sklearn.metrics import classification_report
logging.info("####################################")
logging.info("%s\n%s",name_cm,classification_report(y_true=true_labels, y_pred =pred_labels))
logging.info("#############")
deploy_report = classification_report(y_true=true_labels, y_pred =pred_labels,output_dict=True)
if save:
cv_path = conf['results_dir']
json_dump = json.dumps(deploy_report)
f_json = open(cv_path / ("{}_report_dict.json".format(name)),"w")
f_json.write(json_dump)
f_json.close()
if plot:
plot_cm(true_labels, pred_labels,name=name_cm)
false_anom_rate_calc(true_labels,pred_labels,conf,cv_index,name,save)
def analysis_wrapper_binary(true_labels, pred_labels,conf,cv_index,name,name_cm='Deployment Data',save=True,plot=True):
"""
true_labels: it should be in the format of an array [0,0,1,0,...]
pred_labels: it should be in the format of an array [0,0,1,0,...]
"""
from sklearn.metrics import classification_report
logging.info("####################################")
logging.info("%s\n%s",name_cm,classification_report(y_true=true_labels, y_pred =pred_labels))
logging.info("#############")
deploy_report = classification_report(y_true=true_labels, y_pred =pred_labels,output_dict=True)
# target_names = ['normal','anomaly']
# print(classification_report(y_true=true_labels, y_pred =pred_labels,target_names=target_names))
# deploy_report = classification_report(y_true=true_labels, y_pred =pred_labels,target_names=target_names,output_dict=True)
if save:
cv_path = conf['results_dir']
# cv_path = conf['plots_dir'] / ("CV_" + str(cv_index))
# if not cv_path.exists():
# cv_path.mkdir(parents=True)
json_dump = json.dumps(deploy_report)
f_json = open(cv_path / ("{}_report_dict.json".format(name)),"w")
f_json.write(json_dump)
f_json.close()
if plot:
plot_cm(true_labels, pred_labels,name=name_cm)
false_anom_rate_calc(true_labels,pred_labels,conf,cv_index,name,save)
def pipelineAnalysis(clf,train_data,train_true_label,test_data, test_true_label, conf, cv_index, size, save_name="", name_cm="",plot_cm=True,save=True):
"""
Send the classifier and generate necessary results for train and test data
"""
train_pred_label = clf.predict(train_data)
analysis_wrapper_multiclass(true_labels=train_true_label,
pred_labels=train_pred_label,
conf=conf,
cv_index=cv_index,
name = (save_name + "_train_{}").format(size),
name_cm = name_cm + " Train",
save=save,
plot=plot_cm
)
test_pred_label = clf.predict(test_data)
analysis_wrapper_multiclass(true_labels=test_true_label,
pred_labels=test_pred_label,
conf=conf,
cv_index=cv_index,
name = (save_name + "_test_{}").format(size),
name_cm = name_cm + " Test",
save=save,
plot=plot_cm
)
def pipelineUnknownAnalysis(test_true_label,test_pred_label, conf, cv_index, size, save_name="", name_cm="",plot_cm=True,save=True):
"""
Send the classifier and generate necessary results for train and test data
"""
# train_pred_label = clf.predict(train_data)
# analysis_wrapper_multiclass(true_labels=train_true_label,
# pred_labels=train_pred_label,
# conf=conf,
# cv_index=cv_index,
# name = (save_name + "_train_{}").format(size),
# name_cm = name_cm + " Train",
# save=save,
# plot=plot_cm
# )
# test_pred_label = clf.predict(test_data)
analysis_wrapper_multiclass(true_labels=test_true_label,
pred_labels=test_pred_label,
conf=conf,
cv_index=cv_index,
name = (save_name + "_test_{}").format(size),
name_cm = name_cm + " Test",
save=save,
plot=plot_cm
)
def pipelineAnalysisBorghesi(clf, threshold, train_data,train_true_label,test_data, test_true_label, conf, cv_index, size, save_name="", name_cm="",plot_cm=True,save=True):
"""
Send the classifier and generate necessary results for train and test data
"""
# _, mae_loss = get_MAE_loss(clf,train_data)
# train_pred_label = np.zeros(len(mae_loss)).astype(int)
# train_pred_label[mae_loss > threshold] = 1
# analysis_wrapper_multiclass(true_labels=train_true_label,
# pred_labels=train_pred_label,
# conf=conf,
# cv_index=cv_index,
# name = (save_name + "_train_{}").format(size),
# name_cm = name_cm + " Train",
# save=save,
# plot=plot_cm
# )
_, mae_loss = get_MAE_loss(clf,test_data)
test_pred_label = np.zeros(len(mae_loss)).astype(int)
test_pred_label[mae_loss > threshold] = 1
analysis_wrapper_multiclass(true_labels=test_true_label,
pred_labels=test_pred_label,
conf=conf,
cv_index=cv_index,
name = (save_name + "_test_{}").format(size),
name_cm = name_cm + " Test",
save=save,
plot=plot_cm
)
def pipelineAnalysisKeras(clf,train_data,train_true_label,test_data, test_true_label, conf, cv_index, size, save_name="", name_cm="",plot_cm=True,save=True):
"""
Send the classifier and generate necessary results for train and test data
"""
train_pred_label = np.argmax(clf.predict(train_data),1)
analysis_wrapper_multiclass(true_labels=train_true_label,
pred_labels=train_pred_label,
conf=conf,
cv_index=cv_index,
name = (save_name + "_train_{}").format(size),
name_cm = name_cm + " Train",
save=save,
plot=plot_cm
)
test_pred_label = np.argmax(clf.predict(test_data),1)
analysis_wrapper_multiclass(true_labels=test_true_label,
pred_labels=test_pred_label,
conf=conf,
cv_index=cv_index,
name = (save_name + "_test_{}").format(size),
name_cm = name_cm + " Test",
save=save,
plot=plot_cm
)
def generate_results(clf, X, y, result_name):
"""
Prints classification report, plots confusion matrix and returns predictions
"""
    from sklearn.metrics import classification_report
    target_names = ['normal','membw','memleak','cachecopy','cpuoccupy']
X_pred = clf.predict(X)
print(classification_report(y_true=y, y_pred=X_pred, target_names=target_names))
plot_cm(y, X_pred, name=result_name + '')
return X_pred
def plot_cm(labels, predictions, name):
cm = tf.math.confusion_matrix(labels, predictions)
plt.figure(figsize=(5,5))
sns.heatmap(cm, annot=True, fmt="d")
plt.title('{}'.format(name))
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.show()
def false_anom_rate_calc(true_label,pred_label,conf,cv_index,name,save):
"""
Calculates false alarm rate and anomaly miss rate
Assumes 0 is normal label and other labels are anomalies
Args:
true_label: Array composed of integer labels, e.g., [0,0,4,2]
pred_label: Array composed of integer labels, e.g., [0,0,4,2]
"""
# • False alarm rate: The percentage of the healthy windows that are identified as anomalous (any anomaly type).
# • Anomaly miss rate: The percentage of the anomalous windows that are identified as healthy
alarm_dict = {}
normal_true_idx = np.where(true_label==0)[0]
anom_true_idx = np.where(true_label!=0)[0]
#Find number of normal samples labeled as anomalous
fp_deploy = pred_label[normal_true_idx][pred_label[normal_true_idx] != 0]
false_alarm_rate = len(fp_deploy) / len(normal_true_idx)
logging.info("Total misclassified normal runs: %s, Total normal runs %s ",str(len(fp_deploy)),str(len(normal_true_idx)))
logging.info(false_alarm_rate)
#Find number of anomalous samples labeled as normal
fn_deploy = pred_label[anom_true_idx][pred_label[anom_true_idx] == 0]
anom_miss_rate = len(fn_deploy) / len(anom_true_idx)
logging.info("Total misclassified anom runs: %s, Total anom runs %s ",str(len(fn_deploy)),str(len(anom_true_idx)))
logging.info(anom_miss_rate)
alarm_dict['false_alarm_rate'] = false_alarm_rate
alarm_dict['anom_miss_rate'] = anom_miss_rate
if save:
json_dump = json.dumps(alarm_dict)
f_json = open(conf['results_dir'] / ("{}_alert_dict.json".format(name)),"w")
f_json.write(json_dump)
f_json.close()
def falseAnomRateCalc(true_label,pred_label):
"""
Calculates false alarm rate and anomaly miss rate
Assumes 0 is normal label and other labels are anomalies
Args:
true_label: Array composed of integer labels, e.g., [0,0,4,2]
pred_label: Array composed of integer labels, e.g., [0,0,4,2]
"""
# • False alarm rate: The percentage of the healthy windows that are identified as anomalous (any anomaly type).
# • Anomaly miss rate: The percentage of the anomalous windows that are identified as healthy
alarm_dict = {}
normal_true_idx = np.where(true_label==0)[0]
anom_true_idx = np.where(true_label!=0)[0]
#Find number of normal samples labeled as anomalous
fp_deploy = pred_label[normal_true_idx][pred_label[normal_true_idx] != 0]
false_alarm_rate = len(fp_deploy) / len(normal_true_idx)
logging.info("Total misclassified normal runs: %s, Total normal runs %s ",str(len(fp_deploy)),str(len(normal_true_idx)))
logging.info(false_alarm_rate)
#Find number of anomalous samples labeled as normal
fn_deploy = pred_label[anom_true_idx][pred_label[anom_true_idx] == 0]
anom_miss_rate = len(fn_deploy) / len(anom_true_idx)
logging.info("Total misclassified anom runs: %s, Total anom runs %s ",str(len(fn_deploy)),str(len(anom_true_idx)))
logging.info(anom_miss_rate)
alarm_dict['false_alarm_rate'] = false_alarm_rate
alarm_dict['anom_miss_rate'] = anom_miss_rate
return alarm_dict
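# Minimal usage sketch for falseAnomRateCalc / false_anom_rate_calc on toy
# labels (not project data); label 0 is "healthy", anything else is an anomaly.
def _alarm_rate_example():
    import numpy as np
    true_label = np.array([0, 0, 0, 2, 4])
    pred_label = np.array([0, 3, 0, 0, 4])
    rates = falseAnomRateCalc(true_label, pred_label)
    # one of three healthy windows flagged -> false_alarm_rate = 1/3
    # one of two anomalous windows missed  -> anom_miss_rate   = 1/2
    return rates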
### TRAINING RELATED PLOTS/RESULTS
import matplotlib.pyplot as plt
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
import seaborn as sns
sns.set(rc={'figure.figsize':(12,10)})
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_context("talk")
def calc_MSE_class(model, data,labels,ANOM_DICT):
mse_df = pd.DataFrame(columns=['Average_MSE','Class'])
import model_framework
import platform
import statfile
import copy
import fileRelated
import pandas as pd
import experiment as experiment
import main_config
from pathlib import Path
def main():
"""intialize and run the model, for indepth detail about the config or how to run the code, go to the github page for this code"""
# you can control for multiple interventions by adding a case:
# [(modified attr1, newVal), (modified attr2, newVal), ...]
# simulation name --> simulation controlled variable(s)
# don't use '.' or '-' (or any other special symbols) in the simulation name, because the names are used to save image files
modelConfig = main_config.modelConfig
R0_controls = {
"World" : [
("DynamicCapacity", False),
],
"Infection" : [
("baseP" , 1.25),
("SeedNumber", 100),
],
"HybridClass":[
("ChangedSeedNumber", 10),
],
}
# this overrides the previous experiments, since base_p is being changed
R0_controls = {
"World" : [
("DynamicCapacity", False),
],
"HybridClass":[
("ChangedSeedNumber", 10),
],
}
def cross_scenarios(scenario1, scenario2):
experiments = {}
for keyname, experiment1 in scenario1.items():
for screenname, screen in scenario2.items():
experiment_name = screenname +"_" + keyname
experiments[experiment_name] = screen.copy()
for key, value in experiment1.items():
#print(key, value)
experiments[experiment_name][key] = value.copy()
return copy.deepcopy(experiments)
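# Minimal usage sketch with hypothetical scenario dicts (the category "Policy"
# and both scenario names are made up): cross_scenarios builds the cartesian
# product of two scenario dictionaries, merging their per-category control
# lists and naming each combination "<scenario2>_<scenario1>".
def _cross_scenarios_example():
    s1 = {"low_p": {"Infection": [("baseP", 1.0)]}}
    s2 = {"mask": {"Policy": [("FaceMask", True)]}}
    crossed = cross_scenarios(s1, s2)
    # -> {"mask_low_p": {"Policy": [("FaceMask", True)],
    #                    "Infection": [("baseP", 1.0)]}}
    return crossed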
def print_nicely(experiment_scenarios):
for ex_name, ex_config in experiment_scenarios.items():
print("\n","*"*20,"\n", ex_name)
for ex_config_name, ex_config_list in ex_config.items():
print(ex_config_name, ":" ,ex_config_list)
#experiment2 = cross_scenarios(experiment.vaccine3, experiment.low_med)
#experiment3 =cross_scenarios(experiment.vaccine4, experiment.facemask3)
experiment1 = experiment.marginals
experiment2 = experiment.original_3x3
experiment3 = cross_scenarios(experiment.different_base_p_jump_025, experiment.medium_student_vary_policy)
experiment4 = cross_scenarios(experiment.medium_student_vary_policy, experiment.off_campus_multiplier)
experiment5 = experiment.diff_seed_number
experiment6 = experiment.facemask_param
#print(len(experiment3))
#print_nicely(experiment3)
basemodel = {"basemodel": {}}
multi_experiments = {
"request_1_marginal": experiment1,#
"request_2_3x3": experiment2,
"request_3_diff_base_p": experiment3,
"request_4_fixed_p_diff_offcampusP": experiment4,
"request_5_diff_seed_number": experiment5,
"request_6_facemask_param": experiment6,
}
print("here are the loaded experiments:")
for r_name, exp in multi_experiments.items():
r_name+=(" "*max(0, (40-len(r_name))))
print(f"{r_name} with {len(exp)} experiments")
#multi_experiments = {"new_request4": experiment.new_check}
user_input = input("which request # do you want to run? 0 to run all in one thread")
user_input = int(user_input)
sp_num = [123, 456, 12, 34, 56]
if (user_input < 0 or user_input > len(multi_experiments)) and user_input not in sp_num:
print("input number does not match experiment number, exiting program")
return
for sp_index, (request_name, modelConfigs) in enumerate(multi_experiments.items()):
if ((sp_index == user_input-1) or (user_input == 0) or (user_input==123 and sp_index < 3) or
(user_input==456 and sp_index >= 3) or (user_input==12 and sp_index < 2) or (user_input==34 and 4>sp_index>1)
or (user_input==56 and sp_index >= 4)):
print(sp_index)
R0Dict = dict()
InfectedCountDict = dict()
output_dir = fileRelated.fullPath(request_name, "outputs")
Path(output_dir).mkdir(parents=False, exist_ok=True)
output_folder = "outputs/"+ request_name
print(request_name)
for index, (modelName, modelControl) in enumerate(modelConfigs.items()):
print("finished", index)
configCopy = copy.deepcopy(modelConfig)
#print("*"*20)
#print(configCopy["Agents"].keys())
#print("*"*20)
#print(f"started working on initializing the simualtion for {modelName}")
for categoryKey, listOfControls in modelControl.items():
#print(listOfControls)
for (specificKey, specificValue) in listOfControls:
if specificKey not in configCopy[categoryKey].keys():
print("error", specificKey, specificValue, " was not assigned correctly")
#return
else:
configCopy[categoryKey][specificKey] = specificValue
R0Count, multiCounts = 100, 100
if index in [0, 1] and False:
R0Count = 200
#print(configCopy)
if index > -1:
#model_framework.simpleCheck(configCopy, days=10, visuals=True, debug=True, modelName=modelName)
InfectedCountDict[modelName] = model_framework.multiSimulation(multiCounts, configCopy, days=100, debug=False, modelName=modelName, outputDir=output_folder)
R0Dict[modelName] = model_framework.R0_simulation(configCopy, R0_controls,R0Count, debug=False, timeSeriesVisual=False, R0Visuals=True, modelName=modelName, outputDir=output_folder)
# the value of the dictionary is ([multiple R0 values], (descriptors, (tuple of useful data like mean and stdev))
print(InfectedCountDict.items())
print(R0Dict.items())
if True:
#for k in R0Dict.keys():
# R0Dict[k] = [list(R0Dict[k][0]) + [1 for _ in range(98)], R0Dict[k][1]]
# print(R0Dict)
simulationGeneration = "0"
saveName = "comparingModels_"+simulationGeneration
# reads R0 data
#fileRelated.mergeR0(R0Dict, fileRelated.fullPath("request_5/R0_data.csv", "outputs"))
print(R0Dict)
if R0Count > 0:
statfile.comparingBoxPlots(R0Dict, plottedData="R0", saveName=saveName, outputDir=output_folder)
if multiCounts >0:
statfile.comparingBoxPlots(InfectedCountDict ,plottedData="inf", saveName=saveName, outputDir=output_folder)
#for key, value in R0Dict.items():
# if isinstance(R0Dict[key][1], str):
# R0Dict[key] = value[0]
# # else do nothing
# #print(key, value)
#print(R0Dict)
# check if dict is not empty
merged = False
if merged:
for k, v in R0Dict.items():
print(k, len(v))
if isinstance(v[-1], str) or isinstance(v[-1], tuple):
R0Dict[k] = v[0]
sameshape = True
sizes = []
for k,v in R0Dict.items():
sizes.append(len(v[0]))
print("size is",sizes)
if len(set(sizes)) == 1:
R0_df = pd.DataFrame(R0Dict)
from flask import Flask, request, jsonify, render_template
import pickle
import sklearn
import pandas as pd
import csv
import numpy as np
from AirBnbApp.predict import vectorize_data, pre
def create_app():
'''Create and configure an instance of the Flask application'''
app = Flask(__name__)
def get_df():
data_file = open('/app/AirBnbApp/airbnb.csv')
csv_file = csv.reader(data_file)
info = []
for row in csv_file:
info.append(row)
data = pd.DataFrame(info)
# library doc string
'''
Author: goldin2008
Date: December, 2021
This module implements the main script for the customer churn project with clean code
'''
# import libraries
import os
import yaml
import shap
import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# from sklearn.preprocessing import normalize
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import plot_roc_curve, classification_report
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
sns.set()
with open("config.yaml", 'r') as f:
config = yaml.safe_load(f)
def import_data(pth):
'''
returns dataframe for the csv found at pth
input:
pth: a path to the csv
output:
df: pandas dataframe
'''
df = pd.read_csv(pth)
return df
def perform_eda(df):
'''
perform eda on df and save figures to images folder
input:
df: pandas dataframe
output:
None
'''
# plot churn
df['Churn'] = df['Attrition_Flag'].apply(
lambda val: 0 if val == "Existing Customer" else 1)
plt.figure(figsize=(20, 10))
df['Churn'].hist()
plt.savefig('./images/eda/churn_distribution.png')
plt.close()
# plot customer age
plt.figure(figsize=(20, 10))
df['Customer_Age'].hist()
plt.savefig('./images/eda/customer_age_distribution.png')
plt.close()
# plot marital status
plt.figure(figsize=(20, 10))
df.Marital_Status.value_counts('normalize').plot(kind='bar')
plt.savefig('./images/eda/marital_status_distribution.png')
plt.close()
# plot total transaction ct
plt.figure(figsize=(20, 10))
sns.distplot(df['Total_Trans_Ct'])
plt.savefig('./images/eda/total_transaction_distribution.png')
plt.close()
# plot heatmap
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=False, cmap='Dark2_r', linewidths=2)
plt.savefig('./images/eda/heatmap.png')
plt.close()
def encoder_helper(df, category_lst, response):
'''
helper function to turn each categorical column into a new column with
propotion of churn for each category - associated with cell 15 from the notebook
input:
df: pandas dataframe
category_lst: list of columns that contain categorical features
response: string of response name [optional argument that could be used
for naming variables or index y column]
output:
df: pandas dataframe with new columns for
'''
for col in category_lst:
new_lst = []
group_obj = df.groupby(col).mean()[response]
for val in df[col]:
new_lst.append(group_obj.loc[val])
new_col_name = col + '_' + response
df[new_col_name] = new_lst
return df
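# Minimal sketch of the mean-target ("churn proportion") encoding performed by
# encoder_helper, on toy data (the column and values below are made up):
def _encoder_helper_example():
    toy = pd.DataFrame({'Gender': ['M', 'F', 'M', 'F'],
                        'Churn':  [1,   0,   1,   0]})
    encoded = encoder_helper(toy, ['Gender'], 'Churn')
    # the new Gender_Churn column holds each category's churn rate: M -> 1.0, F -> 0.0
    return encoded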
def perform_feature_engineering(df, response):
'''
input:
df: pandas dataframe
response: string of response name [optional argument that could be
used for naming variables or index y column]
output:
X_train: X training data
X_test: X testing data
y_train: y training data
y_test: y testing data
'''
# encoded categorical features to digital numbers
cat_columns = config['data']['categorical_features']
y = df['Churn']
X = pd.DataFrame()
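# The function body is truncated here by the snippet boundary. A minimal,
# self-contained sketch of the train/test contract described in the docstring
# (an assumption for illustration, NOT the author's implementation):
def _split_sketch(features_df, y):
    return train_test_split(features_df, y, test_size=0.3, random_state=42)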
import pandas as pd
from getsig import getsig
from scipy.interpolate import interp1d
from scipy.signal import medfilt
import numpy as np
def create_dd_df(shotnr=30554, tBegin=1.0, tEnd=6.5, dt=0.1,
dne_data=['DNE','neDdel_2',17,'sfp',21],
ouput_file=None):
"""Returns a pandas dataframe"""
times = np.arange(tBegin, tEnd+dt, dt)
beta = getsig(shotnr, 'TOT', 'beta_N')
wmhd = getsig(shotnr, 'TOT', 'Wmhd')
taue = getsig(shotnr, 'TOT', 'tau_tot')
h98 = getsig(shotnr, 'TTH', 'H/L-facs')
h98y2_index = 7
nev_shotfile = 'DNE'
nev_signal = 'neDdel_2'
nev_channel = 17
nev_experiment = 'sfp'
medfilt_pts = 21
# Divertor spectroscopy data
nev_shotfile = dne_data[0]
nev_signal = dne_data[1]
nev_channel = int(dne_data[2])
nev_experiment = dne_data[3]
medfilt_pts = int(dne_data[4])
dne = getsig(shotnr, nev_shotfile, nev_signal, exper=nev_experiment)
#Interpolations
try:
interp_beta = interp1d(beta.time, beta.data)
beta_df_entry = interp_beta(times)
except:
beta_df_entry = np.zeros_like(times)
try:
interp_wmhd = interp1d(wmhd.time, wmhd.data)
wmhd_df_entry = interp_wmhd(times)
except:
wmhd_df_entry = np.zeros_like(times)
try:
interp_taue = interp1d(taue.time, taue.data)
taue_df_entry = interp_taue(times)
except:
taue_df_entry = np.zeros_like(times)
try:
interp_h98 = interp1d(h98.time, h98.data[:,h98y2_index])
h98_df_entry = interp_h98(times)
except:
h98_df_entry = np.zeros_like(times)
interp_nev = interp1d(dne.time, medfilt(dne.data[:, nev_channel], medfilt_pts))
nev_df_entry = interp_nev(times)
dict_df = {'time':times, 'nev':nev_df_entry, 'h98y2':h98_df_entry, 'taue':taue_df_entry, 'beta':beta_df_entry, 'wmhd':wmhd_df_entry}
ddf = pd.DataFrame(data=dict_df)
#change test
import pandas as pd
from pandas import DataFrame
'''Attempt at a class: the test list and dataframe live inside it.
It locks the df and list into the class and adds the list as a new
column of the dataframe.
'''
test_list = [2,5,1,76,8,2,3,7,2]
class Lsc():
def __init__(self, df, listt):
self.df = df
self.listt = listt
series1 = pd.Series(listt)
df['newcol'] = series1
if __name__ == "__main__":
df = DataFrame({"col":["T","A","F","H","E","B"]})
# -*- coding: utf-8 -*-
import os
import sys
import time
import openpyxl as openpyxl
import pandas
import pandas as pd
import tushare as ts
import numpy as np
from datetime import datetime, timedelta
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import mplfinance as mpf
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QApplication, QMessageBox
from dateutil.relativedelta import relativedelta
from mpl_finance import candlestick_ohlc, candlestick2_ohlc
import decimal
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QDialog, QApplication
from primodial import Ui_Dialog
from numpy import long
# author : ye
mode = 0
fig, ax = plt.subplots()
datadir = './data/'
strategydir = './strategy/'
financialdir = './financialdata/'
x, y, lastday, xminnow, xmaxnow = 1, 1, 0, 0, 0
# a thin cloud indicates consolidation; the thinner it gets, the larger the coming trend change; check whether a new high has been made
# to avoid data collection, change the return value to the date suffix of a file in the 'data' directory -> enter offline mode!
def endDate():
return time.strftime('%Y%m%d')
# return '20210818'
# 1:excel 0:tushare
def getDataByTscode(ts_code, mode):
if mode == 1:
filedir = os.path.join(datadir, nameStrategy(ts_code))
byexcel = pd.read_excel(filedir)
byexcel.index = byexcel['Unnamed: 0']
byexcel = byexcel.drop(columns=['Unnamed: 0'])
return byexcel
if mode == 0:
ts.set_token('<KEY>')
pro = ts.pro_api()
t1 = endDate()
t2 = (datetime.now() - relativedelta(years=1)).strftime('%Y%m%d')
df = pro.daily(ts_code=ts_code, start_date=t2, end_date=t1)
df = df.iloc[::-1]
return df
def nameStrategy(code):
return code + '-' + endDate() + '.xlsx'
def vision(data, ts_name):
ichimoku = Ichimoku(data)
ichimoku.run()
ichimoku.plot(ts_name)
def call_back(event):
axtemp = event.inaxes
x_min, x_max = axtemp.get_xlim()
fanwei = (x_max - x_min) / 10
if event.button == 'up':
axtemp.set(xlim=(x_min + fanwei, x_max - fanwei))
elif event.button == 'down':
axtemp.set(xlim=(x_min - fanwei, x_max + fanwei))
fig.canvas.draw_idle()
def button_press_callback(click):
global x
global y
x = click.xdata
y = click.ydata
point = (click.xdata, click.ydata)
print(point)
def motion_notify_callback(event):
global x, xminnow, xmaxnow
if event.button != 1: return
xnow = event.xdata
print(x)
delta = x - xnow
plt.xlim(xmin=xminnow + delta, xmax=xmaxnow + delta)
xminnow = xminnow + delta
xmaxnow = xmaxnow + delta
x = xnow
point = (event.xdata, event.ydata, xminnow, xmaxnow)
print(point)
fig.canvas.draw_idle()
class Ichimoku():
"""
@param: ohcl_df <DataFrame>
Required columns of ohcl_df are:
Date<Float>,Open<Float>,High<Float>,Close<Float>,Low<Float>
"""
def __init__(self, ohcl_df):
self.ohcl_df = ohcl_df
ohcl_df['trade_date'] = pandas.to_datetime(ohcl_df['trade_date'].astype(str))
def run(self):
tenkan_window = 9
kijun_window = 26
senkou_span_b_window = 52
cloud_displacement = 26
chikou_shift = -26
ohcl_df = self.ohcl_df
# Dates are floats in mdates like 736740.0
# the period is the difference of last two dates
last_date = ohcl_df["trade_date"].iloc[-1].date()
period = 1
# Add rows for N periods shift (cloud_displacement)
ext_beginning = last_date + timedelta(days=1)
ext_end = last_date + timedelta(days=((period * cloud_displacement) + period))
dates_ext = pd.date_range(start=ext_beginning, end=ext_end)
dates_ext_df = pd.DataFrame({"trade_date": dates_ext})
dates_ext_df.index = dates_ext # also update the df index
ohcl_df = ohcl_df.append(dates_ext_df)
# Tenkan
tenkan_sen_high = ohcl_df["high"].rolling(window=tenkan_window).max()
tenkan_sen_low = ohcl_df["low"].rolling(window=tenkan_window).min()
ohcl_df['tenkan_sen'] = (tenkan_sen_high + tenkan_sen_low) / 2
# Kijun
kijun_sen_high = ohcl_df["high"].rolling(window=kijun_window).max()
kijun_sen_low = ohcl_df["low"].rolling(window=kijun_window).min()
ohcl_df['kijun_sen'] = (kijun_sen_high + kijun_sen_low) / 2
# Senkou Span A
ohcl_df['senkou_span_a'] = ((ohcl_df['tenkan_sen'] + ohcl_df['kijun_sen']) / 2).shift(cloud_displacement)
# Senkou Span B
senkou_span_b_high = ohcl_df["high"].rolling(window=senkou_span_b_window).max()
senkou_span_b_low = ohcl_df["low"].rolling(window=senkou_span_b_window).min()
ohcl_df['senkou_span_b'] = ((senkou_span_b_high + senkou_span_b_low) / 2).shift(cloud_displacement)
# Chikou
ohcl_df['chikou_span'] = ohcl_df["close"].shift(chikou_shift)
self.ohcl_df = ohcl_df
ohcl_df['trade_date'] = mdates.date2num(ohcl_df['trade_date'])
ohcl_df.index = ohcl_df['trade_date']
ohcl_df['MA10'] = ohcl_df['close'].rolling(10).mean()
ohcl_df['MA5'] = ohcl_df['close'].rolling(5).mean()
ohcl_df['MA20'] = ohcl_df['close'].rolling(20).mean()
return ohcl_df
def plot(self, ts_name):
global xminnow, xmaxnow
fig.canvas.mpl_connect('scroll_event', call_back)
fig.canvas.mpl_connect('button_press_event', button_press_callback)
fig.canvas.mpl_connect('motion_notify_event', motion_notify_callback)
self.plot_candlesticks(fig, ax)
self.plot_ichimoku(fig, ax)
self.pretty_plot(fig, ax, ts_name)
plt.xlim(xmin=lastday - 200, xmax=lastday)
xminnow = lastday - 200
xmaxnow = lastday
plt.xlim(xmin=xminnow + 80, xmax=xmaxnow + 80)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pretty_plot(self, fig, ax, ts_name):
ax.legend()
fig.autofmt_xdate()
ax.set_xticks(range(len(self.ohcl_df['trade_date'])))
d = mdates.num2date(self.ohcl_df['trade_date'])
for i in range(0, len(d)):
d[i] = datetime.strftime(d[i], '%m-%d')
ax.set_xticklabels(d)
ax.xaxis_date()
ax.xaxis.set_major_locator(ticker.MultipleLocator(4))
# Chart info
title = ts_name
bgcolor = '#f0f0f0'
grid_color = '#363c4e'
spines_color = '#0f0f0f'
# Axes
plt.title(title, color='black', fontproperties="SimHei")
ax.set_facecolor(bgcolor)
ax.grid(linestyle='-', linewidth='0.5', color=grid_color, alpha=0.4)
ax.yaxis.tick_right()
ax.set_yscale("linear")
fig.patch.set_facecolor(bgcolor)
fig.patch.set_edgecolor(bgcolor)
plt.rcParams['figure.facecolor'] = bgcolor
plt.rcParams['savefig.facecolor'] = bgcolor
ax.spines['bottom'].set_color(spines_color)
ax.spines['top'].set_color(spines_color)
ax.spines['right'].set_color(spines_color)
ax.spines['left'].set_color(spines_color)
ax.tick_params(axis='x', colors=spines_color, size=7)
ax.tick_params(axis='y', colors=spines_color, size=7)
fig.tight_layout()
ax.autoscale_view()
def plot_ichimoku(self, fig, ax, view_limit=100):
d2 = self.ohcl_df.loc[:,
['tenkan_sen', 'kijun_sen', 'senkou_span_a', 'senkou_span_b', 'chikou_span', 'MA5', 'MA10', 'MA20']]
# d2 = d2.tail(view_limit)
date_axis = range(0, len(d2))
# ichimoku
plt.plot(date_axis, d2['tenkan_sen'], label="tenkan", color='#004200', alpha=1, linewidth=2)
plt.plot(date_axis, d2['kijun_sen'], label="kijun", color="#721d1d", alpha=1, linewidth=2)
plt.plot(date_axis, d2['senkou_span_a'], label="span a", color="#008000", alpha=0.65, linewidth=0.5)
plt.plot(date_axis, d2['senkou_span_b'], label="span b", color="#ff0000", alpha=0.65, linewidth=0.5)
plt.plot(date_axis, d2['chikou_span'], label="chikou", color="black", alpha=0.65, linewidth=0.5)
plt.plot(date_axis, d2['MA5'], label="MA5", color="green", alpha=0.8, linewidth=0.6)
plt.plot(date_axis, d2['MA10'], label="MA10", color="blue", alpha=0.8, linewidth=1.2)
plt.plot(date_axis, d2['MA20'], label="MA20", color="yellow", alpha=0.8, linewidth=0.6)
# green cloud
ax.fill_between(date_axis, d2['senkou_span_a'], d2['senkou_span_b'],
where=d2['senkou_span_a'] > d2['senkou_span_b'], facecolor='#008000',
alpha=0.25)
# red cloud
ax.fill_between(date_axis, d2['senkou_span_a'], d2['senkou_span_b'],
where=d2['senkou_span_b'] > d2['senkou_span_a'], facecolor='#ff0000',
alpha=0.25)
def plot_candlesticks(self, fig, ax, view_limit=10):
# plot candlesticks
candlesticks_df = self.ohcl_df.loc[:, ['trade_date', "open", "high", "low", "close"]]
# candlesticks_df = candlesticks_df.tail(view_limit)
# plot candlesticks
# candlesticks_df['trade_date'] = mdates.date2num(candlesticks_df['trade_date'])
# candlestick_ohlc(ax, candlesticks_df.values, width=0.5, colorup='#83b987', colordown='#eb4d5c', alpha=0.5)
candlestick2_ohlc(ax, candlesticks_df['open'], candlesticks_df['high'], candlesticks_df['low'],
candlesticks_df['close'], width=0.6, colorup='#83b987', colordown='#eb4d5c', alpha=1)
# mpf.plot(candlesticks_df, width=0.6, colorup='#83b987', colordown='#eb4d5c', alpha=0.5)
# Range generator for decimals
def drange(self, x, y, jump):
while x < y:
yield float(x)
x += decimal.Decimal(jump)
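# Illustrative sketch (toy numbers, not market data) of the rolling mid-point
# used in Ichimoku.run() for tenkan_sen / kijun_sen: the average of the rolling
# highest high and the rolling lowest low over the window.
def _ichimoku_midpoint_example():
    toy = pd.DataFrame({'high': [10, 12, 11, 15, 14],
                        'low':  [8, 9, 10, 12, 11]})
    window = 3
    midpoint = (toy['high'].rolling(window=window).max()
                + toy['low'].rolling(window=window).min()) / 2
    # first two values are NaN; then (12+8)/2=10.0, (15+9)/2=12.0, (15+10)/2=12.5
    return midpoint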
def ashareslist(excel):
lsh = pd.read_excel(excel, sheet_name='上证', header=None, dtype=str)
ashares = pd.DataFrame()
ashares = ashares.append(pd.read_excel(excel, sheet_name='深证', header=None, dtype=str)).append(
pd.read_excel(excel, sheet_name='创业板', header=None, dtype=str))
ashares[1] = ashares[1] + '.SZ'
lsh[1] = lsh[1] + '.SH'
ashares = ashares.append(lsh)
return ashares
class DialogDemo(QDialog, Ui_Dialog):
def __init__(self, shares, strategy, parent=None):
self.ashares = shares
self.allshares = shares
self.strategy = strategy
super(DialogDemo, self).__init__(parent)
self.setupUi(self)
def ichimoku_push(self):
sharesId = self.share.split(' ')[0]
t1 = endDate()
t2 = (datetime.now() - relativedelta(years=1)).strftime('%Y%m%d')
self.ichimokuplot(sharesId, self.share, t2, t1)
def strategy_push(self):
stext = self.comboBox.currentText()
self.listWidget.clear()
if stext == 'All stocks':
self.listWidget.addItems(self.ashares[1] + ' ' + self.ashares[0])
self.ashares = self.allshares
if stext == 'ichimoku strategy':
df = self.strategy.strategy1()
self.listWidget.addItems(df[0])
df[1] = df[0].apply(lambda x: x.split(' ')[1])
df[0] = df[0].apply(lambda x: x.split(' ')[0])
self.ashares = df
if stext == 'trend tracking strategy':
df = self.strategy.strategy2()
self.listWidget.addItems(df[0])
df[1] = df[0].apply(lambda x: x.split(' ')[1])
df[0] = df[0].apply(lambda x: x.split(' ')[0])
self.ashares = df
def list_click(self, item):
self.share = item.text()
sharesId = self.share.split(' ')[0]
filelist = os.listdir(financialdir)
txtname = sharesId + '-' + endDate() + '.txt'
if filelist.__contains__(txtname):
with open(financialdir + txtname, 'r') as f:
text = f.read()
else:
text = getfinancialdata(sharesId)
with open(financialdir + txtname, 'w') as f:
for i in filelist:
if i.__contains__(sharesId):
os.remove(os.path.join(financialdir, i))
f.write(text)
self.textBrowser.setText(text)
print(self.share)
def double_click(self):
sharesId = self.share.split(' ')[0]
t1 = endDate()
t2 = (datetime.now() - relativedelta(years=1)).strftime('%Y%m%d')
self.ichimokuplot(sharesId, self.share, t2, t1)
def text_edit_search(self):
text = self.shareSearch.text()
ls = self.ashares
ls = ls[ls[0].str.contains(text) | ls[1].str.contains(text)]
self.listWidget.clear()
self.listWidget.addItems(ls[1] + ' ' + ls[0])
def reset_push(self):
filelist = os.listdir(datadir)
for i in filelist: os.remove(os.path.join(datadir, i))
filelist = os.listdir(strategydir)
for i in filelist: os.remove(os.path.join(strategydir, i))
filelist = os.listdir(financialdir)
for i in filelist: os.remove(os.path.join(financialdir, i))
self.ashares = getshares()
self.allshares = self.ashares
self.ashares.reset_index(drop=True, inplace=True)
self.strategy = Strategy(self.ashares)
self.strategy.strategy1()
self.strategy.strategy2()
def createDialog(self):
app = QApplication(sys.argv)
# create the dialog
# show the dialog
self.listWidget.addItems(self.ashares[1] + ' ' + self.ashares[0])
# binding
self.listWidget.itemClicked.connect(self.list_click)
self.pushButton_2.clicked.connect(self.ichimoku_push)
self.pushButton_3.clicked.connect(self.reset_push)
self.shareSearch.textChanged.connect(self.text_edit_search)
self.listWidget.doubleClicked.connect(self.double_click)
self.pushButton.clicked.connect(self.strategy_push)
self.comboBox.addItem('All stocks')
self.comboBox.addItem('ichimoku strategy')
self.comboBox.addItem('trend tracking strategy')
self.show()
sys.exit(app.exec_())
def ichimokuplot(self, ts_code, ts_name, start_date, end_date):
global lastday
global fig, ax
plt.close(fig)
fig, ax = plt.subplots()
df = getDataByTscode(ts_code, mode)
lastday = len(df)
vision(df, ts_name)
class Strategy:
def __init__(self, shares):
self.sl = shares
filelist = os.listdir(datadir)
if (filelist.__contains__(nameStrategy(self.sl[1][0]))) & (len(filelist) == len(shares)): return
t1 = endDate()
t2 = (datetime.now() - relativedelta(years=1)).strftime('%Y%m%d')
ts.set_token('<KEY>')
pro = ts.pro_api()
if not filelist.__contains__(nameStrategy(self.sl[1][0])):
for i in filelist: os.remove(os.path.join(datadir, i))
self.getdailyData(filelist, pro, t2, t1)
def getdailyData(self, filelist, pro, t2, t1):
for tmp in self.sl.iterrows():
print(tmp[1][1])
if filelist.__contains__(nameStrategy(tmp[1][1])): continue
try :
df = pro.daily(ts_code=tmp[1][1], start_date=t2, end_date=t1)
except :
print(tmp[1][1] + ' failed')
continue
df = df.iloc[::-1]
df.to_excel(datadir + nameStrategy(tmp[1][1]))
filelist = os.listdir(datadir)
if not len(filelist) == len(self.sl): self.getdailyData(filelist, pro, t2, t1)
# ichimoku strategy -> seeking low-priced stocks with potential
def strategy1(self):
filelist = os.listdir(strategydir)
if filelist.__contains__(nameStrategy('strategy1')):
return pd.read_excel(strategydir + nameStrategy('strategy1'), index_col=0)
sl = self.sl
res = []
for s in sl[1]:
data = getDataByTscode(s, 1)
print(s)
if len(data) == 0: continue
ichimoku = Ichimoku(data)
i = ichimoku.run()
if len(i[(i['chikou_span'].isna()) & (~i['open'].isna())]) == 0: continue
ldd = i[(i['chikou_span'].isna()) & (~i['open'].isna())].iloc[-1] # lastdaydata
#
if ldd['tenkan_sen'] < ldd['kijun_sen']: continue
#
smin = min(ldd['senkou_span_a'], ldd['senkou_span_b'])
if ldd['high'] < smin: continue
#
if len(i[~i['chikou_span'].isna()]) == 0: continue
ckd = i[~i['chikou_span'].isna()].iloc[-1] # chikouData
if ckd['chikou_span'] < ckd['high']: continue
#
res.append(s + " " + sl[sl[1] == s][0].iloc[0])
df = pd.DataFrame(res)
filelist = os.listdir(strategydir)
for i in filelist:
if i.__contains__('strategy1'): os.remove(os.path.join(strategydir, i))
df.to_excel(strategydir + nameStrategy('strategy1'))
return df
# average strategy
def strategy2(self):
filelist = os.listdir(strategydir)
if filelist.__contains__(nameStrategy('strategy2')):
return pd.read_excel(strategydir + nameStrategy('strategy2'), index_col=0)
sl = self.sl
res = []
for s in sl[1]:
data = getDataByTscode(s, 1)
print(s)
if len(data) == 0: continue
# average
data['MA10'] = data['close'].rolling(10).mean()
data['MA100'] = data['close'].rolling(100).mean()
data['MA10diff'] = data['MA10'].diff()
# volatility
data['std10'] = data['close'].rolling(10).std(ddof=0).rolling(10).mean()
if len(data) <= 30: continue
x = -21
MAdata = data[x:-1]
xx = -x - 2
#
data60 = data[-61:-1]
data180 = data[-181:-1]
data250 = data[-251:-1]
min60 = data60['low'].min()
max60 = data60['high'].max()
min180 = data180['low'].min()
max180 = data180['high'].max()
min250 = data250['low'].min()
max250 = data250['high'].max()
if (min60 * 1.3 > max60) | (min180 * 1.6 > max180) | (min250 * 2 > max250): continue
if (MAdata.iloc[xx]['std10'] < MAdata.iloc[xx - 1]['std10']) | (
MAdata.iloc[xx]['MA10'] < MAdata.iloc[xx - 1]['MA100']): continue
if (MAdata.iloc[xx]['MA10diff'] < 0) | (MAdata.iloc[xx - 1]['MA10diff'] < 0): continue
if (MAdata.iloc[xx]['high'] < MAdata.iloc[xx]['MA10']) | (
MAdata.iloc[xx - 1]['high'] < MAdata.iloc[xx - 1]['MA10']): continue
MAdata['negativebias'] = MAdata['low'] - MAdata['MA10']
if MAdata['negativebias'].min() > 0: continue
if MAdata[MAdata.MA10 == MAdata['MA10'].min()].index[0] != \
MAdata[MAdata.negativebias == MAdata['negativebias'].min()].index[0]: continue
res.append(s + " " + sl[sl[1] == s][0].iloc[0])
df = pd.DataFrame(res)
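# Illustrative sketch (toy values, not market data) of the "negative bias"
# check in strategy2: the bias is the gap between the daily low and the 10-day
# moving average, and the filter requires the deepest dip below MA10 to fall on
# the same day as MA10's minimum.
def _negative_bias_example():
    toy = pd.DataFrame({'low': [9.0, 9.5, 9.2, 10.5],
                        'MA10': [10.0, 10.0, 9.8, 10.2]})
    toy['negativebias'] = toy['low'] - toy['MA10']
    dip_day = toy['negativebias'].idxmin()   # day of the deepest dip below MA10
    ma_low_day = toy['MA10'].idxmin()        # day of the lowest MA10
    return dip_day == ma_low_day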
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
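# Quick illustration (not part of the upstream test suite) of what the
# assertions above check: a SparseArray stores only the entries that differ
# from fill_value, together with their positions, and infers the SparseDtype
# and fill_value from the input.
def _sparse_storage_example(self):
    demo = SparseArray([0, 0, 1, 2, 0], fill_value=0)
    assert list(demo.sp_values) == [1, 2]
    assert list(demo.sp_index.indices) == [2, 3]
    assert demo.dtype == SparseDtype(np.int64, 0)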
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
import numpy as np
import pandas as pd
class FiniteDifferenceMethod1D:
def __init__(self, time, surface, zone_air):
# Explicit solver
# General properties
self.time = time
self.surface = surface
self.zone_air = zone_air
# Stored variables
self.temperature_array = None
self.time_array = None
# Number of internal points
self.Nx = len(self.surface.layers)
# Calculate Spatial Step-Size
self.thickness = self.surface.thickness
self.dx = self.thickness/self.Nx
kx = self.dx/2
# Create grid-points on x axis
self.x = np.linspace(0,1,self.Nx+2)
self.x = self.x[1:-1]
# FD matrix
self.A = None
self.b = None
self.u = None
# Initial methods
self.update_calculated_values()
def update_calculated_values(self):
self.setup_finite_difference_matrix()
self.initialize_arrays()
def setup_finite_difference_matrix(self):
R = np.array(self.surface.thermal_resistance_array)
C = np.array(self.surface.thermal_capacitance_array)
dt = self.time.time_step
Nx = self.Nx
A1 = dt/(R[:-1]*C)
A2 = 1-(dt/C)*(1/R[1:]+1/R[:-1])
A3 = dt/(R[1:]*C)
A = np.zeros((Nx,Nx+2))
A[:,:-2] += np.diag(A1)
A[:,1:-1] += np.diag(A2)
A[:,2:] += np.diag(A3)
self.A = A
def initialize_arrays(self):
#self.temperature_array = np.zeros((len(self.time.time_range),len(self.x)))
#self.time_array = np.zeros(len(self.time.time_range))
df = pd.DataFrame()
for x in self.x:
df[str(x)] = np.ones(self.time.length)*self.zone_air.initial_zone_air_temperature
#df.index = self.time.time_range
self.temperature_array = df
self.time_array = pd.Series(self.time.time_range)
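# Illustrative sketch (an assumption about how the matrix built in
# setup_finite_difference_matrix is meant to be applied; the time-stepping code
# is not shown in this snippet): one explicit update multiplies A (Nx x Nx+2)
# by the full temperature vector, i.e. the two boundary temperatures plus the
# Nx interior node temperatures.
def _explicit_fd_step(A, t_left, t_interior, t_right):
    u_full = np.concatenate(([t_left], t_interior, [t_right]))
    return A @ u_full   # next interior temperatures (length Nx)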
#!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: <EMAIL>
@file: processing.py
@time: 8/23/2017 13:57
@desc:
"""
import argparse
import datetime
import os
import subprocess
import sys
import time
import pandas as pd
from bb_parse import BBParse
from cluster import Cluster
from parallelprocessing import get_cluster_data_by_time
def env_check():
"""
Checking the running environment
:return:
"""
py_version = sys.version_info
if py_version[:2] >= (2, 7):
print("---- You currently have Python " + sys.version)
else:
print("---- Error, You need python 2.7.x+ and currently you have " + sys.version + 'exiting now...')
exit(-1)
try:
import numpy, pandas
except ImportError:
print('---- missing dependency: numpy or pandas, please install first')
exit(-1)
try:
import tables # noqa
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem '
'importing'.format(ex=str(ex)))
print('---- You have all required dependencies, starting to process')
def save_avg_result(*option):
"""
Save results to file
:param option: optional inputs can be: save_avg_result(pat_path) or save_avg_result(pat_path, bb_log_path) or
save_avg_result(pat_path, bb_log_path, BB_Phase)
:return: None
"""
if len(option) == 1: # only pat_path is assigned
result_file = option[0] + os.sep + 'results.log'
attrib_avg = Cluster(option[0]).get_cluster_data_by_time([0], [0], False)
with open(result_file, 'w') as f:
f.write('*' * 110 + '\n')
f.write('All nodes average utilization\n')
f.write('*' * 110 + '\n')
for key in attrib_avg.keys():
f.write('All nodes average {0} utilization: \n {1} \n'
.format(key, attrib_avg.get(key).to_string(index=False)))
f.write('.' * 75 + '\n')
print('Results have been saved to: {0}'.format(result_file))
return
elif len(option) == 2: # pat_path and bb_log are assigned
result_file = option[0] + os.sep + 'results.log'
phase_name = ('BENCHMARK', 'LOAD_TEST', 'POWER_TEST', 'THROUGHPUT_TEST_1',
'VALIDATE_POWER_TEST', 'VALIDATE_THROUGHPUT_TEST_1')
with open(result_file, 'w') as f:
for phase in phase_name[0:4]:
start_stamp, end_stamp = BBParse(option[1]).get_stamp_by_phase(phase)
start_time = datetime.datetime.fromtimestamp(start_stamp).strftime('%Y-%m-%d %H:%M:%S')
end_time = datetime.datetime.fromtimestamp(end_stamp).strftime('%Y-%m-%d %H:%M:%S')
attrib_avg = Cluster(option[0]).get_cluster_avg(start_stamp, end_stamp)
f.write('*' * 110 + '\n')
f.write('All nodes average utilization for phase {0} between {1} and {2}:\n'
.format(phase, start_time, end_time))
f.write('*' * 110 + '\n')
for key in attrib_avg.keys():
f.write('All nodes average {0} utilization: \n {1} \n'
.format(key, attrib_avg.get(key).to_string(index=False)))
f.write('.' * 75 + '\n')
print('Results have been saved to: {0}'.format(result_file))
return
elif len(option) == 3: # pat_path, bb_log and phase_name are assigned
result_file = option[0] + os.sep + 'results.log'
with open(result_file, 'w') as f:
start_stamp, end_stamp = BBParse(option[1]).get_stamp_by_phase(option[2])
start_time = datetime.datetime.fromtimestamp(start_stamp).strftime('%Y-%m-%d %H:%M:%S')
end_time = datetime.datetime.fromtimestamp(end_stamp).strftime('%Y-%m-%d %H:%M:%S')
attrib_avg = Cluster(option[0]).get_cluster_avg(start_stamp, end_stamp)
f.write('*' * 110 + '\n')
f.write('All nodes average utilization for phase {0} between {1} and {2}:\n'
.format(option[2], start_time, end_time))
f.write('*' * 110 + '\n')
for key in attrib_avg.keys():
f.write('All nodes average {0} utilization: \n {1} \n'
.format(key, attrib_avg.get(key).to_string(index=False)))
f.write('.' * 75 + '\n')
print('Results have been saved to: {0}'.format(result_file))
return
else:
print('Usage: save_avg_result(pat_path) or save_avg_result(pat_path, bb_log_path) or ' \
'save_avg_result(pat_path, bb_log_path, BB_Phase)\n')
exit(-1)
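# Hypothetical invocations (the paths come from the caller and are not real
# data) showing the three call forms dispatched on by save_avg_result above:
def _save_avg_result_usage(pat_path, bb_log_path):
    save_avg_result(pat_path)                              # whole-run averages
    save_avg_result(pat_path, bb_log_path)                 # the four main phases
    save_avg_result(pat_path, bb_log_path, 'POWER_TEST')   # a single phase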
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args():
"""
Parse command line inputs
:return:
"""
parse = argparse.ArgumentParser(description='Processing PAT data')
group = parse.add_mutually_exclusive_group()
parse.add_argument('-p', '--pat', type=str, help='PAT file path', required=True)
parse.add_argument('-l', '--log', type=str, help='TPCx-BB log path', required=False)
group.add_argument('-ph', '--phase', type=str, help='TPCx-BB phase', required=False, nargs='+', default='BENCHMARK')
group.add_argument('-q', '--query', type=int, help='TPCx-BB query num', required=False, nargs='+')
group.add_argument('-n', '--streamNumber', type=int, help='TPCx-BB stream number', required=False, nargs='+')
parse.add_argument('-s', '--save', type=str2bool, help='whether to save raw data', required=False, default=False)
args = parse.parse_args()
pat_path = args.pat
log_path = args.log
phase = args.phase
stream = args.streamNumber
query = args.query
save_raw = args.save
return pat_path, log_path, phase, stream, query, save_raw
def run():
env_check()
pat_path, log_path, phase, stream, query, save_raw = get_args()
if os.path.exists(pat_path):
if not log_path: # only pat_path is assigned
# print 'only pat_path is assigned, calculating BENCHMARK average utilization...\n'
is_log_exist(pat_path)
cluster_avg = get_cluster_data_by_time(pat_path, [0], [0], save_raw)
tag = [0]
print_pat_result(cluster_avg, tag)
result_path = pat_path + os.sep + 'pat_avg.log'
save_pat_result(cluster_avg, tag, result_path)
else: # pat_path and log_path are assigned
if os.path.exists(log_path):
bb_parse = BBParse(log_path)
print('Parsing TPCx-BB log files...\n')
bb_parse.get_elapsed_time()
phase_ts = bb_parse.get_exist_phase_timestamp()
print_bb_result(phase_ts)
result_path = log_path + os.sep + 'bb_results.log'
save_bb_result(phase_ts, result_path)
print('Started to process PAT files...\n')
else:
print('TPCx-BB log file path: {0} does not exist, exiting...'.format(log_path))
exit(-1)
start_stamps = []
end_stamps = []
if (not query) & (not stream) & (phase == 'BENCHMARK'): # if -ph -q and -n not assigned
for key, value in phase_ts.items():
start_stamps.extend((value['epochStartTimestamp'] / 1000).tolist())
end_stamps.extend((value['epochEndTimestamp'] / 1000).tolist())
assert len(start_stamps) == len(end_stamps)
# cluster_avg = Cluster(pat_path).get_cluster_data_by_time(start_stamps, end_stamps, save_raw)
cluster_avg = get_cluster_data_by_time(pat_path, start_stamps, end_stamps, save_raw)
bb_result = pd.concat(phase_ts.values(), axis=0).reset_index(drop=True)
pat_result = pd.concat(cluster_avg.values(), axis=1)
avg_result = pd.concat([bb_result, pat_result], axis=1)
result_path = pat_path + os.sep + 'bb_results.log'
avg_result.to_csv(result_path, sep=',')
tag = []
for key in phase_ts.keys():
tag.append(key)
if key == 'POWER_TEST':
tag.extend(['q' + str(i) for i in phase_ts[key].iloc[1:, 2]])
elif key == 'THROUGHPUT_TEST_1':
tag.extend(['stream' + str(i) for i in phase_ts[key].iloc[1:, 1]])
print_pat_result(cluster_avg, tag)
result_path = pat_path + os.sep + 'pat_avg_all.log'
save_pat_result(cluster_avg, tag, result_path)
elif (not query) and (not stream) and (set(phase).issubset(phase_ts.keys())):  # for a BB phase
for ph in phase:
start_stamps.append(int(phase_ts[ph].iloc[0, 3] / 1000))
end_stamps.append(int(phase_ts[ph].iloc[0, 4] / 1000))
assert len(start_stamps) == len(end_stamps)
# cluster_avg = Cluster(pat_path).get_cluster_data_by_time(start_stamps, end_stamps, save_raw)
cluster_avg = get_cluster_data_by_time(pat_path, start_stamps, end_stamps, save_raw)
print_pat_result(cluster_avg, phase)
result_path = pat_path + os.sep + 'pat_avg.txt'
save_pat_result(cluster_avg, phase, result_path)
elif not query: # for throughput streams
num_streams = phase_ts['THROUGHPUT_TEST_1'].shape[0] - 1  # number of throughput streams from the log
if any(s >= num_streams for s in stream):  # check that the input streamNumber is valid
print('Number of throughput streams is {0}, so the input streamNumber should not be ' \
'greater than {1}, exiting...'.format(num_streams, num_streams - 1))
exit(-1)
stream = [i + 1 for i in stream]  # index 1 corresponds to stream 0
start_stamps = list(map(int, (phase_ts['THROUGHPUT_TEST_1'].iloc[stream, 3] / 1000).tolist()))
end_stamps = list(map(int, (phase_ts['THROUGHPUT_TEST_1'].iloc[stream, 4] / 1000).tolist()))
assert len(start_stamps) == len(end_stamps)
# cluster_avg = Cluster(pat_path).get_cluster_data_by_time(start_stamps, end_stamps, save_raw)
cluster_avg = get_cluster_data_by_time(pat_path, start_stamps, end_stamps, save_raw)
tag = ['stream' + str(s - 1) for s in stream]  # stream numbering begins at 0
print_pat_result(cluster_avg, tag)
result_path = pat_path + os.sep + 'pat_avg.txt'
save_pat_result(cluster_avg, tag, result_path)
elif not stream: # for query
exist_queries = phase_ts['POWER_TEST'].iloc[:, 2].tolist()
if not set(query).issubset(set(exist_queries)):  # check that the input queries exist in the log
print('Input query does not exist in the log; existing queries are: {0}, ' \
'exiting...'.format(exist_queries[1:]))
exit(-1)
start_stamps = list(map(int, (phase_ts['POWER_TEST'].iloc[query, 3] / 1000).tolist()))
end_stamps = list(map(int, (phase_ts['POWER_TEST'].iloc[query, 4] / 1000).tolist()))
assert len(start_stamps) == len(end_stamps)
# cluster_avg = Cluster(pat_path).get_cluster_data_by_time(start_stamps, end_stamps, save_raw)
cluster_avg = get_cluster_data_by_time(pat_path, start_stamps, end_stamps, save_raw)
tag = ['q' + str(q) for q in query]
print_pat_result(cluster_avg, tag)
result_path = pat_path + os.sep + 'pat_avg.txt'
save_pat_result(cluster_avg, tag, result_path)
else:
print('The input arguments are not supported, exiting...')
exit(-1)
else:
print('PAT file path: {0} does not exist, exiting...'.format(pat_path))
exit(-1)
def save_pat_result(cluster_avg, tag, result_path):
"""
Save PAT result to file
:param cluster_avg: cluster_avg: dict that contains node avg attribute, e.g. CPU, Disk, Mem, Network
:param tag: tags for the output index, can be stream number: stream# or query number: q#
:param result_path: result save path
:return:
"""
with open(result_path, 'w') as f:
for key, value in cluster_avg.items():
value = value.set_index([tag])
f.write('*' * 100 + '\n')
f.write('Average {0} utilization: \n {1} \n'.format(key, value.to_string()))
f.write('*' * 100 + '\n')
print('PAT results have been saved to {0} \n'.format(result_path))
print('*' * 100 + '\n')
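# Illustrative sketch of the expected input (hypothetical values): cluster_avg maps an
# attribute name to a DataFrame with one row per tag, e.g.
#   cluster_avg = {'CPU': pd.DataFrame({'user%': [12.3], 'sys%': [4.5]}),
#                  'MEM': pd.DataFrame({'used_GB': [42.0]})}
#   save_pat_result(cluster_avg, tag=['BENCHMARK'], result_path='/tmp/pat_avg.log')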
def print_pat_result(cluster_avg, tag):
"""
print avg PAT result
:param cluster_avg: dict that contains node avg attribute, e.g. CPU, Disk, Mem, Network
:param tag: tags for the output index, can be stream number: stream# or query number: q#
:return: None
"""
for key, value in cluster_avg.items():
value = value.set_index([tag])
print('*' * 100)
print('Average {0} utilization: \n {1} \n'.format(key, value.to_string()))
print('*' * 100 + '\n')
def print_bb_result(phase_ts):
"""
Print BigBench result
:param phase_ts: dict that keys are BigBench phase and values are elapsed time
:return: None
"""
df = pd.DataFrame(index=phase_ts.keys(), columns=('EpochStartTime', 'EpochEndTime', 'ElapsedTime'))
for key, value in phase_ts.items():
start = (value['epochStartTimestamp'][0]) / 1000
end = (value['epochEndTimestamp'][0]) / 1000
during = datetime.timedelta(seconds=int(end - start))
start =
|
pd.to_datetime(start, unit='s')
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 16:23:54 2020
@author: huangyuyao
"""
import torch
from torch.utils.data import Dataset
from sklearn.preprocessing import MaxAbsScaler
from torch.utils.data import DataLoader
import os
import sys
import numpy as np
import pandas as pd
import scipy
from glob import glob
from scipy.io import mmread
from sklearn.preprocessing import LabelEncoder
import time
from torchvision import transforms, datasets
from torch import nn, optim
from torch.nn import init
from tqdm import trange
class SingleCellDataset(Dataset):
def __init__(self, path,
low = 0,
high = 0.9,
min_peaks = 0,
transpose = False,
transforms=[]):
self.data, self.peaks, self.barcode = load_data(path, transpose)
if min_peaks > 0:
self.filter_cell(min_peaks)
self.filter_peak(low, high)
for transform in transforms:
self.data = transform(self.data)
self.n_cells, self.n_peaks = self.data.shape
self.shape = self.data.shape
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
data = self.data[index];
if type(data) is not np.ndarray:
data = data.toarray().squeeze()
return data
def info(self):
print("Dataset Info")
print('Cell number: {}\nPeak number: {}'.format(self.n_cells, self.n_peaks))
def filter_peak(self, low=0, high=0.9):
total_cells = self.data.shape[0]
count = np.array((self.data >0).sum(0)).squeeze()
indices = np.where((count > low*total_cells) & (count < high*total_cells))[0]
self.data = self.data[:, indices]
self.peaks = self.peaks[indices]
print('filterpeak------')
def filter_cell(self, min_peaks=0):
if min_peaks < 1:
min_peaks = len(self.peaks)*min_peaks
indices = np.where(np.sum(self.data>0, 1)>=min_peaks)[0]
self.data = self.data[indices]
self.barcode = self.barcode[indices]
p = type(self.barcode)
print('filtercell------')
print(p)
def write_data(self,path):
print('tmp dataset saving')
data_ = self.data
data1 = data_.todense()
data =data1.T
#print(type(data))
recon_x = pd.DataFrame(data, index=self.peaks, columns=self.barcode)
recon_x.to_csv(os.path.join(path, 'tmp_data.txt'), sep='\t')
def load_data(path, transpose=False):
print("Loading data ...")
t0 = time.time()
if os.path.isdir(path):
count, peaks, barcode = read_mtx(path)
elif os.path.isfile(path):
count, peaks, barcode = read_csv(path)
else:
raise ValueError("File {} does not exist".format(path))
if transpose:
count = count.transpose()
print('Original data contains {} cells x {} peaks'.format(*count.shape))
assert (len(barcode), len(peaks)) == count.shape
print("Finished loading in {:.2f} min".format((time.time()-t0)/60))
return count, peaks, barcode
def read_mtx(path):
for filename in glob(path+'/*'):
basename = os.path.basename(filename)
if (('count' in basename) or ('matrix' in basename)) and ('mtx' in basename):
count = mmread(filename).T.tocsr().astype('float32')
elif 'barcode' in basename:
barcode = pd.read_csv(filename, sep='\t', header=None)[0].values
elif 'gene' in basename or 'peak' in basename:
feature = pd.read_csv(filename, sep='\t', header=None).iloc[:, -1].values
return count, feature, barcode
def read_csv(path):
if ('.txt' in path) or ('tsv' in path):
sep = '\t'
elif '.csv' in path:
sep = ','
else:
raise ValueError("File {} is not in txt or csv format".format(path))
data = pd.read_csv(path, sep=sep, index_col=0).T.astype('float32')
genes = data.columns.values
barcode = data.index.values
counts = scipy.sparse.csr_matrix(data.values)
return counts, genes, barcode
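# Illustrative note (inferred from the loaders above): load_data accepts either a
# 10x-style directory containing a *count*/*matrix* .mtx file plus *barcode* and
# *peak*/*gene* TSVs, or a single dense txt/tsv/csv matrix with peaks as rows and
# cells as columns; both paths end up as a cells x peaks sparse matrix.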
# model
def build_mlp(layers, activation=nn.ReLU()):
net = []
for i in range(1, len(layers)):
net.append(nn.Linear(layers[i-1], layers[i]))
net.append(activation)
return nn.Sequential(*net)
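# Illustrative, never-called helper (hypothetical name): build_mlp appends the
# activation after every Linear layer, including the last one in the list.
def _build_mlp_example():
    net = build_mlp([1000, 128, 10])
    # layers: Linear(1000, 128), ReLU, Linear(128, 10), ReLU
    assert isinstance(net[1], nn.ReLU) and isinstance(net[3], nn.ReLU)
    return net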
class Encoder(nn.Module):
def __init__(self,dims):
super(Encoder, self).__init__()
[x_dim, h_dim, z_dim] = dims
self.hidden = build_mlp([x_dim]+h_dim +[z_dim])
def forward(self, x):
x = self.hidden(x)
return x
class Decoder(nn.Module):
def __init__(self, dims, output_activation=None):
super(Decoder, self).__init__()
[z_dim, h_dim, x_dim] = dims
self.hidden = build_mlp([z_dim, *h_dim])
self.reconstruction = nn.Linear([z_dim, *h_dim][-1], x_dim)
self.output_activation = output_activation
def forward(self, x):
x = self.hidden(x)
if self.output_activation is not None:
return self.output_activation(self.reconstruction(x))
else:
return self.reconstruction(x)
class AE(nn.Module):
def __init__(self,dims):
super(AE, self).__init__()
[x_dim, z_dim, encode_dim, decode_dim] = dims
self.encoder = Encoder([x_dim, encode_dim, z_dim])
self.decoder = Decoder([z_dim, decode_dim, x_dim])
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
feature = self.encoder(x)
recon_x = self.decoder(feature)
return recon_x
def loss_func(self,x):
feature = self.encoder(x)
recon_x = self.decoder(feature)
criteon = nn.MSELoss()
loss = criteon(recon_x,x)
return loss
def fit(self,dataloader,outdir,lr = 0.001,epochs = 10000 ,max_iter = 10000):
optimizer = optim.Adam(self.parameters(), lr=lr)  # optimize this model's own parameters
iteration =0
Loss = []
early_stopping = EarlyStopping(outdir=outdir)  # checkpoint into the requested output directory
with trange(max_iter, disable=False) as pbar:
while True:
epoch_loss = 0
for i,x in enumerate(dataloader):
epoch_lr = adjust_learning_rate(lr, optimizer, iteration)
optimizer.zero_grad()
loss = self.loss_func(x)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
pbar.set_postfix_str('loss={:.3f}'.format(loss.item()))
pbar.update(1)
iteration+=1
Loss.append(loss.item())  # store a plain float, not the graph-attached tensor
if iteration >= max_iter:
break
else:
early_stopping(epoch_loss, self)
if early_stopping.early_stop:
print('EarlyStopping: run {} iteration'.format(iteration))
break
continue
break
def encodeBatch(self, dataloader,out='z',transforms=None):
output = []
for i, inputs in enumerate(dataloader):
x = inputs
x = x.view(x.size(0), -1).float()
feature = self.encoder(x)
if out == 'z':
output.append(feature.detach().cpu())
elif out == 'x':
recon_x = self.decoder(feature)
output.append(recon_x.detach().cpu().data)
output = torch.cat(output).numpy()
if out == 'x':
for transform in transforms:
output = transform(output)
return output
class AAE(AE):
def __init__(self, dims, n_centroids):
super(AAE, self).__init__(dims)
self.n_centroids = n_centroids
def adjust_learning_rate(init_lr, optimizer, iteration):
lr = max(init_lr * (0.9 ** (iteration//10)), 0.00002)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
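# Illustrative, never-called helper (hypothetical name): the schedule above decays the
# learning rate by 10% every 10 iterations and floors it at 2e-5.
def _lr_schedule_example():
    dummy = optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
    # -> [0.01, 0.009, 0.0081, ~0.0035]; the 0.00002 floor is never reached here
    return [adjust_learning_rate(0.01, dummy, it) for it in (0, 10, 20, 100)]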
class EarlyStopping:
def __init__(self, patience=100, verbose=False, outdir='./'):
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.loss_min = np.inf
self.model_file = os.path.join(outdir, 'model.pt')
def __call__(self, loss, model):
if np.isnan(loss):
self.early_stop = True
score = -loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(loss, model)
elif score < self.best_score:
self.counter += 1
if self.verbose:
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
model.load_state_dict(torch.load(self.model_file))  # restore the best checkpoint
else:
self.best_score = score
self.save_checkpoint(loss, model)
self.counter = 0
def save_checkpoint(self, loss, model):
if self.verbose:
print(f'Loss decreased ({self.loss_min:.6f} --> {loss:.6f}). Saving model ...')
torch.save(model.state_dict(), self.model_file)
self.loss_min = loss
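# Illustrative, never-called sketch (hypothetical names) of the EarlyStopping contract:
# call it once per epoch with the epoch loss; it checkpoints to <outdir>/model.pt when
# the loss improves and sets early_stop after `patience` epochs without improvement.
def _early_stopping_example(model, epoch_losses, outdir='./'):
    stopper = EarlyStopping(patience=100, outdir=outdir)
    for epoch_loss in epoch_losses:
        stopper(epoch_loss, model)
        if stopper.early_stop:
            break
    return stopper.best_score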
# plot
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import seaborn as sns
def plot_embedding(X, labels, classes=None, method='tSNE', cmap='tab20', figsize=(4, 4), markersize=4, marker=None,
return_emb=False, save=False, save_emb=False, show_legend=True, show_axis_label=True, **legend_params):
if marker is not None:
X = np.concatenate([X, marker], axis=0)
N = len(labels)
if X.shape[1] != 2:
if method == 'tSNE':
from sklearn.manifold import TSNE
X = TSNE(n_components=2, random_state=124).fit_transform(X)
if method == 'UMAP':
from umap import UMAP
X = UMAP(n_neighbors=30, min_dist=0.1).fit_transform(X)
if method == 'PCA':
from sklearn.decomposition import PCA
X = PCA(n_components=2, random_state=124).fit_transform(X)
plt.figure(figsize=figsize)
if classes is None:
classes = np.unique(labels)
if cmap is not None:
cmap = cmap
elif len(classes) <= 10:
cmap = 'tab10'
elif len(classes) <= 20:
cmap = 'tab20'
else:
cmap = 'husl'
colors = sns.color_palette(cmap, n_colors=len(classes))
for i, c in enumerate(classes):
plt.scatter(X[:N][labels==c, 0], X[:N][labels==c, 1], s=markersize, color=colors[i], label=c)
if marker is not None:
plt.scatter(X[N:, 0], X[N:, 1], s=10*markersize, color='black', marker='*')
# plt.axis("off")
legend_params_ = {'loc': 'center left',
'bbox_to_anchor':(1.0, 0.45),
'fontsize': 10,
'ncol': 1,
'frameon': False,
'markerscale': 1.5
}
legend_params_.update(**legend_params)
if show_legend:
plt.legend(**legend_params_)
sns.despine(offset=10, trim=True)
if show_axis_label:
plt.xlabel(method+' dim 1', fontsize=12)
plt.ylabel(method+' dim 2', fontsize=12)
if save:
plt.savefig(save, format='jpg', bbox_inches='tight')
else:
plt.show()
if save_emb:
np.savetxt(save_emb, X)
if return_emb:
return X
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
print(path + ' created')
return True
else:
print('already exists')
return False
normalizer = MaxAbsScaler()
dataset = SingleCellDataset('%s'%(sys.argv[2]), low=0.01, high=0.9, min_peaks=100,
transforms=[normalizer.fit_transform])
trainloader = DataLoader(dataset, batch_size=100, shuffle=False, drop_last=False)
testloader = DataLoader(dataset, batch_size=100, shuffle=False, drop_last=False)
cell_num = dataset.shape[0]
input_dim = dataset.shape[1]
n_centroids = 8
name = 'Forebrain'
z_dim = int('%s'%(sys.argv[4]))
h_dim = [1024, 128]
decode_dim = []
lr = 0.01
epochs = 9999
max_iter = int('%s'%(sys.argv[1]))
mkpath='%s'%(sys.argv[3])
mkdir(mkpath)
outdir = mkpath
dims = [input_dim, z_dim, h_dim, decode_dim]
model = AAE(dims, n_centroids= n_centroids)
print('\n ### Training... ###')
model.fit(trainloader,lr=lr,epochs=epochs,max_iter=max_iter,outdir = outdir)
#torch.save(model.to('cpu').state_dict(), os.path.join(outdir, 'model_tmp.pt'))
feature = model.encodeBatch(testloader,out='z')
pd.DataFrame(feature, index=dataset.barcode).to_csv(os.path.join(outdir, 'feature.txt'), sep='\t', header=False)
recon_x = model.encodeBatch(testloader, out='x', transforms=[normalizer.inverse_transform])
recon_x = pd.DataFrame(recon_x.T, index=dataset.peaks, columns=dataset.barcode)
recon_x.to_csv(os.path.join(outdir, 'imputed_data.txt'), sep='\t')
print("Plotting embedding")
reference = '%s'%(sys.argv[5])
emb = 'UMAP'
#emb = 'tSNE'
ref =
|
pd.read_csv(reference, sep='\t', header=None, index_col=0)
|
pandas.read_csv
|
import matplotlib
matplotlib.use('Agg')
import os
import numpy as np
import sklearn as skl
import matplotlib.pyplot as plt
import pandas as pd
import cv2
def realout(pdx, path, name):
pdx = np.asmatrix(pdx)
prl = (pdx[:, 1] > 0.5).astype('uint8')
prl = pd.DataFrame(prl, columns=['Prediction'])
out = pd.DataFrame(pdx, columns=['neg_score', 'pos_score'])
out = pd.concat([out, prl], axis=1)
out.insert(loc=0, column='Num', value=out.index)
out.to_csv("../{}/out/{}.csv".format(path, name), index=False)
def metrics(pdx, tl, path, name):
pdx = np.asmatrix(pdx)
prl = (pdx[:,1] > 0.5).astype('uint8')
prl = pd.DataFrame(prl, columns = ['Prediction'])
out = pd.DataFrame(pdx, columns = ['neg_score', 'pos_score'])
outtl = pd.DataFrame(tl, columns = ['True_label'])
out =
|
pd.concat([out,prl,outtl], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
assert result.dtype == ex_dtype
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
assert result.dtype == ex_dtype
def test_as_recarray(self):
# basic test
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# index_col ignored
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True, index_col=0)
tm.assert_numpy_array_equal(out, expected)
# respects names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = '1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# header order is respected even though it conflicts
# with the natural ordering of the column names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'b,a\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('b', '=i8'), ('a', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# overrides the squeeze parameter
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a\n1'
expected = np.array([(1,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True)
tm.assert_numpy_array_equal(out, expected)
# does data conversions before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
conv = lambda x: int(x) + 1
expected = np.array([(2, 'a'), (3, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True,
converters={'a': conv})
tm.assert_numpy_array_equal(out, expected)
# filters by usecols before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1,), (2,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True,
usecols=['a'])
tm.assert_numpy_array_equal(out, expected)
def test_memory_map(self):
mmap_file = os.path.join(self.dirpath, 'test_mmap.csv')
expected = DataFrame({
'a': [1, 2, 3],
'b': ['one', 'two', 'three'],
'c': ['I', 'II', 'III']
})
out = self.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(out, expected)
def test_null_byte_char(self):
# see gh-2741
data = '\x00,foo'
cols = ['a', 'b']
expected = DataFrame([[np.nan, 'foo']],
columns=cols)
if self.engine == 'c':
out = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data), names=cols)
def test_utf8_bom(self):
# see gh-4793
bom = u('\ufeff')
utf8 = 'utf-8'
def _encode_data_with_bom(_data):
bom_data = (bom + _data).encode(utf8)
return BytesIO(bom_data)
# basic test
data = 'a\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8)
tm.assert_frame_equal(out, expected)
# test with "regular" quoting
data = '"a"\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, quotechar='"')
tm.assert_frame_equal(out, expected)
# test in a data row instead of header
data = 'b\n1'
expected = DataFrame({'a': ['b', '1']})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'])
tm.assert_frame_equal(out, expected)
# test in empty data row with skipping
data = '\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'],
skip_blank_lines=True)
tm.assert_frame_equal(out, expected)
# test in empty data row without skipping
data = '\n1'
expected =
|
DataFrame({'a': [np.nan, 1.0]})
|
pandas.DataFrame
|
import time
import numpy as np
import pandas as pd
def add_new_category(x):
"""
Aimed at 'trafficSource.keyword' to tidy things up a little
"""
x = str(x).lower()
if x == 'nan':
return 'nan'
x = ''.join(x.split())
if r'provided' in x:
return 'not_provided'
if r'youtube' in x or r'you' in x or r'yo' in x or r'tub' in x or r'yout' in x or r'y o u' in x:
return 'youtube'
if r'google' in x or r'goo' in x or r'gle' in x:
return 'google'
else:
return 'other'
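# Minimal usage sketch of the cleaner above; demo_keywords is a hypothetical Series
# standing in for the real 'trafficSource.keyword' column.
demo_keywords = pd.Series(['(not provided)', 'you tube videos', 'GOOGLE store', np.nan])
demo_categories = demo_keywords.apply(add_new_category)
# expected categories: not_provided, youtube, google, nan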
# Load the cleaned data that was previously dumped to parquet.
train_df = pd.read_parquet('input/cleaned/train.parquet.gzip')
test_df = pd.read_parquet('input/cleaned/test.parquet.gzip')
# Remove target col.
y_train = train_df['totals.transactionRevenue'].values
train_df = train_df.drop(['totals.transactionRevenue'], axis=1)
# Concatenate train and test for row-wise feature engineering.
trn_len = train_df.shape[0]
merged_df = pd.concat([train_df, test_df])
num_cols = ["totals.hits", "totals.pageviews", "visitNumber", "visitStartTime"]
for col in num_cols:
merged_df[col] = merged_df[col].astype(float)
merged_df['diff_visitId_time'] = merged_df['visitId'] - merged_df['visitStartTime']
merged_df['diff_visitId_time'] = (merged_df['diff_visitId_time'] != 0).astype(float)
merged_df['totals.hits'] = merged_df['totals.hits'].astype(float)
# Build Time based features.
merged_df['formated_date'] = pd.to_datetime(merged_df['date'], format='%Y%m%d')
merged_df['month'] = pd.DatetimeIndex(merged_df['formated_date']).month
merged_df['year'] = pd.DatetimeIndex(merged_df['formated_date']).year
merged_df['day'] = pd.DatetimeIndex(merged_df['formated_date']).day
merged_df['quarter'] = pd.DatetimeIndex(merged_df['formated_date']).quarter
merged_df['weekday'] = pd.DatetimeIndex(merged_df['formated_date']).weekday
merged_df['weekofyear'] = pd.DatetimeIndex(merged_df['formated_date']).weekofyear
merged_df['is_month_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_month_start
merged_df['is_month_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_month_end
merged_df['is_quarter_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_quarter_start
merged_df['is_quarter_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_quarter_end
merged_df['is_year_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_year_start
merged_df['is_year_end'] =
|
pd.DatetimeIndex(merged_df['formated_date'])
|
pandas.DatetimeIndex
|
from contextlib import contextmanager
from hashlib import sha256
import logging
import warnings
import numpy as np
import pandas as pd
from sentry_sdk.integrations import logging as sentry_logging
from solarforecastarbiter import datamodel
def _observation_valid(index, obs_id, aggregate_observations):
"""
Indicates where the observation data is valid. For now,
effective_from and effective_until are inclusive, so data missing
at those times is marked as missing in the aggregate.
"""
nindex = pd.DatetimeIndex([], tz=index.tz)
for aggobs in aggregate_observations:
if aggobs['observation_id'] == obs_id:
if aggobs['observation_deleted_at'] is None:
locs = index.slice_locs(aggobs['effective_from'],
aggobs['effective_until'])
nindex = nindex.union(index[locs[0]:locs[1]])
elif (
aggobs['effective_until'] is None or
aggobs['effective_until'] >= index[0]
):
raise ValueError(
'Deleted Observation data cannot be retrieved'
' to include in Aggregate')
else: # observation deleted and effective_until before index
return pd.Series(False, index=index)
return pd.Series(1, index=nindex).reindex(index).fillna(0).astype(bool)
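# Illustrative sketch (hypothetical ids and timestamps) of what _observation_valid
# returns: a boolean Series aligned to `index`, True only between effective_from and
# effective_until (inclusive) for a non-deleted observation.
#   idx = pd.date_range('2021-01-01T00:00Z', periods=4, freq='1h')
#   aggobs = ({'observation_id': 'obs-1', 'observation_deleted_at': None,
#              'effective_from': idx[1], 'effective_until': idx[2]},)
#   _observation_valid(idx, 'obs-1', aggobs)  # -> [False, True, True, False]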
def _make_aggregate_index(data, interval_length, interval_label,
timezone):
"""
Compute the index the aggregate should have based on the min and
max timestamps in the data, the interval length, label, and timezone.
"""
# first, find limits for a new index
start = pd.Timestamp('20380119T031407Z')
end = pd.Timestamp('19700101T000001Z')
for df in data.values():
start = min(start, min(df.index))
end = max(end, max(df.index))
# adjust start, end to nearest interval
# it is unclear what the aligned interval should be for odd
# (e.g. 52 min) intervals, so interval_length is required to
# be a divisor of one day
if 86400 % pd.Timedelta(interval_length).total_seconds() != 0:
raise ValueError(
'interval_length must be a divisor of one day')
if interval_label == 'ending':
start = start.ceil(interval_length)
end = end.ceil(interval_length)
elif interval_label == 'beginning':
start = start.floor(interval_length)
end = end.floor(interval_length)
else:
raise ValueError(
'interval_label must be beginning or ending for aggregates')
# raise the error if unlocalized
start = start.tz_convert(timezone)
end = end.tz_convert(timezone)
return pd.date_range(
start, end, freq=interval_length, tz=timezone)
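# Hedged sketch of the resulting index (made-up timestamps): with
# interval_label='ending', data spanning 00:07-00:53 UTC at a '30min'
# interval yields the labels 00:30 and 01:00.
#   data = {'obs-1': pd.DataFrame({'value': [1.0, 2.0], 'quality_flag': [0, 0]},
#           index=pd.DatetimeIndex(['2021-01-01 00:07Z', '2021-01-01 00:53Z']))}
#   _make_aggregate_index(data, '30min', 'ending', 'UTC')
#   # -> DatetimeIndex(['2021-01-01 00:30+00:00', '2021-01-01 01:00+00:00'])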
def compute_aggregate(data, interval_length, interval_label,
timezone, agg_func, aggregate_observations,
new_index=None):
"""
Computes an aggregate quantity according to agg_func of the data.
This function assumes the data has an interval_value_type of
interval_mean or instantaneous and that the data interval_length
is less than or equal to the aggregate interval_length.
NaNs in the output are the result of missing data from an
underlying observation of the aggregate.
Parameters
----------
data : dict of pandas.DataFrames
With keys 'observation_id' corresponding to observation in
aggregate_observations. DataFrames must have 'value' and 'quality_flag'
columns.
interval_length : str or pandas.Timedelta
The time between timesteps in the aggregate result.
interval_label : str
Whether the timestamps in the aggregated output represent the beginning
or ending of the interval
timezone : str
The IANA timezone for the output index
agg_func : str
The aggregation function (e.g. 'sum', 'mean', 'min') to create the
aggregate
aggregate_observations : tuple of dicts
Each dict should have 'observation_id' (string),
'effective_from' (timestamp), 'effective_until' (timestamp or None),
and 'observation_deleted_at' (timestamp or None) fields.
new_index : pandas.DatetimeIndex
The index to resample data to. Will attempt to infer an index if not
provided.
Returns
-------
pandas.DataFrame
- Index is a DatetimeIndex that adheres to interval_length and
interval_label
- Columns are 'value', for the aggregated value according to agg_func,
and 'quality_flag', the bitwise or of all flags in the aggregate for
the interval.
- A 'value' of NaN means that data from one or more
observations was missing in that interval.
Raises
------
KeyError
If data is missing a key for an observation in aggregate_observations
+ Or, if any DataFrames in data do not have 'value' or 'quality_flag'
columns
ValueError
If interval_length is not a divisor of one day and an index is not
provided.
+ Or, if an observation has been deleted but the data is required for
the aggregate
+ Or, if interval_label is not beginning or ending
+ Or, if data is empty and an index is provided.
"""
if new_index is None:
new_index = _make_aggregate_index(
data, interval_length, interval_label, timezone)
unique_ids = {ao['observation_id'] for ao in aggregate_observations}
valid_mask = {obs_id: _observation_valid(
new_index, obs_id, aggregate_observations) for obs_id in unique_ids}
expected_observations = {k for k, v in valid_mask.items() if v.any()}
# Raise an exception if no observations are valid
if len(expected_observations) == 0:
raise ValueError(
'No effective observations in data')
missing_from_data_dict = expected_observations - set(data.keys())
if missing_from_data_dict:
raise KeyError(
'Cannot aggregate data with missing keys '
f'{", ".join(missing_from_data_dict)}')
value_is_missing = pd.Series(False, index=new_index)
value = {}
qf = {}
closed = datamodel.CLOSED_MAPPING[interval_label]
for obs_id, df in data.items():
resampler = df.resample(interval_length, closed=closed, label=closed)
new_val = resampler['value'].mean().reindex(new_index)
# data is missing when the resampled value is NaN and the data
# should be valid according to effective_from/until
valid = valid_mask[obs_id]
missing = new_val.isna() & valid
if missing.any():
warnings.warn('Values missing for one or more observations')
value_is_missing[missing] = True
value[obs_id] = new_val[valid]
qf[obs_id] = resampler['quality_flag'].apply(np.bitwise_or.reduce)
final_value = pd.DataFrame(value).reindex(new_index).aggregate(
agg_func, axis=1)
final_value[value_is_missing] = np.nan
# have to fill in nans and convert to int to do bitwise_or
# only works with pandas >= 0.25.0
final_qf = pd.DataFrame(qf).reindex(new_index).fillna(0).astype(
int).aggregate(np.bitwise_or.reduce, axis=1)
out = pd.DataFrame({'value': final_value, 'quality_flag': final_qf})
return out
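# Hedged usage sketch (all ids and values are illustrative): two instantaneous
# observations resampled to an hourly 'ending'-labelled sum.
#   idx = pd.date_range('2021-01-01T00:00Z', periods=4, freq='30min')
#   data = {oid: pd.DataFrame({'value': 1.0, 'quality_flag': 0}, index=idx)
#           for oid in ('a', 'b')}
#   aggobs = tuple({'observation_id': oid, 'effective_from': idx[0],
#                   'effective_until': None, 'observation_deleted_at': None}
#                  for oid in ('a', 'b'))
#   compute_aggregate(data, '1h', 'ending', 'UTC', 'sum', aggobs)
#   # -> DataFrame with a 'value' column (aggregated over both observations) and a
#   #    bitwise-or 'quality_flag' column on the hourly index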
def sha256_pandas_object_hash(obj):
"""
Compute a hash for a pandas object. No sorting of the
object is performed, so an object with the same data
in a different order returns a different hash.
Parameters
----------
obj: pandas.Series or pandas.DataFrame
Returns
-------
str
Hex digest of the SHA-256 hash of the individual object row hashes
"""
return sha256(
|
pd.util.hash_pandas_object(obj)
|
pandas.util.hash_pandas_object
|
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score
from imblearn.over_sampling import BorderlineSMOTE
from collections import Counter
import gc
from sklearn.feature_selection import RFE
import matplotlib.pyplot as plt
#-------------------------------------------------------------------------------------------------------------------------
def kfold_smote_RFE(features_num, classifier, folds, df_train_filtered_std, y_train, smote='y'):
"""K_fold training/validation for RFE with LightGBM/RandomForest/XGBoost/CATBoost,
with SMOTE train re-sampling,
features_num-> maximum number of features evaluated with RFE"""
# get a list of models to evaluate
def get_models():
models = dict()
for i in range(2, features_num+1):
models[str(i)] = RFE(estimator=classifier, n_features_to_select=i)
return models
# data from each fold
fold_results = list()
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train_filtered_std, y_train)):
train_x, train_y = df_train_filtered_std.iloc[train_idx], y_train.iloc[train_idx]
valid_x, valid_y = df_train_filtered_std.iloc[valid_idx], y_train.iloc[valid_idx]
# summarize class distribution
counter = Counter(train_y)
print('\n-----------------------------------------------------')
print('Fold %2d, original distribution: ' % (n_fold + 1))
print(counter)
if smote=='y':
# transform the dataset
oversample = BorderlineSMOTE()
train_x, train_y = oversample.fit_resample(train_x, train_y)
# summarize the new class distribution
counter = Counter(train_y)
print('Fold %2d, re-sampled distribution: ' % (n_fold + 1))
print(counter)
# get the models to evaluate
models = get_models()
# evaluate the models and store results
models_results, names = list(), list()
for name, model in models.items():
# Print the number of features of the model
print('\nFeatures:%s' % (name))
# fit RFE
model.fit(train_x, train_y)
# validation per model
probas = model.predict_proba(valid_x)[:, 1]
# ROC-AUC per model
AUC = roc_auc_score(valid_y, probas)
# Collecting results
models_results.append(AUC)
names.append(name)
# summarize all features
for i in range(train_x.shape[1]):
print('Column: %d, Selected %s, Rank: %.3f' % (i, model.support_[i], model.ranking_[i]))
# Print AUC score
print(f'\nAUC: {AUC}')
print('\nModels results')
print(models_results)
fold_results.append(models_results)
print('\nFolds results')
print(fold_results)
fold_results = np.asarray(fold_results)
# plot model performance for comparison
plt.figure(figsize=(15,10))
plt.boxplot(fold_results, labels=range(2,features_num+1), showmeans=True)
plt.title('RECURSIVE FEATURE ELIMINATION'
f'\n\ntrain re-sampling (SMOTE):"{smote}"',fontsize=20)
plt.xlabel('Numbers of features selected',fontsize=15)
plt.ylabel('Crossvalidation AUC',fontsize=15)
plt.ylim((0.5, 0.8))
# save
plt.savefig(f'projets\\07_loan_customer_scoring\\production\\savefig\\model_test_{smote_case}\\feature_selection\\{class_weigh_case}\\feature_selection_RFE_feature_number.png', transparent=True)
plt.show()
return fold_results
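# Hedged usage sketch (estimator and fold objects are illustrative):
#   from sklearn.model_selection import StratifiedKFold
#   from sklearn.ensemble import RandomForestClassifier
#   folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
#   results = kfold_smote_RFE(10, RandomForestClassifier(), folds,
#                             df_train_filtered_std, y_train, smote='y')
# results is a (n_folds, n_feature_counts) array of validation AUC scores.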
#-------------------------------------------------------------------------------------------------------------------------
# Classification with kfold available for several algorithms
def kfold_classif(classifier, folds, df_train_std, target_train, df_val_std, target_val, custom_loss, fbeta, fbeta_number=0, logistic_regression=False, train_resampling='n', eval_set=False, scorer='auc', early_stopping_rounds=None, verbose=200):
"""K_fold training/validation for DecisionTree/RandomForest/LightGBM/XGBoost/CATBoost/LogisticRegression,
train_resampling-> borderline smote re-sampling on the train part,
fbeta_number-> beta value of the F-beta score used to optimise the threshold"""
# Note: num_iteration=clf.best_iteration_ can be added to predict_proba() when the estimator supports it
# Create arrays and dataframes to store results
crossvalid_probas = np.zeros(df_train_std.shape[0])
valid_probas = np.zeros(df_val_std.shape[0])
fold_AUC_list = []
feature_importance_df = pd.DataFrame()
feats = [f for f in df_train_std.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
# Sanitise column names (replace non-alphanumeric characters with '_')
df_train_std_2 = df_train_std[feats]
df_val_std_2 = df_val_std[feats]
df_train_std_2.columns = ["".join (c if c.isalnum() else "_" for c in str(x)) for x in df_train_std_2.columns]
df_val_std_2.columns = ["".join (c if c.isalnum() else "_" for c in str(x)) for x in df_val_std_2.columns]
# define thresholds
thresholds = np.arange(0, 1, 0.001)
# apply threshold to positive probabilities to create labels
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
def custom_cost_function(testy, yhat):
# get the fn and the fp from the confusion matrix
tn, fp, fn, tp = confusion_matrix(testy, yhat).ravel()
# function
y = 10*fn + fp
return y
# data from each fold
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train_std_2, target_train)):
train_x, train_y = df_train_std_2.iloc[train_idx], target_train.iloc[train_idx]
valid_x, valid_y = df_train_std_2.iloc[valid_idx], target_train.iloc[valid_idx]
# Re-sampling
if train_resampling=='y':
# summarize class distribution
counter = Counter(train_y)
print('Fold %2d, original distribution: ' % (n_fold + 1))
print(counter)
# transform the dataset
oversample = BorderlineSMOTE()
train_x, train_y = oversample.fit_resample(train_x, train_y)
# summarize the new class distribution
counter = Counter(train_y)
print('Fold %2d, re-sampled distribution: ' % (n_fold + 1))
print(counter)
# classifier instance
clf = classifier
# fitting
if eval_set==True:
clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
eval_metric=scorer, verbose=verbose, early_stopping_rounds=early_stopping_rounds)
if eval_set==False:
clf.fit(train_x, train_y)
# validation
crossvalid_probas[valid_idx] = clf.predict_proba(valid_x)[:, 1]
# ROC-AUC
AUC = roc_auc_score(valid_y, crossvalid_probas[valid_idx])
fold_AUC_list.append(AUC)
# showing results from each fold
print('Fold %2d AUC : %.6f' % (n_fold + 1, AUC))
# Collecting results
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
# Classifier case
if logistic_regression==True:
fold_importance_df["importance"] = clf.coef_[0]
if logistic_regression==False:
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
fold_importance_df["val_fold_AUC"] = AUC
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
feature_importance_df.sort_values(by='importance', ascending=False, inplace=True)
#validation_ROC_AUC = roc_auc_score(target_train, crossvalid_probas)
valid_probas += clf.predict_proba(df_val_std_2)[:, 1] / folds.n_splits
del train_x, train_y, valid_x, valid_y
gc.collect()
# Final performance
mean_crossvalid_fold_ROC_AUC = sum(fold_AUC_list)/len(fold_AUC_list)
print('Mean cross-validation ROC-AUC score %.6f' % mean_crossvalid_fold_ROC_AUC)
#validation_ROC_AUC = roc_auc_score(target_train, crossvalid_probas)
validation_ROC_AUC = roc_auc_score(target_val, valid_probas)
print('Validation ROC-AUC score %.6f' % validation_ROC_AUC)
# Optimising the threshold
if (fbeta==True)&(fbeta_number!=0):
# evaluate each threshold with f-beta loss function
scores = [fbeta_score(target_val.values, to_labels(valid_probas, t), average='weighted', beta=fbeta_number) for t in thresholds]
# get best threshold
ix = np.argmax(scores)
print(f'Threshold=%.3f, F-{fbeta_number} score_max=%.5f' % (thresholds[ix], scores[ix]))
best_score = scores[ix]
threshold = thresholds[ix]
if custom_loss=='y':
# evaluate each threshold with custom loss function
scores = [custom_cost_function(target_val.values, to_labels(valid_probas, t)) for t in thresholds]
# get best threshold
ix = np.argmin(scores)
print(f'Threshold=%.3f, Custom loss function (10*fn + fp) score_min=%.5f' % (thresholds[ix], scores[ix]))
best_score = scores[ix]
threshold = thresholds[ix]
return clf, feature_importance_df, mean_crossvalid_fold_ROC_AUC, validation_ROC_AUC, best_score, threshold
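# Standalone sketch of the threshold sweep used above, on made-up probabilities:
# the custom loss penalises false negatives ten times more than false positives.
demo_probs = np.array([0.10, 0.40, 0.35, 0.80])
demo_truth = np.array([0, 0, 1, 1])
demo_thresholds = np.arange(0, 1, 0.001)
demo_costs = [10 * ((demo_probs < t) & (demo_truth == 1)).sum()
+ ((demo_probs >= t) & (demo_truth == 0)).sum() for t in demo_thresholds]
demo_best_threshold = demo_thresholds[int(np.argmin(demo_costs))]
# demo_best_threshold minimises 10*fn + fp, exactly as custom_cost_function does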
#-------------------------------------------------------------------------------------------------------------------------
# One-hot encoder (with recovery of the output column labels)
from sklearn.preprocessing import OneHotEncoder as SklearnOneHotEncoder
import pandas as pd
import numpy as np
class OneHotEncoder(SklearnOneHotEncoder):
def __init__(self, **kwargs):
super(OneHotEncoder, self).__init__(**kwargs)
self.fit_flag = False
def fit(self, X, **kwargs):
out = super().fit(X)
self.fit_flag = True
return out
def transform(self, X, **kwargs):
sparse_matrix = super(OneHotEncoder, self).transform(X)
new_columns = self.get_new_columns(X=X)
d_out = pd.DataFrame(sparse_matrix.toarray(), columns=new_columns, index=X.index)
return d_out
def fit_transform(self, X, **kwargs):
self.fit(X)
return self.transform(X)
def get_new_columns(self, X):
new_columns = []
for i, column in enumerate(X.columns):
j = 0
while j < len(self.categories_[i]):
new_columns.append(f'{column}_<{self.categories_[i][j]}>')
j += 1
return new_columns
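# Minimal usage sketch of the label-preserving encoder above (demo data only):
#   demo = pd.DataFrame({'color': ['red', 'blue', 'red']})
#   encoded = OneHotEncoder(handle_unknown='ignore').fit_transform(demo)
#   # encoded has columns 'color_<blue>' and 'color_<red>' and keeps demo's index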
#-------------------------------------------------------------------------------------------------------------------------
# Target Encoding or One-Hot Encoding (one new column created)
def encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
"""Fonction transfroam,nt une colonne de dataframe à partir d'un modèle fitté"""
"""renseigner le modèle fitté"""
"""Indiquer le nouveau nom de colonne"""
"""Indiquer une colonne fixe pour le merge"""
# Création d'un dataframe avec la colonne souhaitée et une colonne repère en index pour éviter de perdre l'ordre après .transform (re-indexage possible de la fonction):
dataframe_work = pd.DataFrame(dataframe[[column,fix_column]], columns=[column,fix_column])
dataframe_work.set_index([fix_column], inplace = True)
# Transform
dataframe_work[column_new_name] = trained_model.transform(dataframe_work[column])
dataframe_work.drop(column, axis=1, inplace=True)
# The key column was set as the index and reappears after reset_index:
dataframe_work.reset_index(inplace=True)
# Merge back on the shared key column fix_column:
dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
return dataframe
# Label Encoding or One-Hot Encoding (one new column created)
def label_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
"""Fonction transfroam,nt une colonne de dataframe à partir d'un modèle fitté"""
"""renseigner le modèle fitté"""
"""Indiquer le nouveau nom de colonne"""
"""Indiquer une colonne fixe pour le merge"""
# Création d'un dataframe avec la colonne souhaitée et une colonne repère en index pour éviter de perdre l'ordre après .transform (re-indexage possible de la fonction):
dataframe_work = pd.DataFrame(dataframe[[column,fix_column]], columns=[column,fix_column])
dataframe_work.set_index([fix_column], inplace = True)
# Transform
dataframe_work[column_new_name] = dataframe_work[column].apply(lambda x: trained_model.transform([x])[0] if pd.notna(x) else np.NaN)
dataframe_work.drop(column, axis=1, inplace=True)
# The key column was set as the index and reappears after reset_index:
dataframe_work.reset_index(inplace=True)
# Merge back on the shared key column fix_column:
dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
return dataframe
# Target Encoding or One-Hot Encoding (one new column created)
def target_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
"""Fonction transfroam,nt une colonne de dataframe à partir d'un modèle fitté"""
"""renseigner le modèle fitté"""
"""Indiquer le nouveau nom de colonne"""
"""Indiquer une colonne fixe pour le merge"""
# Création d'un dataframe avec la colonne souhaitée et une colonne repère en index pour éviter de perdre l'ordre après .transform (re-indexage possible de la fonction):
dataframe_work = pd.DataFrame(dataframe[[column,fix_column]], columns=[column,fix_column])
dataframe_work.set_index([fix_column], inplace = True)
# Transform
dataframe_work[column_new_name] = trained_model.transform(dataframe_work[column])
dataframe_work.drop(column, axis=1, inplace=True)
# The key column was set as the index and reappears after reset_index:
dataframe_work.reset_index(inplace=True)
# Merge back on the shared key column fix_column:
dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
return dataframe
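# Hedged usage sketch (column names and the fitted encoder `te` are illustrative):
#   df = target_encoding_transform_with_merge(df, column='CODE_GENDER',
#                                             fix_column='SK_ID_CURR',
#                                             trained_model=te,
#                                             column_new_name='CODE_GENDER_te')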
# ONE-HOT-ENCODING (several new columns created)
def vector_encoding_transform_with_merge(dataframe, column, fix_column, trained_model):
"""Fonction transfroam,nt une colonne de dataframe à partir d'un modèle fitté"""
"""renseigner le modèle fitté"""
"""Indiquer le nouveau nom de colonne"""
"""Indiquer une colonne fixe pour le merge"""
# Création d'un dataframe avec la colonne souhaitée et une colonne repère en index pour éviter de perdre l'ordre après .transform (re-indexage possible de la fonction):
dataframe_work =
|
pd.DataFrame(dataframe[[column,fix_column]])
|
pandas.DataFrame
|
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv = pd.concat([self.cnv, cdx.cnv]).drop_duplicates()
sv = pd.concat([self.sv, cdx.sv]).drop_duplicates()
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
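# Hedged sketch: CDx_Data objects compose with + and -, deduplicating rows in every table.
#   combined = cohort_a + cohort_b   # union of the two cohorts
#   remainder = combined - cohort_b  # rows present only in cohort_a
# (cohort_a / cohort_b are hypothetical CDx_Data instances)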
def from_PETA(self,
token: str,
json_str: str,
host='https://peta.bgi.com/api'):
"""Retrieve CDx data from BGI-PETA database.
Args:
token (str): Effective token for BGI-PETA database
json_str (str): The json format restrictions communicating to the database
"""
self.json_str = json_str
peta = Peta(token=token, host=host)
peta.set_data_restriction_from_json_string(json_str)
# peta.fetch_clinical_data() does not infer dtypes correctly, so do it manually.
#self.cli = peta.fetch_clinical_data()
self.cli = pd.read_csv(
StringIO(peta.fetch_clinical_data().to_csv(None, index=False)))
self.mut = peta.fetch_mutation_data()
self.cnv = peta.fetch_cnv_data()
self.sv = peta.fetch_sv_data()
# dedup for the same sampleId in different studyIds, discard the duplicated ones from all tables
cli_original = self.cli
self.cli = self.cli.drop_duplicates('sampleId')
if (len(self.cli) < len(cli_original)):
print('Duplicated sampleId values exist; dropping duplicates and continuing')
undup_tuple = [(x, y)
for x, y in zip(self.cli.sampleId, self.cli.studyId)]
self.sv = self.sv[self.sv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.cnv = self.cnv[self.cnv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.mut = self.mut[self.mut.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
# time series
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
return filter_description(json_str)
def filter_description(self):
"""retrun filter description when data load from PETA
Returns:
str: description
"""
return filter_description(self.json_str) if self.json_str else None
def from_file(self,
mut_f: str = None,
cli_f: str = None,
cnv_f: str = None,
sv_f: str = None):
"""Get CDx data from files.
Args:
mut_f (str, optional): File as NCBI MAF format contains SNV and InDel. Defaults to None.
cli_f (str, optional): File name contains clinical info. Defaults to None.
cnv_f (str, optional): File name contains CNV info. Defaults to None.
sv_f (str, optional): File name contains SV info. Defaults to None.
"""
if not mut_f is None:
self.mut = pd.read_csv(mut_f, sep='\t')
if not cnv_f is None:
self.cnv = pd.read_csv(cnv_f, sep='\t')
if not sv_f is None:
self.sv = pd.read_csv(sv_f, sep='\t')
if not cli_f is None:
self.cli = pd.read_csv(cli_f, sep='\t')
else:
self._set_cli()
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
def to_tsvs(self, path: str = './'):
"""Write CDx_Data properties to 4 seprated files
Args:
path (str, optional): Path to write files. Defaults to './'.
"""
if not self.cli is None:
self.cli.to_csv(os.path.join(path, 'sample_info.txt'),
index=None,
sep='\t')
if not self.mut is None:
self.mut.to_csv(os.path.join(path, 'mut_info.txt'),
index=None,
sep='\t')
if not self.cnv is None:
self.cnv.to_csv(os.path.join(path, 'cnv_info.txt'),
index=None,
sep='\t')
if not self.sv is None:
self.sv.to_csv(os.path.join(path, 'fusion_info.txt'),
index=None,
sep='\t')
def to_excel(self, filename: str = './output.xlsx'):
"""Write CDx_Data properties to excel file
Args:
filename (str, optional): target filename. Defaults to './output.xlsx'.
"""
if not filename.endswith('xlsx'):
filename = filename + '.xlsx'
with pd.ExcelWriter(filename) as ew:
if not self.cli is None:
self.cli.to_excel(ew, sheet_name='clinical', index=None)
if not self.mut is None:
self.mut.to_excel(ew, sheet_name='mutations', index=None)
if not self.cnv is None:
self.cnv.to_excel(ew, sheet_name='cnv', index=None)
if not self.sv is None:
self.sv.to_excel(ew, sheet_name='sv', index=None)
def _set_cli(self):
"""Set the cli attribute, generate a void DataFrame when it is not specified.
"""
sample_id_series = []
if not self.mut is None:
sample_id_series.append(
self.mut['Tumor_Sample_Barcode'].drop_duplicates())
if not self.cnv is None:
sample_id_series.append(
self.cnv['Tumor_Sample_Barcode'].drop_duplicates())
if not self.sv is None:
sample_id_series.append(
self.sv['Tumor_Sample_Barcode'].drop_duplicates())
if len(sample_id_series) > 0:
self.cli = pd.DataFrame({
'sampleId': pd.concat(sample_id_series)
}).drop_duplicates()
else:
self.cli = None
def _infer_datetime_columns(self) -> pd.DataFrame:
"""To infer the datetime_columns and astype to datetime64 format
Returns:
pd.DataFrame: CDx.cli dataframe
"""
cli = self.cli
for column in cli.columns:
if column.endswith('DATE'):
try:
cli[column] =
|
pd.to_datetime(cli[column])
|
pandas.to_datetime
|
from datetime import date, timedelta, datetime
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from sqlalchemy import func, or_, and_, text, column
from app import db
from app.context import get_import_date, get_import_id
from app.models import OckovaciMisto, Okres, Kraj, OckovaciMistoMetriky, CrMetriky, OckovaniRegistrace, Populace, \
PrakticiKapacity, OckovaniRezervace, OckovaniLide, Vakcina, ZdravotnickeStredisko, OckovaciZarizeni
def unique_nrpzs_subquery():
"""Returns unique NRPZS within all centers."""
return db.session.query(OckovaciMisto.nrpzs_kod) \
.group_by(OckovaciMisto.nrpzs_kod) \
.having(func.count(OckovaciMisto.nrpzs_kod) == 1)
def unique_nrpzs_active_subquery():
"""Returns unique NRPZS within active centers."""
return db.session.query(OckovaciMisto.nrpzs_kod) \
.filter(OckovaciMisto.status == True) \
.group_by(OckovaciMisto.nrpzs_kod) \
.having(func.count(OckovaciMisto.nrpzs_kod) == 1)
def has_unique_nrpzs(center_id):
res = db.session.query(func.count(OckovaciMisto.id)) \
.filter(OckovaciMisto.id == center_id) \
.filter(or_(and_(OckovaciMisto.status == True, OckovaciMisto.nrpzs_kod.in_(unique_nrpzs_active_subquery())),
and_(OckovaciMisto.status == False, OckovaciMisto.nrpzs_kod.in_(unique_nrpzs_subquery())))) \
.one()
return res[0] == 1
def find_kraj_options():
return db.session.query(Kraj.nazev, Kraj.id).order_by(Kraj.nazev).all()
def find_centers(filter_column, filter_value):
centers = db.session.query(OckovaciMisto.id, OckovaciMisto.nazev, Okres.nazev.label("okres"),
Kraj.nazev_kratky.label("kraj"), Kraj.id.label("kraj_id"), OckovaciMisto.longitude,
OckovaciMisto.latitude, OckovaciMisto.adresa, OckovaciMisto.status,
OckovaciMisto.bezbarierovy_pristup, OckovaciMisto.vekove_skupiny, OckovaciMisto.typ,
OckovaciMisto.davky, OckovaciMisto.vakciny,
OckovaciMistoMetriky.registrace_fronta, OckovaciMistoMetriky.registrace_prumer_cekani,
OckovaciMistoMetriky.ockovani_odhad_cekani,
OckovaciMistoMetriky.registrace_fronta_prumer_cekani,
OckovaciMistoMetriky.registrace_pred_zavorou) \
.join(OckovaciMistoMetriky) \
.outerjoin(Okres, OckovaciMisto.okres_id == Okres.id) \
.outerjoin(Kraj, Okres.kraj_id == Kraj.id) \
.filter(filter_column == filter_value) \
.filter(OckovaciMistoMetriky.datum == get_import_date()) \
.filter(or_(OckovaciMisto.status == True, OckovaciMistoMetriky.registrace_fronta > 0,
OckovaciMistoMetriky.rezervace_cekajici > 0, OckovaciMisto.typ == 'WALKIN')) \
.filter(OckovaciMisto.typ != 'AČR') \
.group_by(OckovaciMisto.id, OckovaciMisto.nazev, Okres.id, Kraj.id, OckovaciMisto.longitude,
OckovaciMisto.latitude, OckovaciMisto.adresa, OckovaciMisto.status,
OckovaciMisto.bezbarierovy_pristup, OckovaciMisto.vekove_skupiny, OckovaciMisto.typ,
OckovaciMisto.davky, OckovaciMisto.vakciny, OckovaciMistoMetriky.registrace_fronta,
OckovaciMistoMetriky.registrace_prumer_cekani, OckovaciMistoMetriky.ockovani_odhad_cekani,
OckovaciMistoMetriky.registrace_fronta_prumer_cekani, OckovaciMistoMetriky.registrace_pred_zavorou) \
.order_by(Kraj.nazev_kratky, Okres.nazev, OckovaciMisto.nazev) \
.all()
return centers
def find_third_doses_centers():
center_ids = db.session.query(OckovaniRezervace.ockovaci_misto_id) \
.distinct() \
.filter(OckovaniRezervace.kalendar_ockovani == 'V3') \
.all()
return [center[0] for center in center_ids]
def find_centers_vaccine_options():
return db.session.query(func.unnest(OckovaciMisto.vakciny).label('vyrobce')).order_by('vyrobce').distinct().all()
def find_doctor_offices(nrpzs_kod):
df = pd.read_sql_query(
f"""
select coalesce(z.zarizeni_nazev, min(s.nazev_cely)) zarizeni_nazev, o.nazev okres, k.nazev kraj,
k.nazev_kratky kraj_kratky, s.druh_zarizeni, s.obec, s.psc, s.ulice, s.cislo_domu, s.telefon, s.email,
s.web, s.latitude, s.longitude
from ockovaci_zarizeni z
full join zdravotnicke_stredisko s on s.nrpzs_kod = z.id
left join okresy o on o.id = coalesce(z.okres_id, s.okres_kod)
join kraje k on k.id = o.kraj_id
where z.id = '{nrpzs_kod}' or s.nrpzs_kod = '{nrpzs_kod}'
group by z.zarizeni_nazev, o.nazev, k.nazev, k.nazev_kratky, s.druh_zarizeni, s.obec, s.psc, s.ulice,
s.cislo_domu, s.telefon, s.email, s.web, s.latitude, s.longitude
""",
db.engine)
return df
NRPZS_PEDIATRICIAN_CODE = 321
def find_doctors(okres_id=None, kraj_id=None):
okres_id_sql = 'null' if okres_id is None else f"'{okres_id}'"
kraj_id_sql = 'null' if kraj_id is None else f"'{kraj_id}'"
df = pd.read_sql_query(
f"""
select z.id, z.zarizeni_nazev, o.nazev okres, o.kraj_id, k.nazev kraj, k.nazev_kratky kraj_kratky,
z.provoz_ukoncen, m.ockovani_pocet_davek, m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7,
count(n.nrpzs_kod) nabidky,
case when s.druh_zarizeni_kod = {NRPZS_PEDIATRICIAN_CODE} then true else false end pediatr
from ockovaci_zarizeni z
left join zdravotnicke_stredisko s on s.nrpzs_kod = z.id
left join okresy o on o.id = z.okres_id
join kraje k on k.id = o.kraj_id
left join zarizeni_metriky m on m.zarizeni_id = z.id and m.datum = '{get_import_date()}'
left join (
select left(zdravotnicke_zarizeni_kod, 11) nrpzs_kod
from praktici_kapacity n
where n.pocet_davek > 0 and (n.expirace is null or n.expirace >= '{get_import_date()}')
) n on n.nrpzs_kod = z.id
where prakticky_lekar = True
and (z.okres_id = {okres_id_sql} or {okres_id_sql} is null)
and (o.kraj_id = {kraj_id_sql} or {kraj_id_sql} is null)
group by z.id, z.zarizeni_nazev, o.nazev, o.kraj_id, k.nazev, k.nazev_kratky, z.provoz_ukoncen,
m.ockovani_pocet_davek, m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7, pediatr
order by k.nazev_kratky, o.nazev, z.zarizeni_nazev
""",
db.engine
)
df['ockovani_vakciny_7'] = df['ockovani_vakciny_7'].replace({None: ''})
df['provoz_ukoncen'] = df['provoz_ukoncen'].astype('bool')
df['ockovani_pocet_davek'] = df['ockovani_pocet_davek'].replace({np.nan: 0})
df['ockovani_pocet_davek_zmena_tyden'] = df['ockovani_pocet_davek_zmena_tyden'].replace({np.nan: 0})
return df
def find_doctors_map():
df = pd.read_sql_query(
f"""
select z.id, z.zarizeni_nazev, z.provoz_ukoncen, s.latitude, s.longitude, m.ockovani_pocet_davek,
m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7, count(n.nrpzs_kod) nabidky,
case when s.druh_zarizeni_kod = {NRPZS_PEDIATRICIAN_CODE} then true else false end pediatr
from ockovaci_zarizeni z
left join zdravotnicke_stredisko s on s.nrpzs_kod = z.id
left join zarizeni_metriky m on m.zarizeni_id = z.id and m.datum = '{get_import_date()}'
left join (
select left(zdravotnicke_zarizeni_kod, 11) nrpzs_kod
from praktici_kapacity n
where n.pocet_davek > 0 and (n.expirace is null or n.expirace >= '{get_import_date()}')
) n on n.nrpzs_kod = z.id
where prakticky_lekar = True
group by z.id, z.zarizeni_nazev, z.provoz_ukoncen, s.latitude, s.longitude, m.ockovani_pocet_davek,
m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7, pediatr
""",
db.engine
)
df['ockovani_vakciny_7'] = df['ockovani_vakciny_7'].replace({None: ''})
df['provoz_ukoncen'] = df['provoz_ukoncen'].astype('bool')
df['latitude'] = df['latitude'].replace({np.nan: None})
df['longitude'] = df['longitude'].replace({np.nan: None})
df['ockovani_pocet_davek'] = df['ockovani_pocet_davek'].replace({np.nan: 0})
df['ockovani_pocet_davek_zmena_tyden'] = df['ockovani_pocet_davek_zmena_tyden'].replace({np.nan: 0})
return df
def find_doctors_vaccine_options():
return db.session.query(Vakcina.vyrobce) \
.join(OckovaniLide, Vakcina.vakcina == OckovaniLide.vakcina) \
.distinct(Vakcina.vyrobce) \
.order_by(Vakcina.vyrobce) \
.all()
def find_free_vaccines_available(nrpzs_kod=None, okres_id=None, kraj_id=None):
return db.session.query(PrakticiKapacity.datum_aktualizace, PrakticiKapacity.pocet_davek,
PrakticiKapacity.typ_vakciny, PrakticiKapacity.mesto, PrakticiKapacity.nazev_ordinace,
PrakticiKapacity.deti, PrakticiKapacity.dospeli, PrakticiKapacity.kontakt_tel,
PrakticiKapacity.kontakt_email, PrakticiKapacity.expirace, PrakticiKapacity.poznamka,
PrakticiKapacity.kraj, ZdravotnickeStredisko.nrpzs_kod, ZdravotnickeStredisko.latitude,
ZdravotnickeStredisko.longitude) \
.outerjoin(ZdravotnickeStredisko, ZdravotnickeStredisko.zdravotnicke_zarizeni_kod == PrakticiKapacity.zdravotnicke_zarizeni_kod) \
.filter(or_(func.left(PrakticiKapacity.zdravotnicke_zarizeni_kod, 11) == nrpzs_kod, nrpzs_kod is None)) \
.filter(or_(ZdravotnickeStredisko.okres_kod == okres_id, okres_id is None)) \
.filter(or_(ZdravotnickeStredisko.kraj_kod == kraj_id, kraj_id is None)) \
.filter(PrakticiKapacity.pocet_davek > 0) \
.filter(or_(PrakticiKapacity.expirace == None, PrakticiKapacity.expirace >= get_import_date())) \
.order_by(PrakticiKapacity.kraj, PrakticiKapacity.mesto, PrakticiKapacity.nazev_ordinace,
PrakticiKapacity.typ_vakciny) \
.all()
def find_free_vaccines_vaccine_options():
return db.session.query(PrakticiKapacity.typ_vakciny) \
.filter(PrakticiKapacity.pocet_davek > 0) \
.filter(or_(PrakticiKapacity.expirace == None, PrakticiKapacity.expirace >= get_import_date())) \
.distinct(PrakticiKapacity.typ_vakciny) \
.order_by(PrakticiKapacity.typ_vakciny) \
.all()
def count_vaccines_center(center_id):
mista = pd.read_sql_query(
"""
select ockovaci_mista.id ockovaci_misto_id, ockovaci_mista.nazev, okres_id, kraj_id
from ockovaci_mista join okresy on ockovaci_mista.okres_id=okresy.id
where ockovaci_mista.id='{}';
""".format(center_id),
db.engine
)
prijato = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato
from ockovani_distribuce
where akce = 'Příjem' and ockovaci_misto_id = '{}' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
prijato_odjinud = pd.read_sql_query(
"""
select cilove_ockovaci_misto_id ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato_odjinud
from ockovani_distribuce
where akce = 'Výdej' and cilove_ockovaci_misto_id = '{}' and datum < '{}'
group by (cilove_ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
vydano = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) vydano
from ockovani_distribuce
where akce = 'Výdej' and ockovaci_misto_id = '{}' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
spotreba = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pouzite_davky) pouzito, sum(znehodnocene_davky) znehodnoceno
from ockovani_spotreba
where ockovaci_misto_id = '{}' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
vyrobci = pd.read_sql_query("select vyrobce from vakciny;", db.engine)
mista_key = mista
mista_key['join'] = 0
vyrobci_key = vyrobci
vyrobci_key['join'] = 0
df = mista_key.merge(vyrobci_key).drop('join', axis=1)
df = pd.merge(df, prijato, how="left")
df = pd.merge(df, prijato_odjinud, how="left")
df = pd.merge(df, vydano, how="left")
df = pd.merge(df, spotreba, how="left")
df['prijato'] = df['prijato'].fillna(0).astype('int')
df['prijato_odjinud'] = df['prijato_odjinud'].fillna(0).astype('int')
df['vydano'] = df['vydano'].fillna(0).astype('int')
df['pouzito'] = df['pouzito'].fillna(0).astype('int')
df['znehodnoceno'] = df['znehodnoceno'].fillna(0).astype('int')
df['prijato_celkem'] = df['prijato'] + df['prijato_odjinud'] - df['vydano']
df['skladem'] = df['prijato_celkem'] - df['pouzito'] - df['znehodnoceno']
df = df.groupby(by=['vyrobce'], as_index=False).sum().sort_values(by=['vyrobce'])
df = df[(df['prijato_celkem'] > 0) | (df['pouzito'] > 0) | (df['znehodnoceno'] > 0)]
return df
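# Design note: the temporary 'join' column above emulates a cross join between centres
# and vaccine manufacturers; on pandas >= 1.2 the same result can be written directly as
#   df = mista.merge(vyrobci, how='cross')
# (behaviour unchanged, just a newer API)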
def count_vaccines_kraj(kraj_id):
mista = pd.read_sql_query(
"""
select ockovaci_mista.id ockovaci_misto_id, ockovaci_mista.nazev, kraj_id
from ockovaci_mista join okresy on ockovaci_mista.okres_id=okresy.id
where kraj_id='{}';
""".format(kraj_id),
db.engine
)
mista_ids = ','.join("'" + misto + "'" for misto in mista['ockovaci_misto_id'].tolist())
prijato = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato
from ockovani_distribuce
where akce = 'Příjem' and ockovaci_misto_id in ({}) and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(mista_ids, get_import_date()),
db.engine
)
prijato_odjinud = pd.read_sql_query(
"""
select cilove_ockovaci_misto_id ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato_odjinud
from ockovani_distribuce
where akce = 'Výdej' and cilove_ockovaci_misto_id in ({}) and ockovaci_misto_id not in({}) and datum < '{}'
group by (cilove_ockovaci_misto_id, vyrobce);
""".format(mista_ids, mista_ids, get_import_date()),
db.engine
)
vydano = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) vydano
from ockovani_distribuce
where akce = 'Výdej' and ockovaci_misto_id in ({}) and cilove_ockovaci_misto_id not in({}) and cilove_ockovaci_misto_id != '-' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(mista_ids, mista_ids, get_import_date()),
db.engine
)
spotreba = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(znehodnocene_davky) znehodnoceno
from ockovani_spotreba
where ockovaci_misto_id in ({}) and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(mista_ids, get_import_date()),
db.engine
)
ockovano = pd.read_sql_query(
"""
select kraj_nuts_kod kraj_id, sum(pocet) ockovano, vyrobce
from ockovani_lide
join vakciny on vakciny.vakcina = ockovani_lide.vakcina
where kraj_nuts_kod = '{}' and datum < '{}'
group by kraj_nuts_kod, vyrobce
""".format(kraj_id, get_import_date()),
db.engine
)
vyrobci =
|
pd.read_sql_query("select vyrobce from vakciny;", db.engine)
|
pandas.read_sql_query
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE_MATPOWER file.
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
# The file has been modified from Pypower.
# The function mu() has been added to the solver in order to provide an optimal iteration control
#
# Copyright (c) 2018 <NAME>
#
# This file retains the BSD-Style license
from numpy import angle
import scipy
scipy.ALLOW_THREADS = True
import numpy as np
np.set_printoptions(precision=8, suppress=True, linewidth=320)
########################################################################################################################
# MAIN
########################################################################################################################
if __name__ == "__main__":
from GridCal.Engine.All import *
from GridCal.Engine.Simulations.PowerFlow.jacobian_based_power_flow import ContinuousNR
import pandas as pd
|
pd.set_option('display.max_rows', 500)
|
pandas.set_option
|
from datetime import timedelta
import numpy as np
from pandas.core.groupby import BinGrouper, Grouper
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_nanoseconds
from pandas.tseries.period import PeriodIndex, period_range
import pandas.tseries.tools as tools
import pandas.core.common as com
import pandas.compat as compat
from pandas.lib import Timestamp
import pandas.lib as lib
_DEFAULT_METHOD = 'mean'
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
nperiods : optional, integer
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
Notes
-----
Use begin, end, nperiods to generate intervals that cannot be derived
directly from the associated object
"""
def __init__(self, freq='Min', closed=None, label=None, how='mean',
nperiods=None, axis=0,
fill_method=None, limit=None, loffset=None, kind=None,
convention=None, base=0, **kwargs):
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.nperiods = nperiods
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def resample(self, obj):
self._set_grouper(obj, sort=True)
ax = self.grouper
if isinstance(ax, DatetimeIndex):
rs = self._resample_timestamps()
elif isinstance(ax, PeriodIndex):
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == 'period': # pragma: no cover
print('Warning: multiple of frequency -> timestamps')
# Cannot have multiple of periods, convert to timestamp
self.kind = 'timestamp'
if self.kind is None or self.kind == 'period':
rs = self._resample_periods()
else:
obj = self.obj.to_timestamp(how=self.convention)
self._set_grouper(obj)
rs = self._resample_timestamps()
elif len(ax) == 0:
return self.obj
else: # pragma: no cover
raise TypeError('Only valid with DatetimeIndex or PeriodIndex')
rs_axis = rs._get_axis(self.axis)
rs_axis.name = ax.name
return rs
def _get_grouper(self, obj):
self._set_grouper(obj)
return self._get_binner_for_resample()
def _get_binner_for_resample(self):
# create the BinGrouper
# assume that self.set_grouper(obj) has already been called
ax = self.ax
if self.kind is None or self.kind == 'timestamp':
self.binner, bins, binlabels = self._get_time_bins(ax)
else:
self.binner, bins, binlabels = self._get_time_period_bins(ax)
self.grouper = BinGrouper(bins, binlabels)
return self.binner, self.grouper, self.obj
def _get_binner_for_grouping(self, obj):
# return an ordering of the transformed group labels,
# suitable for multi-grouping, e.g. the labels for
# the resampled intervals
ax = self._set_grouper(obj)
self._get_binner_for_resample()
# create the grouper
binner = self.binner
l = []
for key, group in self.grouper.get_iterator(ax):
l.extend([key]*len(group))
grouper = binner.__class__(l,freq=binner.freq,name=binner.name)
# since we may have had to sort
# may need to reorder groups here
if self.indexer is not None:
indexer = self.indexer.argsort(kind='quicksort')
grouper = grouper.take(indexer)
return grouper
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = _get_range_edges(ax, self.freq, closed=self.closed,
base=self.base)
tz = ax.tz
binner = labels = DatetimeIndex(freq=self.freq,
start=first.replace(tzinfo=None),
end=last.replace(tzinfo=None),
tz=tz,
name=ax.name)
# a little hack
trimmed = False
if (len(binner) > 2 and binner[-2] == ax.max() and
self.closed == 'right'):
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed)
if self.closed == 'right':
labels = binner
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[:len(bins)]
return binner, bins, labels
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
bin_edges = binner.asi8
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
day_nanos = _delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
bin_edges = bin_edges + day_nanos - 1
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
return binner, bin_edges
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
labels = binner = PeriodIndex(start=ax[0],
end=ax[-1],
freq=self.freq,
name=ax.name)
end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp()
if ax.tzinfo:
end_stamps = end_stamps.tz_localize(ax.tzinfo)
bins = ax.searchsorted(end_stamps, side='left')
return binner, bins, labels
@property
def _agg_method(self):
return self.how if self.how else _DEFAULT_METHOD
def _resample_timestamps(self):
# assumes set_grouper(obj) already called
axlabels = self.ax
self._get_binner_for_resample()
grouper = self.grouper
binner = self.binner
obj = self.obj
# Determine if we're downsampling
if axlabels.freq is not None or axlabels.inferred_freq is not None:
if len(grouper.binlabels) < len(axlabels) or self.how is not None:
# downsample
grouped = obj.groupby(grouper, axis=self.axis)
result = grouped.aggregate(self._agg_method)
# GH2073
if self.fill_method is not None:
result = result.fillna(method=self.fill_method,
limit=self.limit)
else:
# upsampling shortcut
if self.axis:
raise AssertionError('axis must be 0')
if self.closed == 'right':
res_index = binner[1:]
else:
res_index = binner[:-1]
# if we have the same frequency as our axis, then we are equal sampling
# even if how is None
if self.fill_method is None and self.limit is None and to_offset(
axlabels.inferred_freq) == self.freq:
result = obj.copy()
result.index = res_index
else:
result = obj.reindex(res_index, method=self.fill_method,
limit=self.limit)
else:
# Irregular data, have to use groupby
grouped = obj.groupby(grouper, axis=self.axis)
result = grouped.aggregate(self._agg_method)
if self.fill_method is not None:
result = result.fillna(method=self.fill_method,
limit=self.limit)
loffset = self.loffset
if isinstance(loffset, compat.string_types):
loffset = to_offset(self.loffset)
if isinstance(loffset, (DateOffset, timedelta)):
if (isinstance(result.index, DatetimeIndex)
and len(result.index) > 0):
result.index = result.index + loffset
return result
def _resample_periods(self):
# assumes set_grouper(obj) already called
axlabels = self.ax
obj = self.obj
if len(axlabels) == 0:
new_index = PeriodIndex(data=[], freq=self.freq)
return obj.reindex(new_index)
else:
start = axlabels[0].asfreq(self.freq, how=self.convention)
end = axlabels[-1].asfreq(self.freq, how='end')
new_index = period_range(start, end, freq=self.freq)
# Start vs. end of period
memb = axlabels.asfreq(self.freq, how=self.convention)
if is_subperiod(axlabels.freq, self.freq) or self.how is not None:
# Downsampling
rng = np.arange(memb.values[0], memb.values[-1] + 1)
bins = memb.searchsorted(rng, side='right')
grouper = BinGrouper(bins, new_index)
grouped = obj.groupby(grouper, axis=self.axis)
return grouped.aggregate(self._agg_method)
elif is_superperiod(axlabels.freq, self.freq):
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=self.fill_method,
limit=self.limit)
return _take_new_index(obj, indexer, new_index, axis=self.axis)
else:
raise ValueError('Frequency %s cannot be resampled to %s'
% (axlabels.freq, self.freq))
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
if isinstance(obj, Series):
new_values = com.take_1d(obj.values, indexer)
return Series(new_values, index=new_index, name=obj.name)
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError
return DataFrame(obj._data.reindex_indexer(
new_axis=new_index, indexer=indexer, axis=1))
else:
raise NotImplementedError
def _get_range_edges(axis, offset, closed='left', base=0):
if isinstance(offset, compat.string_types):
offset = to_offset(offset)
if isinstance(offset, Tick):
day_nanos = _delta_to_nanoseconds(timedelta(1))
# #1165
if (day_nanos % offset.nanos) == 0:
return _adjust_dates_anchored(axis[0], axis[-1], offset,
closed=closed, base=base)
first, last = axis.min(), axis.max()
if not isinstance(offset, Tick): # and first.time() != last.time():
# hack!
first = tools.normalize_date(first)
last = tools.normalize_date(last)
if closed == 'left':
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
from pandas.tseries.tools import normalize_date
start_day_nanos = Timestamp(normalize_date(first)).value
last_day_nanos = Timestamp(normalize_date(last)).value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
last_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - last_day_nanos) % offset.nanos
if closed == 'right':
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (Timestamp(fresult, tz=first.tz),
Timestamp(lresult, tz=last.tz))
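# Worked example (added for clarity, not in the original source): the anchoring is
# plain integer arithmetic on nanosecond values. With a 5-minute Tick
# (offset.nanos == 300_000_000_000), base == 0, `first` falling 1 minute past an
# offset boundary (foffset == 60_000_000_000) and `last` falling 2 minutes past one
# (loffset == 120_000_000_000), closed='right' gives:
#
#   fresult = first.value - 60_000_000_000    # rolled back to the previous boundary
#   lresult = last.value + 180_000_000_000    # rolled forward to the next boundary
#
# When a timestamp already sits on a boundary (its offset is 0), closed='right'
# steps `first` back by one full offset and leaves `last` unchanged.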
def asfreq(obj, freq, method=None, how=None, normalize=False):
"""
Utility frequency conversion method for Series/DataFrame
"""
if isinstance(obj.index, PeriodIndex):
if method is not None:
raise NotImplementedError
if how is None:
how = 'E'
new_index = obj.index.asfreq(freq, how=how)
new_obj = obj.copy()
new_obj.index = new_index
return new_obj
else:
if len(obj.index) == 0:
return obj.copy()
        dti = date_range(obj.index[0], obj.index[-1], freq=freq)  # completion (api: pandas.tseries.index.date_range)
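# Hedged usage sketch for the asfreq conversion above (not in the original source):
# from the public API the DatetimeIndex branch reduces to reindexing against a new
# date_range, which is what Series/DataFrame.asfreq exposes.
#
#   >>> import pandas as pd
#   >>> s = pd.Series([1.0, 2.0], index=pd.to_datetime(['2000-01-01', '2000-01-03']))
#   >>> s.asfreq('D')                  # gaps introduced by the new frequency become NaN
#   >>> s.asfreq('D', method='pad')    # or forward-fill them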
import glob
import itertools
import json
import os
from typing import Any, Dict, List, Optional, Sequence
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy
WEIGHT = "mw"
MEANS = [
"lcoe",
"interconnect_annuity",
"offshore_spur_miles",
"spur_miles",
"tx_miles",
"site_substation_spur_miles",
"substation_metro_tx_miles",
"site_metro_spur_miles",
]
SUMS = ["area", "mw"]
PROFILE_KEYS = ["metro_id", "cluster_level", "cluster"]
HOURS_IN_YEAR = 8784
class ClusterBuilder:
"""
Builds clusters of resources.
Attributes:
groups (list of dict): Resource group metadata.
- `metadata` (str): Relative path to resource metadata file.
- `profiles` (str): Relative path to variable resource profiles file.
- `technology` (str): Resource type.
- ... and any additional (optional) keys.
clusters (list of dict): Resource clusters.
- `group` (dict): Resource group from :attr:`groups`.
- `kwargs` (dict): Arguments used to uniquely identify the group.
- `region` (str): Model region in which the clustering was performed.
- `clusters` (pd.DataFrame): Computed resource clusters.
- `profiles` (np.ndarray): Computed profiles for the resource clusters.
"""
def __init__(
self, path: str = ".", remove_feb_29: bool = True, utc_offset: int = 0
) -> None:
"""
Initialize with cluster group metadata.
Arguments:
path: Path to the directory containing the metadata files ('*_group.json').
Raises:
ValueError: Group metadata missing required keys.
"""
self.remove_feb_29 = remove_feb_29
self.utc_offset = utc_offset
self.groups = load_groups(path)
required = ("metadata", "profiles", "technology")
for g in self.groups:
missing = [k for k in required if k not in g]
if missing:
raise ValueError(f"Group metadata missing required keys {missing}: {g}")
g["metadata"] = os.path.abspath(os.path.join(path, g["metadata"]))
g["profiles"] = os.path.abspath(os.path.join(path, g["profiles"]))
self.clusters: List[dict] = []
def _test_clusters_exist(self) -> None:
if not self.clusters:
raise ValueError("No clusters have yet been computed")
def find_groups(self, **kwargs: Any) -> List[dict]:
"""
Return the groups matching the specified arguments.
Arguments:
**kwargs: Arguments to match against group metadata.
"""
return [
g
for g in self.groups
if all(k in g and g[k] == v for k, v in kwargs.items())
]
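    # Hedged usage sketch (not part of the original class): matching requires every
    # requested key to be present and equal in the group metadata. The path and key
    # values below are illustrative only.
    #
    #   >>> builder = ClusterBuilder(path="resource_groups")
    #   >>> builder.find_groups(technology="utilitypv")
    #   [{'technology': 'utilitypv', 'metadata': '...', 'profiles': '...'}]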
def build_clusters(
self,
region: str,
ipm_regions: Sequence[str],
        min_capacity: Optional[float] = None,
        max_clusters: Optional[int] = None,
        cap_multiplier: Optional[float] = None,
**kwargs: Any,
) -> None:
"""
Build and append resource clusters to the collection.
This method can be called as many times as desired before generating outputs.
Arguments:
region: Model region (used only to label results).
ipm_regions: IPM regions in which to select resources.
min_capacity: Minimum total capacity (MW). Resources are selected,
from lowest to highest levelized cost of energy (lcoe),
until the minimum capacity is just exceeded.
If `None`, all resources are selected for clustering.
max_clusters: Maximum number of resource clusters to compute.
If `None`, no clustering is performed; resources are returned unchanged.
cap_multiplier: Capacity multiplier applied to resource metadata.
            **kwargs: Arguments to :meth:`find_groups` for selecting the resource group.
Raises:
ValueError: Arguments match multiple resource groups.
"""
groups = self.find_groups(**kwargs)
if len(groups) > 1:
raise ValueError(f"Arguments match multiple resource groups: {groups}")
c: Dict[str, Any] = {}
c["group"] = groups[0]
c["kwargs"] = kwargs
c["region"] = region
metadata = load_metadata(c["group"]["metadata"], cap_multiplier=cap_multiplier)
c["clusters"] = build_clusters(
metadata,
ipm_regions=ipm_regions,
min_capacity=min_capacity,
max_clusters=max_clusters,
)
c["profiles"] = build_cluster_profiles(
c["group"]["profiles"], c["clusters"], metadata
)
self.clusters.append(c)
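    # Hedged usage sketch (not part of the original class); the region, IPM region
    # and capacity values are invented for illustration:
    #
    #   >>> builder.build_clusters(
    #   ...     region="CA_N",               # label carried through to the outputs
    #   ...     ipm_regions=["WEC_CALN"],    # where resources are selected
    #   ...     min_capacity=5000,           # MW, filled from lowest lcoe upward
    #   ...     max_clusters=4,              # cap on the number of clusters
    #   ...     technology="utilitypv",      # forwarded to find_groups(**kwargs)
    #   ... )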
def get_cluster_metadata(self) -> Optional[pd.DataFrame]:
"""
Return computed cluster metadata.
The following fields are added:
- `region` (str): Region label passed to :meth:`build_clusters`.
- `technology` (str): From resource metadata 'technology'.
- `cluster` (int): Unique identifier for each cluster to link to other outputs.
The following fields are renamed:
- `max_capacity`: From 'mw'.
- `area_km2`: From 'area'.
Raises:
ValueError: No clusters have yet been computed.
"""
self._test_clusters_exist()
dfs = []
start = 0
for c in self.clusters:
df = c["clusters"].reset_index()
columns = [x for x in np.unique([WEIGHT] + MEANS + SUMS) if x in df] + [
"ids"
]
n = len(df)
df = (
df[columns]
.assign(
region=c["region"],
technology=c["group"]["technology"],
cluster=np.arange(start, start + n),
)
.rename(columns={"mw": "max_capacity", "area": "area_km2"})
)
dfs.append(df)
start += n
return pd.concat(dfs, axis=0, ignore_index=True, sort=False)
def get_cluster_profiles(self) -> Optional[pd.DataFrame]:
"""
Return computed cluster profiles.
Uses multi-index columns ('region', 'Resource', 'cluster'), where
- `region` (str): Region label passed to :meth:`build_clusters`.
- `Resource` (str): From resource metadata 'technology'.
- `cluster` (int): Unique identifier for each cluster to link to other outputs.
The first column is the hour number.
Subsequent columns are the cluster profiles.
Raises:
ValueError: No clusters have yet been computed.
"""
self._test_clusters_exist()
profiles = np.column_stack([c["profiles"] for c in self.clusters])
columns = []
start = 0
for c in self.clusters:
for _ in range(c["profiles"].shape[1]):
columns.append((c["region"], c["group"]["technology"], start))
start += 1
df = pd.DataFrame(profiles, columns=columns)
if self.remove_feb_29:
df = _remove_feb_29(df, offset=self.utc_offset)
# Insert hour numbers into first column
df.insert(
loc=0, column=("region", "Resource", "cluster"), value=np.arange(8760) + 1,
)
df.columns = pd.MultiIndex.from_tuples(df.columns)
return df
def load_groups(path: str = ".") -> List[dict]:
"""Load group metadata."""
paths = glob.glob(os.path.join(path, "*_group.json"))
groups = []
for p in paths:
with open(p, mode="r") as fp:
groups.append(json.load(fp))
return groups
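# Hedged example (not in the original source) of a '*_group.json' file, based on
# the keys required by ClusterBuilder: 'metadata' and 'profiles' are paths relative
# to `path`, and any extra keys (here 'existing') can be matched via
# find_groups(**kwargs). The file names are hypothetical.
#
#   {
#     "technology": "utilitypv",
#     "metadata": "utilitypv_site_metadata.csv",
#     "profiles": "utilitypv_site_profiles.csv",
#     "existing": false
#   }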
def load_metadata(path: str, cap_multiplier: float = None) -> pd.DataFrame:
"""Load resource metadata."""
    df = pd.read_csv(path, dtype={"metro_id": str})  # completion (api: pandas.read_csv)
# -*- coding: utf-8 -*-
import pandas as pd
def table_five(x: list, y: list):
"""Função para geração da tabela utilizada na lista 5 (Pacote five)
:param: x: Valores em X
:param: y: Valores em Y
"""
    table = pd.Series()  # completion (api: pandas.Series)
import numpy as np
import pandas as pd
import numba
import pylab as plt
import matplotlib as mpl
import os
def mkdir(path):os.system('mkdir -p {}'.format(path))
def roundto(x, base=50000):
return int(base * np.round(float(x)/base))
def TI(a):
return a.replace({False:None}).dropna().index
def polymorphic(data, minAF=1e-9,mincoverage=10,index=True):
def poly(x):return (x>=minAF)&(x<=1-minAF)
C,D=data.xs('C',level='READ',axis=1),data.xs('D',level='READ',axis=1)
I=(C.sum(1)/D.sum(1)).apply(lambda x:poly(x)) & ((D>=mincoverage).mean(1)==1)
if index:
return I
return data[I]
def batch(iterable, n=10000000):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
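# Doctest-style example (added for clarity): `batch` yields consecutive slices of
# at most n items.
#
#   >>> list(batch(list(range(5)), n=2))
#   [[0, 1], [2, 3], [4]]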
@numba.vectorize
def vectorizedLog(x):
return float(np.log(x))
def numbaLog(df):
    return pd.DataFrame(vectorizedLog(df.values),columns=df.columns,index=df.index).astype(float)
@numba.vectorize
def vectorizedExp(x):
return float(np.exp(x))
def numbaExp(df):
    return pd.DataFrame(vectorizedExp(df.values),columns=df.columns,index=df.index).astype(float)
class EE:
@staticmethod
def fx(x, s=0.0, h=0.5):
Z=(1 + s) * x ** 2 + 2 * (1 + h * s) * x * (1 - x) + (1 - x) ** 2
if Z>0:
return ((1 + s) * x ** 2 + (1 + h * s) * x * (1 - x)) / (Z)
else:
return 0
@staticmethod
def sig(x): return 1. / (1 + np.exp(-x))
@staticmethod
def logit(p): return np.log(p) - np.log(1 - p)
# def logit_(p): return T.log(p) - T.log(1 - p)
# def sig_(x): return 1. / (1 + T.exp(-x))
@staticmethod
def Nu(s, t, nu0, theta, n=2000): return EE.Z(EE.sig(t * s / 2 + EE.logit(nu0)), n, theta)
@staticmethod
def forward(x0=0.005,h=0.5,s=1,t=150):
def f(x,h=0.5,s=1): return ((1+s)*x*x + (1+h*s)*x*(1-x) )/((1+s)*x*x + 2*(1+h*s)*x*(1-x) +(1-x)**2)
x=[x0]
for i in range(t):
x+=[f(x[-1],h,s)]
return pd.Series(x)
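    # Illustrative note (not in the original source): `forward` iterates the
    # deterministic selection recursion
    #   x' = ((1+s)*x**2 + (1+h*s)*x*(1-x)) / ((1+s)*x**2 + 2*(1+h*s)*x*(1-x) + (1-x)**2)
    # and returns the trajectory as a Series of length t + 1, e.g.
    #
    #   >>> EE.forward(x0=0.005, h=0.5, s=0.1, t=50)   # frequency rises toward 1 for s > 0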
floatX = 'float64'
@staticmethod
def Z(nu, n, theta): return theta * (
nu * ((nu + 1) / 2. - 1. / ((1 - nu) * n + 1)) + (1 - nu) * ((n + 1.) / (2 * n) - 1. / ((1 - nu) * n + 1)))
class VCF:
@staticmethod
def loadDP(fname):
a= pd.read_csv(fname,sep='\t',na_values='.').set_index(['CHROM','POS'])
a.columns=pd.MultiIndex.from_tuples(map(lambda x:(int(x.split('R')[1].split('F')[0]),int(x.split('F')[1])),a.columns))
return a
@staticmethod
def loadCD(vcfgz,vcftools='~/bin/vcftools_0.1.13/bin/vcftools'):
"""
vcfgz: vcf file where samples are in the format of RXXFXXX
"""
vcf=os.path.basename(vcfgz)
path=vcfgz.split(vcf)[0]
os.system('cd {0} && {1} --gzvcf {2} --extract-FORMAT-info DP && {1} --gzvcf {2} --extract-FORMAT-info AD'.format(path,vcftools,vcf))
fname='out.{}.FORMAT'
a=map(lambda x: VCF.loadDP(path +fname.format(x)) ,['AD','DP'])
a=pd.concat(a,keys=['C','D'],axis=1).reorder_levels([1,2,0],1).sort_index(1)
a.columns.names=['REP','GEN','READ']
return a
class SynchronizedFile:
@staticmethod
def processSyncFileLine(x,dialellic=True):
z = x.apply(lambda xx: pd.Series(xx.split(':'), index=['A', 'T', 'C', 'G', 'N', 'del'])).astype(float).iloc[:, :4]
ref = x.name[-1]
alt = z.sum().sort_values()[-2:]
alt = alt[(alt.index != ref)].index[0]
        if dialellic: ## Alternate allele is everything except the reference
return pd.concat([z[ref].astype(int).rename('C'), (z.sum(1)).rename('D')], axis=1).stack()
else: ## Alternate allele is the allele with the most reads
return pd.concat([z[ref].astype(int).rename('C'), (z[ref] + z[alt]).rename('D')], axis=1).stack()
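    # Hedged format note (not in the original source): each population column of a
    # popoolation2 sync file holds read counts as 'A:T:C:G:N:del', so a line such as
    #
    #   2L   5002   T   0:12:0:0:0:0   0:9:3:0:0:0
    #
    # yields, per population, the reference count C (12 and 9 here) and the depth D
    # (the full count sum when `dialellic` is True, otherwise ref + major alternate).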
@staticmethod
def load(fname = './sample_data/popoolation2/F37.sync'):
# print 'loading',fname
cols=pd.read_csv(fname+'.pops', sep='\t', header=None, comment='#').iloc[0].apply(lambda x: list(map(int,x.split(',')))).tolist()
data=pd.read_csv(fname, sep='\t', header=None).set_index(list(range(3)))
data.columns=pd.MultiIndex.from_tuples(cols)
data.index.names= ['CHROM', 'POS', 'REF']
data=data.sort_index().reorder_levels([1,0],axis=1).sort_index(axis=1)
data=data.apply(SynchronizedFile.processSyncFileLine,axis=1)
data.columns.names=['REP','GEN','READ']
data=SynchronizedFile.changeCtoAlternateAndDampZeroReads(data)
data.index=data.index.droplevel('REF')
return data
@staticmethod
def changeCtoAlternateAndDampZeroReads(a):
C = a.xs('C', level=2, axis=1).sort_index().sort_index(axis=1)
D = a.xs('D', level=2, axis=1).sort_index().sort_index(axis=1)
C = D - C
if (D == 0).sum().sum():
C[D == 0] += 1
D[D == 0] += 2
C.columns = pd.MultiIndex.from_tuples([x + ('C',) for x in C.columns], names=C.columns.names + ['READ'])
D.columns = pd.MultiIndex.from_tuples([x + ('D',) for x in D.columns], names=D.columns.names + ['READ'])
        return pd.concat([C, D], axis=1)  # completion (api: pandas.concat)
"""
Holds Delegate and Accessor Logic
"""
import os
import copy
import uuid
import shutil
import datetime
import tempfile
import pandas as pd
import numpy as np
from ._internals import register_dataframe_accessor, register_series_accessor
from ._array import GeoType
from ._io.fileops import to_featureclass, from_featureclass
from arcgis.geometry import Geometry, SpatialReference, Envelope, Point
############################################################################
def _is_geoenabled(df):
"""
    Checks whether a pandas DataFrame is 'geo-enabled', meaning a spatial
    column is defined and is backed by a GeoArray.
:returns: boolean
"""
try:
if isinstance(df, pd.DataFrame) and \
hasattr(df, 'spatial') and \
df.spatial.name and \
df[df.spatial.name].dtype.name.lower() == 'geometry':
return True
else:
return False
except:
return False
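# Hedged usage sketch (not in the original source): a DataFrame passes this check
# once a geometry column has been registered through the accessor below. `geoms`
# is a hypothetical sequence of Geometry objects.
#
#   >>> df = pd.DataFrame({'SHAPE': geoms, 'name': ['a', 'b']})
#   >>> df.spatial.set_geometry('SHAPE')
#   >>> _is_geoenabled(df)
#   True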
###########################################################################
@pd.api.extensions.register_series_accessor("geom")
class GeoSeriesAccessor:
"""
"""
_data = None
_index = None
_name = None
#----------------------------------------------------------------------
def __init__(self, obj):
"""initializer"""
self._validate(obj)
self._data = obj.values
self._index = obj.index
self._name = obj.name
#----------------------------------------------------------------------
@staticmethod
def _validate(obj):
if not is_geometry_type(obj):
raise AttributeError("Cannot use 'geom' accessor on objects of "
"dtype '{}'.".format(obj.dtype))
##---------------------------------------------------------------------
## Accessor Properties
##---------------------------------------------------------------------
@property
def area(self):
"""
        Returns the feature's area
:returns: float in a series
"""
res = self._data.area
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def as_arcpy(self):
"""
Returns the features as ArcPy Geometry
:returns: arcpy.Geometry in a series
"""
res = self._data.as_arcpy
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def as_shapely(self):
"""
Returns the features as Shapely Geometry
:returns: shapely.Geometry in a series
"""
res = self._data.as_shapely
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def centroid(self):
"""
Returns the feature's centroid
:returns: tuple (x,y) in series
"""
res = self._data.centroid
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def extent(self):
"""
Returns the feature's extent
:returns: tuple (xmin,ymin,xmax,ymax) in series
"""
res = self._data.extent
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def first_point(self):
"""
Returns the feature's first point
:returns: Geometry
"""
res = self._data.first_point
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def geoextent(self):
"""
        Returns the geometry's extents
:returns: Series of Floats
"""
res = self._data.geoextent
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def geometry_type(self):
"""
returns the geometry types
:returns: Series of strings
"""
res = self._data.geometry_type
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def hull_rectangle(self):
"""
A space-delimited string of the coordinate pairs of the convex hull
:returns: Series of strings
"""
res = self._data.hull_rectangle
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def is_empty(self):
"""
Returns True/False if feature is empty
:returns: Series of Booleans
"""
res = self._data.is_empty
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def is_multipart(self):
"""
        Returns True/False if the feature has multiple parts
:returns: Series of Booleans
"""
res = self._data.is_multipart
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def is_valid(self):
"""
Returns True/False if features geometry is valid
:returns: Series of Booleans
"""
res = self._data.is_valid
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def JSON(self):
"""
Returns JSON string of Geometry
:returns: Series of strings
"""
res = self._data.JSON
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def label_point(self):
"""
Returns the geometry point for the optimal label location
:returns: Series of Geometries
"""
res = self._data.label_point
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def last_point(self):
"""
Returns the Geometry of the last point in a feature.
:returns: Series of Geometry
"""
res = self._data.last_point
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def length(self):
"""
Returns the length of the features
:returns: Series of float
"""
res = self._data.length
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def length3D(self):
"""
Returns the length of the features
:returns: Series of float
"""
res = self._data.length3D
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def part_count(self):
"""
Returns the number of parts in a feature's geometry
:returns: Series of Integer
"""
res = self._data.part_count
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def point_count(self):
"""
Returns the number of points in a feature's geometry
:returns: Series of Integer
"""
res = self._data.point_count
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def spatial_reference(self):
"""
Returns the Spatial Reference of the Geometry
:returns: Series of SpatialReference
"""
res = self._data.spatial_reference
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def true_centroid(self):
"""
Returns the true centroid of the Geometry
:returns: Series of Points
"""
res = self._data.true_centroid
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def WKB(self):
"""
Returns the Geometry as WKB
:returns: Series of Bytes
"""
res = self._data.WKB
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def WKT(self):
"""
Returns the Geometry as WKT
:returns: Series of String
"""
res = self._data.WKT
res.index = self._index
return res
##---------------------------------------------------------------------
## Accessor Geometry Method
##---------------------------------------------------------------------
def angle_distance_to(self, second_geometry, method="GEODESIC"):
"""
Returns a tuple of angle and distance to another point using a
measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
        second_geometry Required Geometry. An arcgis.Geometry object.
--------------- --------------------------------------------------------------------
method Optional String. PLANAR measurements reflect the projection of geographic
data onto the 2D surface (in other words, they will not take into
account the curvature of the earth). GEODESIC, GREAT_ELLIPTIC,
LOXODROME, and PRESERVE_SHAPE measurement types may be chosen as
an alternative, if desired.
=============== ====================================================================
:returns: a tuple of angle and distance to another point using a measurement type.
"""
res = self._data.angle_distance_to(**{'second_geometry' : second_geometry,
'method' : method})
res.index = self._index
return res
#----------------------------------------------------------------------
def boundary(self):
"""
Constructs the boundary of the geometry.
:returns: arcgis.geometry.Polyline
"""
res = self._data.boundary()
res.index = self._index
return res
#----------------------------------------------------------------------
def buffer(self, distance):
"""
Constructs a polygon at a specified distance from the geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
distance Required float. The buffer distance. The buffer distance is in the
same units as the geometry that is being buffered.
A negative distance can only be specified against a polygon geometry.
=============== ====================================================================
:returns: arcgis.geometry.Polygon
"""
res = self._data.buffer(**{'distance' : distance})
res.index = self._index
return res
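    # Hedged usage sketch (not part of the original class): like the properties
    # above, geometry methods are applied element-wise over the underlying GeoArray
    # and returned as a Series re-aligned to the original index, e.g.
    #
    #   >>> buffered = df['SHAPE'].geom.buffer(10)   # 10 units of the data's spatial reference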
#----------------------------------------------------------------------
def clip(self, envelope):
"""
Constructs the intersection of the geometry and the specified extent.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
        envelope        Required tuple. The tuple must be (XMin, YMin, XMax, YMax); the values
                        represent the lower-left and upper-right bounds of the extent.
=============== ====================================================================
:returns: output geometry clipped to extent
"""
res = self._data.clip(**{'envelope' : envelope})
res.index = self._index
return res
#----------------------------------------------------------------------
def contains(self, second_geometry, relation=None):
"""
Indicates if the base geometry contains the comparison geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
relation Optional string. The spatial relationship type.
+ BOUNDARY - Relationship has no restrictions for interiors or boundaries.
+ CLEMENTINI - Interiors of geometries must intersect. Specifying CLEMENTINI is equivalent to specifying None. This is the default.
+ PROPER - Boundaries of geometries must not intersect.
=============== ====================================================================
:returns: boolean
"""
res = self._data.contains(**{'second_geometry' : second_geometry,
'relation' : relation})
res.index = self._index
return res
#----------------------------------------------------------------------
def convex_hull(self):
"""
Constructs the geometry that is the minimal bounding polygon such
that all outer angles are convex.
"""
res = self._data.convex_hull()
res.index = self._index
return res
#----------------------------------------------------------------------
def crosses(self, second_geometry):
"""
Indicates if the two geometries intersect in a geometry of a lesser
shape type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: boolean
"""
res = self._data.crosses(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def cut(self, cutter):
"""
Splits this geometry into a part left of the cutting polyline, and
a part right of it.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
        cutter          Required Polyline. The cutting polyline geometry.
=============== ====================================================================
:returns: a list of two geometries
"""
res = self._data.cut(**{'cutter' : cutter})
res.index = self._index
return res
#----------------------------------------------------------------------
def densify(self, method, distance, deviation):
"""
Creates a new geometry with added vertices
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. The type of densification, DISTANCE, ANGLE, or GEODESIC
--------------- --------------------------------------------------------------------
distance Required float. The maximum distance between vertices. The actual
distance between vertices will usually be less than the maximum
distance as new vertices will be evenly distributed along the
original segment. If using a type of DISTANCE or ANGLE, the
distance is measured in the units of the geometry's spatial
reference. If using a type of GEODESIC, the distance is measured
in meters.
--------------- --------------------------------------------------------------------
deviation Required float. Densify uses straight lines to approximate curves.
You use deviation to control the accuracy of this approximation.
The deviation is the maximum distance between the new segment and
the original curve. The smaller its value, the more segments will
be required to approximate the curve.
=============== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.densify(**{'method' : method,
'distance' : distance,
'deviation' : deviation})
res.index = self._index
return res
#----------------------------------------------------------------------
def difference(self, second_geometry):
"""
Constructs the geometry that is composed only of the region unique
to the base geometry but not part of the other geometry. The
following illustration shows the results when the red polygon is the
source geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.difference(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def disjoint(self, second_geometry):
"""
Indicates if the base and comparison geometries share no points in
common.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: boolean
"""
res = self._data.disjoint(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def distance_to(self, second_geometry):
"""
Returns the minimum distance between two geometries. If the
geometries intersect, the minimum distance is 0.
Both geometries must have the same projection.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: float
"""
res = self._data.distance_to(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def equals(self, second_geometry):
"""
Indicates if the base and comparison geometries are of the same
shape type and define the same set of points in the plane. This is
a 2D comparison only; M and Z values are ignored.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: boolean
"""
res = self._data.equals(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def generalize(self, max_offset):
"""
Creates a new simplified geometry using a specified maximum offset
tolerance.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
max_offset Required float. The maximum offset tolerance.
=============== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.generalize(**{'max_offset' : max_offset})
res.index = self._index
return res
#----------------------------------------------------------------------
def get_area(self, method, units=None):
"""
Returns the area of the feature using a measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. PLANAR measurements reflect the projection of
geographic data onto the 2D surface (in other words, they will not
take into account the curvature of the earth). GEODESIC,
GREAT_ELLIPTIC, LOXODROME, and PRESERVE_SHAPE measurement types
may be chosen as an alternative, if desired.
--------------- --------------------------------------------------------------------
units Optional String. Areal unit of measure keywords: ACRES | ARES | HECTARES
| SQUARECENTIMETERS | SQUAREDECIMETERS | SQUAREINCHES | SQUAREFEET
| SQUAREKILOMETERS | SQUAREMETERS | SQUAREMILES |
SQUAREMILLIMETERS | SQUAREYARDS
=============== ====================================================================
:returns: float
"""
res = self._data.get_area(**{'method' : method,
'units' : units})
res.index = self._index
return res
#----------------------------------------------------------------------
def get_length(self, method, units):
"""
Returns the length of the feature using a measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. PLANAR measurements reflect the projection of
geographic data onto the 2D surface (in other words, they will not
take into account the curvature of the earth). GEODESIC,
GREAT_ELLIPTIC, LOXODROME, and PRESERVE_SHAPE measurement types
may be chosen as an alternative, if desired.
--------------- --------------------------------------------------------------------
units Required String. Linear unit of measure keywords: CENTIMETERS |
DECIMETERS | FEET | INCHES | KILOMETERS | METERS | MILES |
MILLIMETERS | NAUTICALMILES | YARDS
=============== ====================================================================
:returns: float
"""
res = self._data.get_length(**{'method' : method,
'units' : units})
res.index = self._index
return res
#----------------------------------------------------------------------
def get_part(self, index=None):
"""
Returns an array of point objects for a particular part of geometry
or an array containing a number of arrays, one for each part.
**requires arcpy**
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
index Required Integer. The index position of the geometry.
=============== ====================================================================
:return: arcpy.Array
"""
return self._data.get_part(**{'index' : index})
#----------------------------------------------------------------------
def intersect(self, second_geometry, dimension=1):
"""
Constructs a geometry that is the geometric intersection of the two
input geometries. Different dimension values can be used to create
different shape types. The intersection of two geometries of the
same shape type is a geometry containing only the regions of overlap
between the original geometries.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
dimension Required Integer. The topological dimension (shape type) of the
resulting geometry.
+ 1 -A zero-dimensional geometry (point or multipoint).
+ 2 -A one-dimensional geometry (polyline).
+ 4 -A two-dimensional geometry (polygon).
=============== ====================================================================
        :returns: arcgis.geometry.Geometry
"""
res = self._data.intersect(**{'second_geometry' : second_geometry,
'dimension' : dimension})
res.index = self._index
return res
#----------------------------------------------------------------------
def measure_on_line(self, second_geometry, as_percentage=False):
"""
Returns a measure from the start point of this line to the in_point.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
as_percentage Optional Boolean. If False, the measure will be returned as a
distance; if True, the measure will be returned as a percentage.
=============== ====================================================================
:return: float
"""
res = self._data.measure_on_line(**{'second_geometry' : second_geometry,
'as_percentage' : as_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def overlaps(self, second_geometry):
"""
Indicates if the intersection of the two geometries has the same
shape type as one of the input geometries and is not equivalent to
either of the input geometries.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: boolean
"""
res = self._data.overlaps(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
    def point_from_angle_and_distance(self, angle, distance, method='GEODESIC'):
"""
Returns a point at a given angle and distance in degrees and meters
using the specified measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
angle Required Float. The angle in degrees to the returned point.
--------------- --------------------------------------------------------------------
distance Required Float. The distance in meters to the returned point.
--------------- --------------------------------------------------------------------
method Optional String. PLANAR measurements reflect the projection of geographic
data onto the 2D surface (in other words, they will not take into
account the curvature of the earth). GEODESIC, GREAT_ELLIPTIC,
LOXODROME, and PRESERVE_SHAPE measurement types may be chosen as
an alternative, if desired.
=============== ====================================================================
:return: arcgis.geometry.Geometry
"""
res = self._data.point_from_angle_and_distance(**{'angle' : angle,
'distance' : distance,
'method' : method})
res.index = self._index
return res
#----------------------------------------------------------------------
def position_along_line(self, value, use_percentage=False):
"""
Returns a point on a line at a specified distance from the beginning
of the line.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
value Required Float. The distance along the line.
--------------- --------------------------------------------------------------------
use_percentage Optional Boolean. The distance may be specified as a fixed unit
of measure or a ratio of the length of the line. If True, value
is used as a percentage; if False, value is used as a distance.
For percentages, the value should be expressed as a double from
0.0 (0%) to 1.0 (100%).
=============== ====================================================================
:return: Geometry
"""
res = self._data.position_along_line(**{'value' : value,
'use_percentage' : use_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def project_as(self, spatial_reference, transformation_name=None):
"""
Projects a geometry and optionally applies a geotransformation.
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
spatial_reference Required SpatialReference. The new spatial reference. This can be a
SpatialReference object or the coordinate system name.
-------------------- --------------------------------------------------------------------
transformation_name Required String. The geotransformation name.
==================== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.project_as(**{'spatial_reference' : spatial_reference,
'transformation_name' : transformation_name})
res.index = self._index
return res
#----------------------------------------------------------------------
def query_point_and_distance(self, second_geometry,
use_percentage=False):
"""
Finds the point on the polyline nearest to the in_point and the
distance between those points. Also returns information about the
side of the line the in_point is on as well as the distance along
the line where the nearest point occurs.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
        use_percentage  Optional Boolean. If False, the measure will be returned as a
                        distance; if True, the measure will be returned as a percentage.
=============== ====================================================================
:return: tuple
"""
res = self._data.query_point_and_distance(**{'second_geometry' : second_geometry,
'use_percentage' : use_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def segment_along_line(self, start_measure,
end_measure, use_percentage=False):
"""
Returns a Polyline between start and end measures. Similar to
Polyline.positionAlongLine but will return a polyline segment between
two points on the polyline instead of a single point.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
start_measure Required Float. The starting distance from the beginning of the line.
--------------- --------------------------------------------------------------------
end_measure Required Float. The ending distance from the beginning of the line.
--------------- --------------------------------------------------------------------
use_percentage Optional Boolean. The start and end measures may be specified as
fixed units or as a ratio.
If True, start_measure and end_measure are used as a percentage; if
False, start_measure and end_measure are used as a distance. For
percentages, the measures should be expressed as a double from 0.0
(0 percent) to 1.0 (100 percent).
=============== ====================================================================
:returns: Geometry
"""
res = self._data.segment_along_line(**{'start_measure' : start_measure,
'end_measure' : end_measure,
'use_percentage' : use_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def snap_to_line(self, second_geometry):
"""
Returns a new point based on in_point snapped to this geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: arcgis.gis.Geometry
"""
res = self._data.snap_to_line(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def symmetric_difference (self, second_geometry):
"""
Constructs the geometry that is the union of two geometries minus the
        intersection of those geometries.
The two input geometries must be the same shape type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: arcgis.gis.Geometry
"""
res = self._data.symmetric_difference(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def touches(self, second_geometry):
"""
Indicates if the boundaries of the geometries intersect.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: boolean
"""
res = self._data.touches(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def union(self, second_geometry):
"""
Constructs the geometry that is the set-theoretic union of the input
geometries.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: arcgis.gis.Geometry
"""
res = self._data.union(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def within(self, second_geometry, relation=None):
"""
Indicates if the base geometry is within the comparison geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
relation Optional String. The spatial relationship type.
- BOUNDARY - Relationship has no restrictions for interiors or boundaries.
- CLEMENTINI - Interiors of geometries must intersect. Specifying CLEMENTINI is equivalent to specifying None. This is the default.
- PROPER - Boundaries of geometries must not intersect.
=============== ====================================================================
:return: boolean
"""
res = self._data.within(**{'second_geometry' : second_geometry,
'relation' : relation}
)
res.index = self._index
return res
#--------------------------------------------------------------------------
def is_geometry_type(obj):
t = getattr(obj, 'dtype', obj)
try:
return isinstance(t, GeoType) or issubclass(t, GeoType)
except Exception:
return False
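# Illustrative note (not in the original source): `is_geometry_type` accepts either
# an array-like (checked through its dtype) or a dtype/class itself.
#
#   >>> is_geometry_type(df['SHAPE'])       # geometry-dtyped column -> True
#   >>> is_geometry_type(pd.Series([1.0]))  # float64 dtype -> False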
###########################################################################
@register_dataframe_accessor("spatial")
class GeoAccessor(object):
"""
The DataFrame Accessor is a namespace that performs dataset operations.
This includes visualization, spatial indexing, IO and dataset level properties.
"""
_sr = None
_viz = None
_data = None
_name = None
_index = None
_kdtree = None
_sindex = None
_stype = None
_sfname = None
_HASARCPY = None
_HASSHAPELY = None
#----------------------------------------------------------------------
def __init__(self, obj):
self._data = obj
self._index = obj.index
self._name = None
#----------------------------------------------------------------------
def _repr_svg_(self):
"""draws the dataframe as SVG features"""
if self.name:
fn = lambda g, n: getattr(g, n, None)() if g is not None else None
vals = np.vectorize(fn, otypes='O')(self._data['SHAPE'], 'svg')
svg = "\n".join(vals.tolist())
svg_top = '<svg xmlns="http://www.w3.org/2000/svg" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" '
if len(self._data) == 0:
return svg_top + '/>'
else:
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = self.full_extent
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = xmin - .001, ymin - .001, xmax + .001, ymax + .001
else:
# Expand bounds by a fraction of the data ranges
expand = 0.04 # or 4%, same as R plots
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100.0, dx]), 300])
height = min([max([100.0, dy]), 300])
try:
scale_factor = max([dx, dy]) / max([width, height])
except ZeroDivisionError:
scale_factor = 1
view_box = "{0} {1} {2} {3}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{0})".format(ymax + ymin)
return svg_top + (
'width="{1}" height="{2}" viewBox="{0}" '
'preserveAspectRatio="xMinYMin meet">'
'<g transform="{3}">{4}</g></svg>'
).format(view_box,
width,
height,
transform,
svg)
return
#----------------------------------------------------------------------
def set_geometry(self, col, sr=None):
"""Assigns the Geometry Column by Name or by List"""
from ._array import GeoArray
if isinstance(col, str) and \
col in self._data.columns and \
self._data[col].dtype.name.lower() != 'geometry':
idx = self._data[col].first_valid_index()
if sr is None:
try:
g = self._data.iloc[idx][col]
if isinstance(g, dict):
self._sr = SpatialReference(Geometry(g['spatialReference']))
else:
self._sr = SpatialReference(g['spatialReference'])
except:
self._sr = SpatialReference({'wkid' : 4326})
self._name = col
q = self._data[col].isna()
self._data.loc[q, "SHAPE"] = None
self._data[col] = GeoArray(self._data[col])
elif isinstance(col, str) and \
col in self._data.columns and \
self._data[col].dtype.name.lower() == 'geometry':
self._name = col
#self._data[col] = self._data[col]
elif isinstance(col, str) and \
col not in self._data.columns:
raise ValueError(
"Column {name} does not exist".format(name=col))
elif isinstance(col, pd.Series):
self._data['SHAPE'] = GeoArray(col.values)
self._name = "SHAPE"
elif isinstance(col, GeoArray):
self._data['SHAPE'] = col
self._name = "SHAPE"
elif isinstance(col, (list, tuple)):
self._data['SHAPE'] = GeoArray(values=col)
self._name = "SHAPE"
else:
raise ValueError(
"Column {name} is not valid. Please ensure it is of type Geometry".format(name=col))
#----------------------------------------------------------------------
@property
def name(self):
"""returns the name of the geometry column"""
if self._name is None:
try:
cols = [c.lower() for c in self._data.columns.tolist()]
if any(self._data.dtypes == 'geometry'):
name = self._data.dtypes[self._data.dtypes == 'geometry'].index[0]
self.set_geometry(name)
elif "shape" in cols:
idx = cols.index("shape")
self.set_geometry(self._data.columns[idx])
except:
raise Exception("Spatial column not defined, please use `set_geometry`")
return self._name
#----------------------------------------------------------------------
def validate(self, strict=False):
"""
Determines if the Geo Accessor is Valid with Geometries in all values
"""
if self._name is None:
return False
if strict:
q = self._data[self.name].notna()
gt = pd.unique(self._data[q][self.name].geom.geometry_type)
if len(gt) == 1:
return True
else:
return False
else:
q = self._data[self.name].notna()
            return all(pd.unique(self._data[q][self.name].geom.is_valid))  # completion (api: pandas.unique)
import sys
import os
import json
import pandas as pd
import requests
import collections
import dill
import traceback
from urllib.parse import quote_from_bytes
from featurehub.util import (
compute_dataset_hash, run_isolated, get_source, possibly_talking_action,
myhash
)
from featurehub.admin.sqlalchemy_declarative import Problem, Feature
from featurehub.evaluation import EvaluationResponse
from featurehub.modeling import Model
class EvaluatorClient(object):
def __init__(self, problem_id, username, orm, dataset={}, target=None,
entities_featurized=None):
self.problem_id = problem_id
self.username = username
self.orm = orm
self.dataset = dataset
self.target = target
self.entities_featurized = entities_featurized
if self.dataset:
self.__dataset_hash = compute_dataset_hash(self.dataset)
else:
self.__dataset_hash = None
def check_if_registered(self, feature, verbose=False):
"""Check if feature is registered.
Extracts source code, then looks for the identical source code in the
feature database.
Parameters
----------
feature : function
verbose : bool
Whether to print output.
"""
code = get_source(feature)
return self._check_if_registered(code, verbose=verbose)
def _check_if_registered(self, code, verbose=False):
md5 = myhash(code)
with self.orm.session_scope() as session:
filters = (
Feature.problem_id == self.problem_id,
Feature.md5 == md5,
)
query = session.query(Feature).filter(*filters)
result = query.scalar()
if result:
if verbose:
print("Feature already registered.")
return True
return False
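    # Hedged usage note (not in the original source): registration is detected by
    # hashing the feature's source code, so a byte-for-byte identical function is
    # reported as already registered even if redefined in a new session.
    #
    #   >>> def hours_of_day(dataset):                    # hypothetical feature
    #   ...     return dataset['orders']['time'].dt.hour.to_frame()
    #   >>> evaluator.check_if_registered(hours_of_day, verbose=True)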
def submit(self, feature, description):
"""Submit feature to server for evaluation on test data.
If successful, registers feature in feature database and returns key
performance metrics.
Runs the feature in an isolated environment to extract the feature
values. Validates the feature values. Then, builds a model on that one
feature, performs cross validation, and returns key performance
metrics.
Parameters
----------
feature : function
Feature to evaluate
description : str
Feature description
"""
from featurehub.user.session import Session
feature_dill = quote_from_bytes(dill.dumps(feature))
code = get_source(feature)
data = {
"database" : self.orm.database,
"problem_id" : self.problem_id,
"feature_dill" : feature_dill,
"code" : code,
"description" : description,
}
response = Session._eval_server_post("submit", data)
if response.ok:
try:
eval_response = EvaluationResponse.from_string(response.text)
print(eval_response)
except Exception as e:
# TODO
print("response failed with exception")
print(traceback.format_exc(), file=sys.stderr)
try:
print(response, file=sys.stderr)
print(response.text, file=sys.stderr)
except Exception:
pass
else:
# TODO
print("response failed with bad status code")
try:
print(response, file=sys.stderr)
print(response.text, file=sys.stderr)
except Exception:
pass
def evaluate(self, feature):
"""Evaluate feature on training dataset and return key performance metrics.
Runs the feature in an isolated environment to extract the feature
values. Validates the feature values. Then, builds a model on that one
feature and computes key cross-validated metrics. Prints results and
returns a dictionary with (metric => value) entries. If the feature is
invalid, prints reason and returns empty dictionary.
Parameters
----------
feature : function
Feature to evaluate
"""
try:
metrics = self._evaluate(feature, verbose=True)
metrics_str = metrics.to_string(kind="user")
metrics_user = metrics.convert(kind="user")
print(metrics_str)
except ValueError as e:
print("Feature is not valid: {}".format(str(e)), file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
metrics_user = {}
try:
# TODO this can be an async procedure
self._log_evaluation_attempt(feature)
except Exception:
pass
return metrics_user
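    # Hedged usage sketch (not in the original source): `evaluate` returns a plain
    # dict of cross-validated metrics, or {} when the feature is invalid. Metric
    # names depend on the problem type.
    #
    #   >>> metrics = evaluator.evaluate(hours_of_day)    # hypothetical feature
    #   >>> sorted(metrics.keys())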
def _log_evaluation_attempt(self, feature):
from featurehub.user.session import Session
code = get_source(feature)
data = {
"database" : self.orm.database,
"problem_id" : self.problem_id,
"code" : code,
}
Session._eval_server_post("log-evaluation-attempt", data)
def _evaluate(self, feature, verbose=False):
with possibly_talking_action("Obtaining dataset...", verbose):
self._load_dataset()
with possibly_talking_action("Extracting features...", verbose):
feature_values = self._extract_features(feature)
# confirm dataset has not been changed
with possibly_talking_action("Verifying dataset integrity...", verbose):
self._verify_dataset_integrity()
# validate
with possibly_talking_action("Validating feature values...", verbose):
result = self._validate_feature_values(feature_values)
# full feature matrix
with possibly_talking_action("Building full feature matrix...",
verbose):
X = self._build_feature_matrix(feature_values)
# target values
# with possibly_talking_action("Extracting target values...", verbose):
Y = self._extract_label()
# compute metrics
with possibly_talking_action("Fitting model and computing metrics...", verbose):
metrics = self._compute_metrics(X, Y)
return metrics
#
# The rest of these methods are subroutines within _evaluate, or utility
# functions of those subroutines.
#
def _create_model(self):
with self.orm.session_scope() as session:
problem = session.query(Problem)\
.filter(Problem.id == self.problem_id).one()
problem_type = problem.problem_type
return Model(problem_type)
def _compute_metrics(self, X, Y):
model = self._create_model()
metrics = model.compute_metrics_cv(X, Y)
return metrics
def _extract_features(self, feature):
assert isinstance(feature, collections.Callable), \
"feature must be a function!"
return run_isolated(feature, self.dataset)
def _extract_label(self):
if pd.DataFrame(self.target).empty:
self._load_dataset()
return self.target
def _load_dataset_split(self,
split="train",
dataset={},
entities_featurized=None,
target=None,
dataset_hash=None,
compute_hash=True):
# query db for import parameters to load files
is_present_dataset = bool(dataset)
is_present_entities_featurized = not pd.DataFrame(entities_featurized).empty
is_present_target = not pd.DataFrame(target).empty
is_anything_missing = not all(
[is_present_dataset, is_present_entities_featurized, is_present_target])
if is_anything_missing:
with self.orm.session_scope() as session:
problem = session.query(Problem)\
.filter(Problem.id == self.problem_id).one()
problem_data_dir = getattr(problem,
"data_dir_{}".format(split))
problem_files = json.loads(problem.files)
problem_table_names = json.loads(problem.table_names)
problem_entities_featurized_table_name = \
problem.entities_featurized_table_name
problem_target_table_name = problem.target_table_name
# load entities and other tables
if not is_present_dataset:
# load other tables
for (table_name, filename) in zip (problem_table_names,
problem_files):
if table_name == problem_entities_featurized_table_name or \
table_name == problem_target_table_name:
continue
abs_filename = os.path.join(problem_data_dir, filename)
dataset[table_name] = pd.read_csv(abs_filename,
low_memory=False, header=0)
# compute/recompute hash
if compute_hash:
dataset_hash = compute_dataset_hash(dataset)
else:
dataset_hash = None
# Recompute dataset hash. This condition is only met if the dataset had
# already been loaded but its hash had not yet been computed (otherwise we
# just computed the hash a few lines above).
if compute_hash:
if not dataset_hash:
dataset_hash = compute_dataset_hash(dataset)
# load entities featurized
if not is_present_entities_featurized:
# if empty string, we simply don't have any features to add
if problem_entities_featurized_table_name:
cols = list(problem_table_names)
ind_features = cols.index(problem_entities_featurized_table_name)
abs_filename = os.path.join(problem_data_dir,
problem_files[ind_features])
entities_featurized = pd.read_csv(abs_filename,
low_memory=False, header=0)
# load target
if not is_present_target:
cols = list(problem_table_names)
ind_target = cols.index(problem_target_table_name)
abs_filename = os.path.join(problem_data_dir,
problem_files[ind_target])
# target might not exist if we are making predictions on unseen
# test data
if os.path.exists(abs_filename):
target = pd.read_csv(abs_filename, low_memory=False, header=0)
else:
target = None
return dataset, entities_featurized, target, dataset_hash
def _load_dataset(self):
"""Load dataset if not present.
Also computes/re-computes dataset hash.
"""
# TODO check for dtypes file, facilitating lower memory usage
self.dataset, self.entities_featurized, self.target, \
self.__dataset_hash = self._load_dataset_split(
split="train", dataset=self.dataset,
entities_featurized=self.entities_featurized,
target=self.target, dataset_hash=self.__dataset_hash)
def _reload_dataset(self):
"""Force reload of dataset.
Doesn't reload entities_featurized or target, because we only call this
routine when the dataset hash has changed.
"""
self.dataset = {}
self._load_dataset()
def _validate_feature_values(self, feature_values):
"""Check whether feature values are valid.
Currently checks if the feature is a DataFrame of the correct
dimensions. If the feature is valid, returns an empty string. Otherwise,
raises ValueError whose message is a semicolon-delimited list of the
reasons the feature is invalid.
Parameters
----------
feature_values : np array-like
Returns
-------
Empty string if feature values are valid.
Raises
------
ValueError whose message is a semicolon-delimited list of the reasons
the feature is invalid.
"""
problems = []
# must be coerced to DataFrame
try:
feature_values_df = pd.DataFrame(feature_values)
except Exception:
problems.append("cannot be coerced to DataFrame")
problems = "; ".join(problems)
raise ValueError(problems)
if pd.DataFrame(self.target).empty:
self._load_dataset()
# must have the right shape
expected_shape = (self.target.shape[0], 1) # pylint: disable=no-member
if feature_values_df.shape != expected_shape:
problems.append(
"returns DataFrame of invalid shape "
"(actual {}, expected {})".format(
feature_values_df.shape, expected_shape)
)
problems = "; ".join(problems)
if problems:
raise ValueError(problems)
# problems must be an empty string
return problems
def _verify_dataset_integrity(self):
new_hash = compute_dataset_hash(self.dataset)
if self.__dataset_hash != new_hash:
print("Old hash: {}".format(self.__dataset_hash), file=sys.stderr)
print("New hash: {}".format(new_hash), file=sys.stderr)
#TODO exception handling
self._reload_dataset()
def _build_feature_matrix(self, feature_values):
values_df = pd.DataFrame(feature_values)
if not
|
pd.DataFrame(self.entities_featurized)
|
pandas.DataFrame
|
import glob
import itertools
import json
import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow import keras
import nanotune as nt
from nanotune.classification.classifier import (DEFAULT_CLF_PARAMETERS,
DEFAULT_DATA_FILES,
METRIC_NAMES, Classifier)
logger = logging.getLogger(__name__)
metric_mapping = {
"accuracy_score": "accuracy",
"auc": "AUC",
"average_precision_recall": "precision recall",
"brier_score_loss": "Brier loss",
}
def qf_model(
input_shape: Tuple[int, int, int, int],
learning_rate: float = 0.001,
) -> keras.Sequential:
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(1024, activation="relu"))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(512, activation="relu"))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(128, activation="relu"))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(2, activation="softmax"))
model.compile(
loss=keras.losses.mean_squared_error, # categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=learning_rate),
metrics=["accuracy"],
)
return model
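# Illustrative usage sketch (not part of nanotune): the models here expect
# single samples shaped like the 50 x 50 maps loaded further below, e.g.
#
#     model = qf_model(input_shape=(50, 50, 1), learning_rate=0.001)
#     model.summary()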
def my_model(
input_shape: Tuple[int, int, int, int],
learning_rate: float = 0.001,
) -> keras.Sequential:
""" """
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(
keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(1024, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(512, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(128, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation="softmax"))
model.compile(
loss=keras.losses.mean_squared_error, # categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=learning_rate),
metrics=["accuracy"],
)
return model
def load_syn_data(
data_files: Optional[Dict[str, List[str]]] = None,
data_types: Optional[List[str]] = None,
for_CNN: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
""""""
if data_files is None:
# data_files = {
# 'qflow': ['qflow_data_large.npy'],
# 'capa': ['noiseless_data.npy'],
# }
data_files = {
"qflow": [
"augmented_qf_data1.npy",
"augmented_qf_data2.npy",
"augmented_qf_data3.npy",
],
"capa": [
"augmented_cm_data1.npy",
"augmented_cm_data2.npy",
"augmented_cm_data3.npy",
],
}
else:
if not all(elem in data_files.keys() for elem in ["qflow", "capa"]):
print('data_files must contain following keys: "qflow", "capa".')
raise ValueError
if data_types is None:
data_types = ["signal"]
qf_data, qf_labels = _load_data(
data_files["qflow"],
data_types=data_types,
)
qf_data = qf_data * 2
cm_data, cm_labels = _load_data(
data_files["capa"],
data_types=data_types,
)
cm_data = cm_data * 0.6
syn_data = np.concatenate((qf_data, cm_data), axis=0)
syn_labels = np.concatenate((qf_labels, cm_labels), axis=0)
p = np.random.permutation(len(syn_labels))
syn_data = syn_data[p]
syn_labels = syn_labels[p]
if not for_CNN and len(data_types) == 2:
syn_labels = np.argmax(syn_labels, axis=1)
m = syn_labels.shape[0]
syn_curr = syn_data[:, :, :, 0].reshape(m, -1)
syn_freq = syn_data[:, :, :, 1].reshape(m, -1)
syn_data = np.concatenate((syn_curr, syn_freq), axis=1)
else:
logger.warning(
"No data reshaping for parametric binary classifiers" + " was performed."
)
return syn_data, syn_labels
def load_exp_data(
which: List[str],
data_files: Optional[Dict[str, List[str]]] = None,
data_types: Optional[List[str]] = None,
for_CNN: bool = True,
) -> List[Tuple[np.ndarray, np.ndarray]]:
""""""
if data_files is None:
# data_files = {
# 'clean': ['clean_exp_dots.npy'],
# 'good': ['exp_dots_corrected.npy'],
# 'bad': ['exp_dots_corrected.npy'],
# 'good_minus_clean': ['exp_dots_minus_clean.npy'],
# 'good_and_bad': ['exp_dots_corrected.npy'],
# 'good_and_bad_minus_clean': None,
# }
data_files = {
"clean": [
"augmented_clean_exp_dots1.npy",
"augmented_clean_exp_dots2.npy",
],
"good": [
"augmented_exp_dots_corrected1.npy",
"augmented_exp_dots_corrected2.npy",
"augmented_exp_dots_corrected3.npy",
],
"bad": [
"augmented_exp_dots_corrected1.npy",
"augmented_exp_dots_corrected2.npy",
"augmented_exp_dots_corrected3.npy",
],
"good_minus_clean": [
"augmented_exp_dots_minus_clean1.npy",
"augmented_exp_dots_minus_clean2.npy",
"augmented_exp_dots_minus_clean3.npy",
],
"good_and_bad": [
"augmented_exp_dots_corrected1.npy",
"augmented_exp_dots_corrected2.npy",
"augmented_exp_dots_corrected3.npy",
],
"good_and_bad_minus_clean": [],
}
if data_types is None:
data_types = ["signal"]
exp_data_all = []
for dtype in which:
if dtype == "good_and_bad":
all_data, all_labels = _load_good_and_poor(
data_files[dtype], data_types=data_types
)
exp_data_all.append((all_data, all_labels))
elif dtype == "bad":
all_data, all_labels = _load_data(
data_files[dtype], data_types=data_types, relevant_labels=[0, 2]
)
exp_data_all.append((all_data, all_labels))
elif dtype == "good_and_bad_minus_clean":
if data_files["good_and_bad_minus_clean"] is None:
f_name = data_files["good_minus_clean"]
else:
f_name = data_files["good_and_bad_minus_clean"]
all_data, all_labels = _load_good_and_poor(f_name, data_types=data_types)
exp_data_all.append((all_data, all_labels))
elif dtype in ["clean", "good", "good_minus_clean"]:
# not in ['good_and_bad', 'good_and_bad_minus_clean', 'bad']:
data, labels = _load_data(
data_files[dtype],
data_types=data_types,
)
exp_data_all.append((data, labels))
else:
logger.error("Trying to load unknown data.")
if not for_CNN and len(data_types) == 2:
for idd, sub_data in enumerate(exp_data_all):
data = sub_data[0]
labels = sub_data[1]
labels = np.argmax(labels, axis=1)
m = labels.shape[0]
curr = data[:, :, :, 0].reshape(m, -1)
freq = data[:, :, :, 1].reshape(m, -1)
data = np.concatenate((curr, freq), axis=1)
exp_data_all[idd] = (data, labels)
else:
logger.warning(
"No data reshaping for parametric binary classifiers" + " was performed."
)
return exp_data_all
def _load_good_and_poor(
filenames: List[str],
data_types: Optional[List[str]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
""""""
if data_types is None:
data_types = ["signal"]
if isinstance(filenames, str):
filenames = [filenames]
singledots, single_labels = _load_data(
filenames,
data_types=data_types,
regime="singledot",
)
doubledots, double_labels = _load_data(
filenames,
data_types=data_types,
regime="doubledot",
)
single_labels = np.argmax(single_labels, axis=1)
double_labels = np.argmax(double_labels, axis=1)
n_each = int(np.min([len(single_labels), len(double_labels)]))
sd_ids = np.random.choice(n_each, n_each, replace=False).astype(int)
dd_ids = np.random.choice(n_each, n_each, replace=False).astype(int)
singledot = singledots[sd_ids]
sd_labels = np.zeros(n_each, dtype=int)
doubledot = doubledots[dd_ids]
dd_labels = np.ones(n_each, dtype=int)
all_data = np.concatenate((singledot, doubledot), axis=0)
all_labels = np.concatenate((sd_labels, dd_labels), axis=0)
p = np.random.permutation(len(all_labels))
all_data = all_data[p]
all_labels = all_labels[p]
all_labels = keras.utils.to_categorical(all_labels)
return all_data, all_labels
def _load_data(
files: List[str],
regime: str = "dotregime",
data_types: Optional[List[str]] = None,
shuffle: bool = True,
relevant_labels: Optional[List[int]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Load data from multiple data files, but do it separately for each file to
ensure 'select_equal_populations' won't accidentally select data mainly
from one file.
"""
if data_types is None:
data_types = ["signal", "frequencies"]
data = np.empty([0, 50, 50, len(data_types)])
labels = np.empty([0])
for dfile in files:
data_loader = Classifier(
[dfile],
regime,
data_types=data_types,
relevant_labels=relevant_labels,
)
(sub_data, sub_labels) = data_loader.select_equal_populations(
data_loader.original_data, data_loader.labels
)
m = sub_data.shape[0]
if len(data_types) > 2:
raise NotImplementedError
if len(data_types) == 2:
data_sig = sub_data[:, :2500].reshape(m, 50, 50, 1)
data_frq = sub_data[:, 2500:].reshape(m, 50, 50, 1)
sub_data = np.concatenate((data_sig, data_frq), axis=3)
# print(sub_data.shape)
# print(data.shape)
if len(data_types) == 1:
sub_data = sub_data.reshape(m, 50, 50, 1)
data = np.concatenate((data, sub_data), axis=0)
labels = np.concatenate((labels, sub_labels), axis=0)
if shuffle:
p = np.random.permutation(len(labels))
data = data[p]
labels = labels[p]
labels = keras.utils.to_categorical(labels)
return data, labels
def select_equal_populations(
data: np.ndarray,
labels: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Make sure we have 50% of one and 50% of the other population.
"""
# self.data_to_use = copy.deepcopy(self.original_data)
populations_labels, population_counts = np.unique(labels, return_counts=True)
n_each = int(np.min(population_counts))
new_data = np.empty([n_each * len(populations_labels), data.shape[-1]])
new_labels = np.empty(n_each * len(populations_labels), int)
for ii, label in enumerate(populations_labels):
idx = np.where(labels == int(label))
idx = np.random.choice(idx[0], n_each, replace=False)
idx = idx.astype(int)
dat = data[idx]
new_data[ii * n_each : (ii + 1) * n_each] = dat
label_array = np.ones(n_each, dtype=int) * int(label)
new_labels[ii * n_each : (ii + 1) * n_each] = label_array
p = np.random.permutation(len(new_labels))
return new_data[p], new_labels[p]
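# The sketch below is illustrative only (not part of nanotune); it shows the
# balancing behaviour of select_equal_populations on a small toy array.
def _example_select_equal_populations() -> None:
    """Balance a toy dataset with 7 samples of label 0 and 3 of label 1."""
    rng = np.random.default_rng(0)
    toy_data = rng.normal(size=(10, 4))
    toy_labels = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
    balanced_data, balanced_labels = select_equal_populations(toy_data, toy_labels)
    # both labels now appear min(7, 3) = 3 times, in shuffled order
    print(np.unique(balanced_labels, return_counts=True))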
def print_data_stats(data, labels) -> None:
print("number of samples: {}".format(data.shape[0]))
print(
"populations (number and count): {}, {}".format(
*np.unique(labels, return_counts=True)
)
)
print("\n")
print("max value: {}".format(np.max(data)))
print("min value: {}".format(np.min(data)))
print("std: {}".format(np.std(data)))
print("median: {}".format(np.median(data)))
a = np.hstack(data[:500].flatten())
# _ = plt.hist(a, bins=100, range=[np.min(data), np.min(data) + 2*np.std(data)]) # arguments are passed to np.histogram
_ = plt.hist(a, bins=100, range=[np.min(data), 1])
plt.title("Histogram qf_data")
plt.show()
def feature_combination_metrics(
classifier: str,
data_filenames: List[str],
category: str,
metric: str = "accuracy_score",
filename: Optional[str] = None,
classifier_parameters: Dict[str, Union[str, float, int]] = {},
feature_indexes: List[int] = [0],
n_iter: int = 75,
) -> Dict[str, Any]:
""""""
# TODO: Fix this method. It is broken. Has not been used for a while
# n_feat = len(feature_indexes)
# scores: List[str] = []
# for k in range(1, n_feat+1):
# f_indx = itertools.combinations(range(1, n_feat+1), k)
# for f_combo in feature_indexes:
# qclf = Classifier(data_filenames,
# category,
# classifier=classifier,
# hyper_parameters=classifier_parameters,
# data_types=['features'],
# feature_indexes=list(f_combo),
# )
# infos = qclf.compute_metrics(n_iter=n_iter)
# features_str = ''
# sub_feat = [features[f] for f in f_combo]
# scores.append([', '.join(sub_feat), infos[metric]['mean'],
# infos[metric]['std']])
# info_dict = {
# 'stage': category,
# 'classifier': qclf.clf_type,
# 'classifier_parameters': qclf.clf.get_params(),
# 'n_iter': n_iter,
# 'data_files': qclf.file_paths,
# 'scores': scores,
# 'metric': metric,
# }
# if filename is None:
# filename = qclf.clf_type + '_' + metric + '.json'
# path = os.path.join(nt.config['db_folder'], category + '_features_metrics')
# if not os.path.exists(path):
# os.makedirs(path)
# path = os.path.join(path, filename)
# with open(path, 'w') as f:
# json.dump(info_dict, f)
# return info_dict
logger.warning("feature_combination_metrics under construction")
return {}
# def feature_metric_to_latex(directory: str,
# filenames: List[str],
# tables_folder: str) -> None:
# """
# """
# metric = 'accuracy_score'
# classifiers = ['SVC_rbf', 'SVC_linear', 'MLPClassifier']
# stage = 'po'
# directory = '/Users/jana/Documents/code/nanotune/measurements/databases/' + stage + '_features_metrics'
# for classifier in classifiers:
# filename = classifier + '_' + metric + '.json'
# path = os.path.join(directory, filename)
# with open(path) as f:
# feat_data = json.load(f)
# header = ['features', 'mean ' + metric, 'std']
# scores = sorted(feat_data['scores'], key=itemgetter(1), reverse=True)[0:40]
# df = pd.DataFrame(scores)
# filepath = os.path.join(tables_folder, stage + '_' + classifier + '_' + metric +'.tex')
# with open(filepath, 'w') as tf:
# with pd.option_context("max_colwidth", 1000):
# tf.write(df.to_latex(index=False,
# formatters=[dont_format, format_float,
# format_float],
# header=header,
# column_format='lcc').replace('\\toprule', '\\hline').replace('\\midrule', '\\hline').replace('\\bottomrule','\\hline'))
def performance_metrics_to_latex(
tables_directory: str,
metric: str = "accuracy_score",
file_directory: Optional[str] = None,
) -> None:
""""""
categories: Dict[str, Tuple[str, List[List[str]]]] = {
"pinchoff": (
"pinchoff",
[["signal"], ["frequencies"], ["frequencies", "signal"], ["features"]],
),
"singledot": ("dots", [["signal"], ["frequencies"], ["signal", "frequencies"]]),
"doubledot": ("dots", [["signal"], ["frequencies"], ["frequencies", "signal"]]),
"dotregime": ("dots", [["signal"], ["frequencies"], ["frequencies", "signal"]]),
}
classifiers = [
"DecisionTreeClassifier",
"GaussianProcessClassifier",
"KNeighborsClassifier",
"LogisticRegression",
"MLPClassifier",
"QuadraticDiscriminantAnalysis",
"RandomForestClassifier",
"SVC",
]
if file_directory is None:
file_directory = os.path.join(nt.config["db_folder"], "classifier_metrics")
header2 = [
"classifier ",
metric_mapping[metric],
"evaluation time [s]",
metric_mapping[metric],
"evaluation time [s]",
]
for category, settings in categories.items():
data_file = settings[0]
data_types = settings[1]
for data_type in data_types:
scores = []
base_pattern = data_file + "_" + category + "*"
all_files = glob.glob(os.path.join(file_directory, base_pattern))
pattern = "_".join(data_type)
rel_files = [f for f in all_files if pattern in f]
for d_type in nt.config["core"]["data_types"]:
if d_type not in data_type:
print(d_type)
rel_files = [f for f in rel_files if d_type not in f]
for classifier in classifiers:
clf_files = [f for f in rel_files if classifier in f]
sub_score = [classifier]
for pca_setting in ["no_PCA", "PCA."]:
if pca_setting == "PCA.":
files = [f for f in clf_files if pca_setting in f]
else:
files = [f for f in clf_files if "PCA" not in f]
if len(files) > 1:
print("error")
print(files)
with open(files[0]) as json_file:
data = json.load(json_file)
sub_score.extend(
[
"{0:.3f}".format(float(data[metric]["mean"]))
+ " $\pm$ "
+ "{0:.3f}".format(float(data[metric]["std"])),
format_time(float(data["mean_test_time"]))
+ " $\pm$ "
+ format_time(float(data["std_test_time"])),
]
)
scores.append(sub_score)
df = pd.DataFrame(scores)
filepath = tables_directory + category + "_" + pattern + ".tex"
with open(filepath, "w") as tf:
output = df.to_latex(
index=False,
formatters=[
dont_format,
dont_format,
dont_format,
dont_format,
dont_format,
],
header=header2,
column_format="@{\extracolsep{6pt}}lcccc",
escape=False,
)
output = output.replace(
"\\toprule",
"\\hline \\hline & \multicolumn{2}{c}{PCA} & \multicolumn{2}{c}{no PCA} \\\\ \cline{2-3} \cline{4-5} ",
)
output = output.replace("\\midrule", "\\hline")
output = output.replace("\\bottomrule", "\\hline \\hline")
tf.write(output)
def performance_metrics_to_figure(
data_file: str,
category: str,
data_types: List[str],
metric: str,
figure_directory: Optional[str] = None,
file_directory: Optional[str] = None,
rcparams: Optional[Dict[str, object]] = None,
) -> None:
""""""
if rcparams is not None:
matplotlib.rcParams.update(rcparams)
if file_directory is None:
file_directory = os.path.join(nt.config["db_folder"], "classifier_metrics")
base_pattern = data_file + "_" + category + "*"
all_files = glob.glob(os.path.join(file_directory, base_pattern))
for data_type in data_types:
pattern = "_".join(data_type)
rel_files = [f for f in all_files if pattern in f]
for d_type in nt.config["core"]["data_types"]:
if d_type not in data_type:
rel_files = [f for f in rel_files if d_type not in f]
file_dict = {
"PCA": [f for f in rel_files if "PCA." in f],
"PCA_scaled_PC": [f for f in rel_files if "PCA_scaled" in f],
"no_PCA": [f for f in rel_files if "PCA" not in f],
}
for pca_setting, files in file_dict.items():
names = []
means = []
stds = []
for file in files:
with open(file) as json_file:
data = json.load(json_file)
names.append(data["classifier"])
means.append(float(data[metric]["mean"]))
stds.append(float(data[metric]["std"]))
means = [x for _, x in sorted(zip(names, means), key=lambda pair: pair[0])]
stds = [x for _, x in sorted(zip(names, stds), key=lambda pair: pair[0])]
names = sorted(names)
fig = plt.figure()
plt.plot(names, means, "o")
plt.xticks(rotation=45, ha="right")
plt.ylim([0, 1])
fig.tight_layout()
if figure_directory is None:
figure_directory = file_directory
filepath = figure_directory + category + "_" + pattern + "_"
filepath = filepath + metric + "_" + pca_setting + "_all_clf.eps"
plt.savefig(
filepath,
format="eps",
bbox_inches="tight",
)
plt.show()
def plot_metric_fluctuations(
category: str,
data_types: List[List[str]],
data_file: Optional[str] = None,
metrics: Optional[List[str]] = None,
classifier: Optional[str] = None,
figure_directory: Optional[str] = None,
file_directory: Optional[str] = None,
rcparams: Optional[Dict[str, object]] = None,
) -> None:
""""""
if rcparams is not None:
matplotlib.rcParams.update(rcparams)
if data_file is None:
data_file = DEFAULT_DATA_FILES[category]
if metrics is None:
metrics = METRIC_NAMES
if file_directory is None:
file_directory = os.path.join(nt.config["db_folder"], "classifier_stats")
base_pattern = "metric_fluctuations_" + os.path.splitext(data_file)[0]
base_pattern = base_pattern + "_" + category
if classifier is None:
base_pattern = base_pattern + "*"
else:
assert classifier in DEFAULT_CLF_PARAMETERS.keys()
base_pattern = base_pattern + "_" + classifier + "*"
all_files = glob.glob(os.path.join(file_directory, base_pattern))
for data_type in data_types:
pattern = "_".join(data_type)
rel_files = [f for f in all_files if pattern in f]
for d_type in nt.config["core"]["data_types"]:
if d_type not in data_type:
rel_files = [f for f in rel_files if d_type not in f]
rel_files = [f for f in rel_files if ".json" in f]
# print(rel_files)
for file in rel_files:
with open(file) as json_file:
info_dict = json.load(json_file)
means = info_dict["mean_metric_variations"]
stds = info_dict["std_metric_variations"]
colors = ["r", "g", "b", "y", "m"]
fig, ax = plt.subplots(1, 1)
for m_id, metric in enumerate(metrics):
# if metric is not 'confusion_matrix':
ax.plot(
means[m_id], c=colors[m_id], label="mean " + metric_mapping[metric]
)
ax.plot(
stds[m_id],
c=colors[m_id],
linestyle="dotted",
label="std " + metric_mapping[metric],
)
title = "Metric Fluctuations " + info_dict["category"]
title = title + " (" + " ".join(data_type) + ")"
ax.set_title(title)
ax.set_xlabel("\# re-draws")
ax.set_ylabel("score")
ax.set_ylim((0, 1))
ax.set_xticks(np.round(np.linspace(0, len(means[0]), 5), 2))
ax.legend(loc="upper left", bbox_to_anchor=(0.8, 1))
fig.tight_layout()
print(figure_directory)
if figure_directory is None:
figure_directory = file_directory
filename = os.path.splitext(os.path.basename(file))[0]
print(filename)
plt.savefig(
os.path.join(figure_directory, filename + ".eps"),
format="eps",
bbox_inches="tight",
)
plt.show()
def summarize_hyper_parameter_optimization(
directory: Optional[str] = None,
filename: Optional[str] = None,
to_latex: bool = True,
table_folder: Optional[str] = None,
) -> Dict[str, Dict[str, Any]]:
""""""
if directory is None:
directory = os.path.join(nt.config["db_folder"], "classifier_hyperparams")
if filename is None:
all_files = glob.glob(directory + "/*.json")
filename = max(all_files, key=os.path.getctime)
if table_folder is None:
table_folder = directory
f_path = os.path.join(directory, filename)
with open(f_path, "r") as f:
hparams = json.load(f)
besties: Dict[str, Dict[str, Any]] = {}
for clf in hparams.keys():
for category in hparams[clf].keys():
try:
besties[category][clf] = {}
except Exception:
besties[category] = {}
besties[category][clf] = {}
best_score = 0.0
best_option = {}
best_dtype = None
for dtype in hparams[clf][category].keys():
for hparams_comb in hparams[clf][category][dtype]:
if float(hparams_comb[1]) > best_score:
best_option = hparams_comb[0]
best_score = hparams_comb[1]
best_dtype = dtype
# besties[clf][category][best_dtype] = [best_option, best_score]
besties[category][clf] = [best_dtype, best_option, best_score]
besties2 = {}
table_besties: Dict[str, Any] = {}
for cat in besties.keys():
best_score = 0.0
for clf in besties[cat].keys():
if besties[cat][clf][2] > best_score:
besties2[cat] = [clf] + besties[cat][clf]
table_besties[cat] = []
table_besties[cat].append(["category", cat])
table_besties[cat].append(["classifier", clf])
table_besties[cat].append(["data_type", besties[cat][clf][0]])
table_besties[cat].append(["accuracy_score", besties[cat][clf][2]])
for pname, param in besties[cat][clf][1].items():
table_besties[cat].append([pname, param])
best_score = besties[cat][clf][2]
# table_besties[cat] = [clf] + besties[cat][clf][0:1]
# table_besties[cat] += [besties[cat][clf][-1]]
if to_latex:
header = ["parameter", "value"]
for cat, param_table in table_besties.items():
df =
|
pd.DataFrame.from_dict(param_table)
|
pandas.DataFrame.from_dict
|
import numpy as np
import pandas as pd
lt = 'f:/lt/'
region = pd.read_csv(lt + 'region.csv',sep='\t', index_col=0)
# Exclude Inner Mongolia and Tibet (the commented-out list below); the active list keeps Inner Mongolia but not Tibet
# prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '海南', '吉林',
# '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '广西', '重庆', '四川', '贵州', '云南',
# '陕西', '甘肃', '青海', '宁夏', '新疆']
prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '广西', '海南',
'吉林', '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '重庆', '四川', '贵州', '云南',
'陕西', '甘肃', '青海', '宁夏', '新疆', '内蒙古']
years = ['2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012']
worker = pd.read_csv(lt + 'worker.csv', sep='\t', index_col=0).join(region)
capital = pd.read_csv(lt + 'capital.csv', sep='\t', index_col=0).join(region)
energy = pd.read_csv(lt + 'energy.csv', sep='\t', index_col=0).join(region)
gdp = pd.read_csv(lt + 'gdp.csv', sep='\t', index_col=0).join(region)
co2 = pd.read_csv(lt + 'co2.csv', sep='\t', index_col=0).join(region)
table = {'劳动力': worker, '资本': capital, '能源': energy, 'GDP': gdp, 'CO2': co2}
ll = []
ll_indexs = ['劳动力', '资本', '能源', 'GDP', 'CO2']  # labour force, capital, energy, GDP, CO2
# ll_columns = ['整体均值', '整体标准差', '东部均值', '东部标准差', '中部均值', '中部标准差', '西部均值', '西部标准差']
ll_columns = ['均值', '标准差', '最小值', '最大值']  # mean, std, min, max
for k, v in table.items():
print(k)
df = v.loc[prvs, :]
# Overall
val = df.loc[:, years].values.ravel()
avg = val.mean()
std = np.std(val, ddof=1)
mini = val.min()
maxi = val.max()
# East
val1 = df[df.rgn==1].loc[:, years].values.ravel()
avg1 = val1.mean()
std1 = np.std(val1, ddof=1)
# Central
val2 = df[df.rgn==2].loc[:, years].values.ravel()
avg2 = val2.mean()
std2 = np.std(val2, ddof=1)
# West
val3 = df[df.rgn==3].loc[:, years].values.ravel()
avg3 = val3.mean()
std3 = np.std(val3, ddof=1)
print(f'Overall\nmean {avg:.2f}\nstd {std:.2f}')
print(f'East\nmean {avg1:.2f}\nstd {std1:.2f}')
print(f'Central\nmean {avg2:.2f}\nstd {std2:.2f}')
print(f'West\nmean {avg3:.2f}\nstd {std3:.2f}')
# ll.append([avg, std, avg1, std1, avg2, std2, avg3, std3])
ll.append([avg, std, mini, maxi])
arr = np.array(ll)
df = pd.DataFrame(arr, ll_indexs, ll_columns)
df.to_csv(lt + 'table2_300.csv')
df.to_csv(lt + 'table6_290.csv')
df.to_csv(lt + 'table6_300.csv')
# eviews
eviews =
|
pd.read_csv(lt + 'eviews.csv', sep='\t')
|
pandas.read_csv
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from numpy import random
import json
import csv
import random
import os
import time
import itertools
from scipy.special import softmax
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from itertools import cycle
from collections import OrderedDict, defaultdict
from sklearn.metrics import f1_score, matthews_corrcoef, confusion_matrix, accuracy_score
import pandas as pd
import collections
from scipy.ndimage import zoom
from tabulate import tabulate
import shap
from glob import glob
#--------------------------------------------------------------------------------------------
# shap CNN heatmap utility functions
def shap_abs_mean(tb_log_dir, shap_dir, task):
mean, count = np.zeros((43, 52, 43)), 0
for i in range(5):
with open(tb_log_dir + 'cross{}/'.format(i) + 'test_eval.csv', 'r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if row[task]:
mean += np.abs(np.load(shap_dir + 'shap_{}_'.format(task) + row['filename']))
count += 1
mean = mean / count
print('averaged {} cases for the {} task'.format(count, task))
np.save(shap_dir + '{}_abs.npy'.format(task), mean)
def average_ADD_shapmap(tb_log_dir, shap_dir):
ADD, nADD, count_ADD, count_nADD = np.zeros((43, 52, 43)), np.zeros((43, 52, 43)), 0, 0
for i in range(5):
with open(tb_log_dir + 'cross{}/'.format(i) + 'test_eval.csv', 'r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if row['ADD'] in ['1', '1.0']:
ADD += np.load(shap_dir + 'shap_ADD_' + row['filename'])
count_ADD += 1
elif row['ADD'] in ['0', '0.0']:
nADD += np.load(shap_dir + 'shap_ADD_' + row['filename'])
count_nADD += 1
ADD = ADD / count_ADD
nADD = nADD / count_nADD
std = np.std(ADD)
ADD, nADD = ADD / std, nADD / std
print('averaged {} ADD cases and {} nADD cases'.format(count_ADD, count_nADD))
np.save(shap_dir + 'ADD.npy', ADD)
np.save(shap_dir + 'nADD.npy', nADD)
return shap_dir + 'ADD.npy', shap_dir + 'nADD.npy'
def average_ADD_shapmap_truepred(tb_log_dir, shap_dir):
ADD, nADD, count_ADD, count_nADD = np.zeros((43, 52, 43)), np.zeros((43, 52, 43)), 0, 0
for i in range(5):
with open(tb_log_dir + 'cross{}/'.format(i) + 'test_eval.csv', 'r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if row['ADD'] in ['1', '1.0'] and row['ADD_pred'] in ['1', '1.0']:
ADD += np.load(shap_dir + 'shap_ADD_' + row['filename'])
count_ADD += 1
elif row['ADD'] in ['0', '0.0'] and row['ADD_pred'] in ['0', '0.0']:
nADD += np.load(shap_dir + 'shap_ADD_' + row['filename'])
count_nADD += 1
ADD = ADD / count_ADD
nADD = nADD / count_nADD
# std = np.std(ADD)
# ADD, nADD = ADD / std, nADD / std
print('averaged {} ADD cases and {} nADD cases'.format(count_ADD, count_nADD))
np.save(shap_dir + 'ADD.npy', ADD)
np.save(shap_dir + 'nADD.npy', nADD)
return shap_dir + 'ADD.npy', shap_dir + 'nADD.npy'
def average_COG_shapmap_truepred(tb_log_dir, shap_dir):
NC, MCI, DE, count_NC, count_MCI, count_DE = np.zeros((43, 52, 43)), np.zeros((43, 52, 43)), np.zeros((43, 52, 43)), 0, 0, 0
for i in range(5):
with open(tb_log_dir + 'cross{}/'.format(i) + 'test_eval.csv', 'r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if row['COG'] in ['0', '0.0'] and row['COG_pred'] in ['0', '0.0']:
NC += np.load(shap_dir + 'shap_COG_' + row['filename'])
count_NC += 1
elif row['COG'] in ['1', '1.0'] and row['COG_pred'] in ['1', '1.0']:
MCI += np.load(shap_dir + 'shap_COG_' + row['filename'])
count_MCI += 1
elif row['COG'] in ['2', '2.0'] and row['COG_pred'] in ['2', '2.0']:
DE += np.load(shap_dir + 'shap_COG_' + row['filename'])
count_DE += 1
NC = NC / count_NC
MCI = MCI / count_MCI
DE = DE / count_DE
# std = np.std(NC)
# NC, MCI, DE = NC / std, MCI / std, DE / std
print('averaged {} NC cases and {} MCI cases and {} DE cases'.format(count_NC, count_MCI, count_DE))
np.save(shap_dir + 'NC.npy', NC)
np.save(shap_dir + 'MCI.npy', MCI)
np.save(shap_dir + 'DE.npy', DE)
# shap meshgrid plot
def plot_shap_heatmap(models, tasks, stage):
from matplotlib import rc, rcParams
rc('axes', linewidth=2)
rc('font', weight='bold')
rcParams.update({'font.size': 14})
heatmaps = [[] for _ in tasks]
for model in models:
common_path = 'tb_log/' + model + '_Fusion'
for i, task in enumerate(tasks):
name = 'shap_{}_{}.csv'.format(stage, task)
csv_files = [common_path + '_cross{}/'.format(j) + name for j in range(5)]
mean, std, columns = shap_stat(csv_files)
heatmaps[i].append(mean)
for i, task in enumerate(tasks):
heatmaps[i] = np.array(heatmaps[i])
hm, feature_names = get_common_top_N(heatmaps[i], columns)
for i, f in enumerate(feature_names):
feature_names[i] = f.lower()
if f == 'ADD_score': feature_names[i] = 'mri_add'
if f == 'COG_score': feature_names[i] = 'mri_cog'
for j in range(hm.shape[0]):
hm[j, :] = hm[j, :] / np.max(hm[j, :])
fig, ax = plt.subplots(figsize=(12, 6))
im, cbar = heatmap(hm, models, feature_names, ax=ax, vmin=0, vmax=1,
cmap="cool", cbarlabel="Relative Importance")
plt.savefig('shap_heatmap_{}.png'.format(task), dpi=200, bbox_inches='tight')
plt.close()
def CNN_shap_regions_heatmap(corre_file, name):
from matplotlib import rc, rcParams
rc('axes', linewidth=2)
rc('font', weight='bold')
rcParams.update({'font.size': 14})
with open(corre_file, 'r') as csv_file:
reader = csv.DictReader(csv_file)
column_names, hm = [], []
for row in reader:
for key in row:
column_names = key.split()
data = row[key].split()[1:]
data = np.array(list(map(float, data)))
hm.append(data)
hm = np.array(hm)
print(hm.shape)
fig, ax = plt.subplots(figsize=(12, 12))
im, cbar = heatmap(hm, column_names, column_names, ax=ax, vmin=-1, vmax=1,
cmap="hot", cbarlabel="Pearson Correlation")
plt.savefig('regional_heatmap_{}.png'.format(name), dpi=200, bbox_inches='tight')
plt.close()
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
code from https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, shrink=0.75, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom", fontsize=12, fontweight='black')
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
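# Illustrative sketch (not part of the original script): a minimal call of the
# heatmap helper on random data, mirroring how plot_shap_heatmap uses it.
def _example_heatmap() -> None:
    rng = np.random.default_rng(0)
    toy = rng.random((3, 5))
    fig, ax = plt.subplots(figsize=(6, 3))
    im, cbar = heatmap(toy, ["model A", "model B", "model C"],
                       ["f1", "f2", "f3", "f4", "f5"], ax=ax,
                       vmin=0, vmax=1, cmap="cool",
                       cbarlabel="Relative Importance")
    fig.savefig('example_heatmap.png', dpi=100, bbox_inches='tight')
    plt.close(fig)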
def get_common_top_N(heatmap, columns):
mean = np.mean(heatmap, axis=0)
indexes = mean.argsort()
return heatmap[:, indexes[-15:][::-1]], columns[indexes[-15:][::-1]].to_list()
# shap bar plot
def plot_shap_bar(path, model_name, stage, tasks, top):
from matplotlib import rc, rcParams
rc('axes', linewidth=2)
rc('font', weight='bold', size=15)
common_path = 'tb_log/' + model_name
for task in tasks:
name = 'shap_{}_{}.csv'.format(stage, task)
csv_files = [common_path + '_cross{}/'.format(i) + name for i in range(5)]
mean, std, columns = shap_stat(csv_files)
for i, f in enumerate(columns):
columns[i] = f.lower()
if f == 'ADD_score': columns[i] = 'mri_add'
if f == 'COG_score': columns[i] = 'mri_cog'
pool = get_top_N(mean, std, columns, top)
fig, ax = plt.subplots(figsize=(4, 10))
plt.barh([a[2] for a in pool], [a[0] for a in pool], color='r', xerr=[a[1] for a in pool], capsize=5)
ax.set_xlabel('Mean(|SHAP|)', fontsize=16, fontweight='black')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.spines['bottom'].set_position(('axes', 0.01))
ax.tick_params(axis='both', which='major', labelsize=16)
plt.savefig(path + 'shap_bar_{}.png'.format(task), dpi=200, bbox_inches='tight')
plt.close()
def plot_shap_beeswarm(path, SHAP, DATA, tasks, stage):
from matplotlib import rc, rcParams
rc('axes', linewidth=2)
rc('font', weight='bold', size=15)
for i, task in enumerate(tasks):
fig, ax = plt.subplots()
shap_values = np.concatenate([s[i] for s in SHAP], axis=0)
feature_values =
|
pd.concat([d[i] for d in DATA])
|
pandas.concat
|
# -*- coding: utf-8 -*-
import pandas as pd
import pandas.types.concat as _concat
import pandas.util.testing as tm
class TestConcatCompat(tm.TestCase):
def check_concat(self, to_concat, exp):
for klass in [pd.Index, pd.Series]:
to_concat_klass = [klass(c) for c in to_concat]
res = _concat.get_dtype_kinds(to_concat_klass)
self.assertEqual(res, set(exp))
def test_get_dtype_kinds(self):
to_concat = [['a'], [1, 2]]
self.check_concat(to_concat, ['i', 'object'])
to_concat = [[3, 4], [1, 2]]
self.check_concat(to_concat, ['i'])
to_concat = [[3, 4], [1, 2.1]]
self.check_concat(to_concat, ['i', 'f'])
def test_get_dtype_kinds_datetimelike(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'])]
self.check_concat(to_concat, ['datetime'])
to_concat = [
|
pd.TimedeltaIndex(['1 days'])
|
pandas.TimedeltaIndex
|
'''
Open Power System Data
Time series Datapackage
read.py : read time series files
'''
import pytz
import yaml
import os
import sys
import numpy as np
import pandas as pd
import logging
from datetime import datetime, date, time, timedelta
import xlrd
from xml.sax import ContentHandler, parse
from .excel_parser import ExcelHandler
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
def read_entso_e_transparency(
areas,
filepath,
dataset_name,
headers,
cols,
stacked,
unstacked,
append_headers,
**kwargs):
'''
Read a .csv file from ENTSO-E Transparency into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
dataset_name : str
Name of variable, e.g. ``solar``
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
cols : dict
A mapping of column names to read from the input file to the new names
to rename them to. The new name is the header level whose corresponding
values are specified in that column
stacked : list
List of strings indicating the header levels that are reported
column-wise in the input files
unstacked : str
One string indicating the header level that is reported row-wise in the
input files
append_headers : dict
Map of header levels and values to append to the MultiIndex
kwargs: dict
placeholder for further named function arguments
Returns
----------
df: pandas.DataFrame
The content of one file from ENTSO-E Transparency
'''
df_raw = pd.read_csv(
filepath,
sep='\t',
encoding='utf-16',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['DateTime']},
date_parser=None,
dayfirst=False,
decimal='.',
thousands=None,
usecols=cols.keys(),
)
# rename columns to comply with other data
df_raw.rename(columns=cols, inplace=True)
if dataset_name == 'Actual Generation per Production Type':
# keep only renewables columns
renewables = {
'Solar': 'solar',
'Wind Onshore': 'wind_onshore',
'Wind Offshore': 'wind_offshore'
}
df_raw = df_raw[df_raw['variable'].isin(renewables.keys())]
df_raw.replace({'variable': renewables}, inplace=True)
if dataset_name == 'Day-ahead Prices':
# Omit Polish price data reported in EUR (keeping PLN prices)
# (Before 2017-03-02, the data is very messy)
no_polish_euro = ~(
(df_raw['region'] == 'PSE SA BZ') &
(df_raw.index < pd.to_datetime('2017-03-02 00:00:00')))
df_raw = df_raw.loc[no_polish_euro]
if dataset_name in ['Actual Total Load', 'Day-ahead Total Load Forecast']:
# Zero load is highly unlikely. Such occurrences are actually NaNs
df_raw['load'].replace(0, np.nan, inplace=True)
# keep only entries for selected geographic entities as specified in
# areas.csv
area_filter = areas['primary AreaName ENTSO-E'].dropna()
df_raw = df_raw.loc[df_raw['region'].isin(area_filter)]
# based on the AreaName column, map the area names used throughout OPSD
lookup = areas.set_index('primary AreaName ENTSO-E')['area ID'].dropna()
lookup = lookup[~lookup.index.duplicated()]
df_raw['region'] = df_raw['region'].map(lookup)
dfs = {}
for res in ['15', '30', '60']:
df = (df_raw.loc[df_raw['resolution'] == 'PT' + res + 'M', :]
.copy().sort_index(axis='columns'))
df = df.drop(columns=['resolution'])
# DST-handling
# Hours 2-3 of the DST-day in March are both labelled 3:00, with no possibility
# to distinguish them. We have to delete both
dst_transitions_spring = [d.replace(hour=3, minute=m)
for d in pytz.timezone('Europe/Paris')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3
for m in [0, 15, 30, 45]]
df = df.loc[~df.index.isin(dst_transitions_spring)]
# juggle the index and columns
df.set_index(stacked, append=True, inplace=True)
# at this point, only the values we are interested in are left as
# columns
df.columns.rename(unstacked, inplace=True)
df = df.unstack(stacked)
# keep only columns that have at least some nonzero values
df = df.loc[:, (df > 0).any(axis=0)]
# add source, url and unit to the column names.
# Note: pd.concat inserts new MultiIndex values in front of the old ones
df = pd.concat([df],
keys=[tuple(append_headers.values())],
names=append_headers.keys(),
axis='columns')
# reorder and sort columns
df = df.reorder_levels(headers, axis=1)
dfs[res + 'min'] = df
return dfs
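# Illustrative sketch (the variables below are placeholders for values taken
# from the datapackage configuration): read_entso_e_transparency returns a
# dict of DataFrames keyed by resolution, e.g.
#
#     dfs = read_entso_e_transparency(areas, filepath, dataset_name,
#                                     headers, cols, stacked, unstacked,
#                                     append_headers)
#     df_quarter_hourly = dfs['15min']
#     df_hourly = dfs['60min']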
def read_pse(filepath):
'''
Read a .csv file from PSE into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
Returns
----------
df: pandas.DataFrame
The content of one file from PSE
'''
df = pd.read_csv(
filepath,
sep=';',
encoding='cp1250',
header=0,
index_col=None,
parse_dates=None,
date_parser=None,
dayfirst=False,
decimal=',',
thousands=None,
# hours are indicated by their ending time. During fall DST,
# UTC 23:00-00:00 = CEST 1:00-2:00 is indicated by '02',
# UTC 00:00-01:00 = CEST 2:00-3:00 is indicated by '02A',
# UTC 01:00-02:00 = CET 2:00-3:00 is indicated by '03'.
# regular hours require backshifting by 1 period
converters={
'Time': lambda x: '2:00' if x == '2A' else str(int(x) - 1) + ':00'
}
)
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Warsaw')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3]
# Account for an error where an hour is jumped in the data, incrementing
# the hour by one
#time_int = df['Time'].str[:-3].astype(int)
# if (time_int time_int.shift(1) - 1).
# if (time_int == 24).any():
# logger.info(filepath)
# df = df[time_int != 24]
if df['Date'][0] == 20130324:
df['Time'] = [str(num) + ':00' for num in range(24)]
# The hour from 01:00 - 02:00 (CET) should by PSE's logic be indexed
# by "02:00" (the endpoint), but at DST day in spring they use "03:00" in
# the files. Our routine requires it to be "01:00" (the start point).
df['proto_timestamp'] = pd.to_datetime(
df['Date'].astype(str) + ' ' + df['Time'])
slicer = df['proto_timestamp'].isin(dst_transitions_spring)
df.loc[slicer, 'Time'] = '1:00'
# create the actual timestamp from the corrected "Date"-column
df.index = pd.to_datetime(df['Date'].astype(str) + ' ' + df['Time'])
# DST-handling
# 'ambiguous' refers to how the October dst-transition hour is handled.
# 'infer' will attempt to infer dst-transition hours based on order.
df.index = df.index.tz_localize('Europe/Warsaw', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
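# Sketch of the 'Time' converter above (illustrative): PSE labels hours by
# their end time, so regular values are shifted back by one hour, while the
# ambiguous fall-DST hour '2A' is mapped to '2:00':
#
#     convert = lambda x: '2:00' if x == '2A' else str(int(x) - 1) + ':00'
#     convert('1')    # -> '0:00'
#     convert('24')   # -> '23:00'
#     convert('2A')   # -> '2:00'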
def read_ceps(filepath):
'''Read a file from CEPS into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=2,
parse_dates=True,
dayfirst=True,
skiprows=None,
index_col=0,
usecols=[0, 1, 2]
)
# DST-handling
df.index = df.index.tz_localize('Europe/Prague', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_elia(filepath):
'''Read a file from Elia into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=3,
parse_dates={'timestamp': ['DateTime']},
dayfirst=True,
index_col='timestamp',
usecols=None
)
# DST handling
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_energinet_dk(filepath):
'''Read a file from energinet.dk into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=2, # the column headers are taken from 3rd row.
# 2nd row also contains header info like in a multiindex,
# i.e. whether the columns are price or generation data.
# However, we will make our own column names below.
# Row 3 is enough to unambiguously identify the columns
skiprows=None,
index_col=None,
parse_dates=True,
dayfirst=False,
usecols=None, # None means: parse all columns
thousands=',',
# hours in 2nd column run from 1-24, we need 0-23:
# (converters seem not to work in combination with parse_dates)
converters={1: lambda x: x - 1}
)
# Create the timestamp column and set as index
df.index = df.iloc[:, 0] + pd.to_timedelta(df.iloc[:, 1], unit='h')
# DST-handling
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Copenhagen')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3]
# Drop 3rd hour for (spring) DST-transition from df.
df = df[~df.index.isin(dst_transitions_spring)]
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.ones(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Copenhagen', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
return df
def read_entso_e_statistics(filepath,):
'''Read a file from ENTSO-E into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=18,
usecols='A, B, G, K, L, N, P:AU'
)
# rename columns
# According to the specific national considerations, GB data reflects the
# whole UK including Northern Ireland since 2016
renamer = {df.columns[0]: 'date', df.columns[1]: 'time', 'GB': 'GB_UKM'}
df.rename(columns=renamer, inplace=True)
# Zero load is highly unlikely. Such occurrences are actually NaNs
df.replace(0, np.nan, inplace=True)
# Construct the index and set timezone
# for some reason, the 'date' column has already been parsed to datetime
df['date'] = df['date'].fillna(method='ffill').dt.strftime('%Y-%m-%d')
df.index = pd.to_datetime(df.pop('date') + ' ' + df.pop('time').str[:5])
# DST-handling
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_entso_e_portal(filepath):
'''Read a file from the old ENTSO-E Data Portal into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=3, # 0 indexed, so the column names are actually in the 4th row
skiprows=None,
# create MultiIndex from first 2 columns ['date', 'Country']
index_col=[0, 1],
parse_dates={'date': ['Year', 'Month', 'Day']},
dayfirst=False,
usecols=None, # None means: parse all columns
)
# The "Coverage ratio"-column specifies for some countries a scaling factor
# with which we should upscale the reported values
df = df.divide(df.pop('Coverage ratio'), axis='index') * 100
# The original data has days and countries in the rows and hours in the
# columns. This rearranges the table, mapping hours on the rows and
# countries on the columns.
df.columns.names = ['hour']
df = df.stack(level='hour').unstack(level='Country').reset_index()
# Create the timestamp column and set as index
df.index = df.pop('date') + pd.to_timedelta(df.pop('hour'), unit='h')
# DST-handling
# Delete values in DK and FR that should not exist
df = df.loc[df.index != '2015-03-29 02:00', :]
# Delete values in DK that are obviously twice as high as they should be
df.loc[df.index.isin(['2014-10-26 02:00:00', '2015-10-25 02:00:00']),
'DK'] = np.nan
# Delete values in UK that are all zero except for one day
df.loc[(df.index.year == 2010) & (df.index.month == 1), 'GB'] = np.nan
# Delete values in CY that are mostly zero but not always
df.loc[(df.index.year < 2013), 'CY'] = np.nan
# Zero load is highly unlikely. Such occurrences are actually NaNs
df.replace(0, np.nan, inplace=True)
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.ones(len(df.index), dtype=bool)
df.index = df.index.tz_localize('CET', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
# Rename regions to comply with naming conventions
renamer = {'DK_W': 'DK_1', 'UA_W': 'UA_west', 'NI': 'GB_NIR', 'GB': 'GB_GBN'}
df.rename(columns=renamer, inplace=True)
# Calculate load for whole UK from Great Britain and Northern Ireland data
df['GB_UKM'] = df['GB_GBN'].add(df['GB_NIR'])
return df
def read_hertz(filepath, dataset_name):
'''Read a file from 50Hertz into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=3,
index_col='timestamp',
parse_dates={'timestamp': ['Datum', 'Von']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands='.',
# truncate values in 'time' column after 5th character
converters={'Von': lambda x: x[:5]},
)
# Wind onshore
if dataset_name == 'wind generation_actual pre-offshore':
df['wind_onshore'] = df['MW']
# Until 2006, and in 2015 (except for wind_generation_pre-offshore),
# during the fall dst-transition, only the
# wintertime hour (marked by a B in the data) is reported, the summertime
# hour (marked by an A) is missing in the data.
# dst_arr is a boolean array consisting only of "False" entries, telling
# python to treat the hour from 2:00 to 2:59 as wintertime.
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=2)
# Conform index to UTC
if (pd.to_datetime(df.index.values[0]).year not in [2005, 2006, 2015] or
(dataset_name == 'wind generation_actual pre-offshore' and
pd.to_datetime(df.index.values[0]).year == 2015)):
check_dst(df.index, autumn_expect=2)
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
else:
dst_arr = np.zeros(len(df.index), dtype=bool)
check_dst(df.index, autumn_expect=1)
df.index = df.index.tz_localize('Europe/Berlin', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
variable, attribute = dataset_name.split(' ')[:2]
# Since 2016, wind data has an additional column for offshore.
# Baltic 1 has been producing since 2011-05-02 and Baltic2 since
# early 2015 (source: Wikipedia) so it is probably not correct that
# 50Hertz-Wind data pre-2016 is only onshore. Maybe we can ask at
# 50Hertz directly.
return df
def read_amprion(filepath, dataset_name):
'''Read a file from Amprion into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['Datum', 'Uhrzeit']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands=None,
# Truncate values in 'time' column after 5th character.
converters={'Uhrzeit': lambda x: x[:5]},
)
# Wind onshore
if dataset_name == 'wind':
df['wind_onshore'] = df['Online Hochrechnung [MW]']
# DST-Handling:
# In the years after 2009, during the fall dst-transition, only the
# summertime hour is reported, the wintertime hour is missing in the data.
# dst_arr is a boolean array consisting only of "True" entries, telling
# python to treat the hour from 2:00 to 2:59 as summertime.
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=0)
index1 = df.index[df.index.year >= 2018]
index1 = index1.tz_localize('Europe/Berlin', ambiguous='infer')
index2 = df.index[df.index.year < 2018]
dst_arr = np.ones(len(index2), dtype=bool)
index2 = index2.tz_localize('Europe/Berlin', ambiguous=dst_arr)
df.index = index2.append(index1)
df.index = df.index.tz_convert(None)
return df
def read_tennet(filepath, dataset_name):
'''Read a file from TenneT into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
encoding='latin_1',
header=3,
index_col=False,
parse_dates=False,
date_parser=None,
dayfirst=True,
thousands=None,
converters=None,
)
# Wind onshore
if dataset_name == 'wind':
# Calculate onshore wind-generation
df['wind_onshore'] = df['tatsächlich [MW]'].sub(
df['Anteil Offshore [MW]'])
# Construct the datetime-index
renamer = {'Datum': 'date', 'Position': 'pos'}
df.rename(columns=renamer, inplace=True)
df['date'].fillna(method='ffill', limit=100, inplace=True)
# Check the rows for irregularities
for i, row in df.iterrows():
# there must not be more than 100 quarter-hours in a day
if row['pos'] > 100:
logger.warning('%s th quarter-hour at %s, position %s',
row['pos'], row['date'], i)
# On the day in March when summertime begins, shift the data forward by
# 1 hour, beginning with the 9th quarter-hour, so the index runs again
# up to 96
elif (row['pos'] == 92 and (
(i == len(df.index) - 1) or (df['pos'][i + 1] == 1))):
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 9)].index
df.loc[slicer, 'pos'] = df['pos'] + 4
# Instead of having the quarter-hours' index run up to 100, we want
# to have it set back by 1 hour beginning from the 13th
# quarter-hour, ending at 96
elif row['pos'] == 100: # True when summertime ends in October
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 13)].index
df.loc[slicer, 'pos'] = df['pos'] - 4
# Compute timestamp from position and generate datetime-index
df['hour'] = (np.trunc((df['pos'] - 1) / 4)).astype(int).astype(str)
df['minute'] = (((df['pos'] - 1) % 4) * 15).astype(int).astype(str)
df.index = pd.to_datetime(
df['date'] + ' ' + df['hour'] + ':' + df['minute'], dayfirst=True)
# DST-handling
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
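# Sketch of the position-to-time mapping above (illustrative): TenneT reports
# up to 96 quarter-hours per day, so position p maps to
#
#     hour   = trunc((p - 1) / 4)
#     minute = ((p - 1) % 4) * 15
#
# e.g. p = 1 -> 0:00, p = 5 -> 1:00, p = 96 -> 23:45.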
def read_transnetbw(filepath, dataset_name):
'''Read a file from TransnetBW into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=0,
index_col=None,
parse_dates=None, # {'timestamp': ['Datum von', 'Uhrzeit von']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands=None,
converters=None,
)
# Wind onshore
if dataset_name == 'wind':
df['wind_onshore'] = df['Ist-Wert (MW)']
# rename columns
renamer = {'Datum von': 'date', 'Uhrzeit von': 'time'}
df.rename(columns=renamer, inplace=True)
# DST-handling
# timestamp 01:45 just before spring DST transition has been falsely set to
# 3:45, which we correct here
slicer = (df['time'] == '03:45') & (df['time'].shift(periods=1) == '01:30')
df.loc[slicer, 'time'] = '01:45'
df.index = pd.to_datetime(df['date'] + ' ' + df['time'], dayfirst=True)
dst_arr = np.zeros(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_opsd(filepath, param_dict, headers):
'''Read a file from OPSD into a DataFrame'''
df = pd.read_csv(
filepath,
sep=',',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['day']},
date_parser=None,
dayfirst=False,
decimal='.',
thousands=None,
converters=None,
)
# Split the colname after the first "_"
cols = [(col_name.split('_')[0], '_'.join(col_name.split('_')[1:-1]))
for col_name in df.columns]
df.columns = pd.MultiIndex.from_tuples(cols)
# Keep only wind and solar
keep = ['wind', 'wind_onshore', 'wind_offshore', 'solar']
df = df.loc[:, (slice(None), keep)]
    # The capacities data only has one entry per day, which pandas
    # interprets as 00:00h. We will broadcast the daily data for
    # all quarter-hours of the day until the next given data point.
    # For this, we expand the index so it reaches to 23:59 of
    # the last day, not only 00:00.
last = pd.to_datetime([df.index[-1]]) + timedelta(days=1, minutes=59)
until_last = df.index.append(last).rename('timestamp')
df = df.reindex(index=until_last, method='ffill')
df = df.loc[(2005 <= df.index.year) & (df.index.year <= 2019)]
dfs = {}
for timezone, res, ddf in [
('CET', '15min', df.loc[:, ['DE']]),
('WET', '30min', df.loc[:, ['GB-UKM', 'GB-GBN', 'GB-NIR']]),
('CET', '60min', df.loc[:, ['CH', 'DK', 'SE']])]:
# DST-handling
ddf.index = ddf.index.tz_localize(timezone).tz_convert(None)
ddf = ddf.resample(res).ffill().round(0)
# Create the MultiIndex
cols = [tuple(param_dict['colmap'][col_name[0]][level]
.format(variable=col_name[1].lower())
for level in headers) for col_name in ddf.columns]
ddf.columns = pd.MultiIndex.from_tuples(cols, names=headers)
dfs[res] = ddf
return dfs
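# Hedged sketch (toy data, not part of the OPSD pipeline) of the broadcast
# described in the comments above: reindexing with method='ffill' repeats a
# daily value for every 15-minute step until the next observation.
def _example_daily_broadcast():
    daily = pd.Series(
        [100.0, 110.0],
        index=pd.to_datetime(['2015-01-01', '2015-01-02']))
    quarter_hours = pd.date_range('2015-01-01', '2015-01-02 23:45', freq='15min')
    broadcast = daily.reindex(quarter_hours, method='ffill')
    assert broadcast['2015-01-01 12:00'] == 100.0
    assert broadcast['2015-01-02 06:15'] == 110.0
    return broadcast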
def read_svenska_kraftnaet(filepath, dataset_name):
'''Read a file from Svenska Kraftnät into a DataFrame'''
if dataset_name in ['wind_solar_1', 'wind_solar_2']:
skip = 4
cols = {0: 'date', 1: 'hour', 2: 'load', 3: 'wind'}
else:
if dataset_name == 'wind_solar_4':
skip = 5
else:
skip = 7
cols = {0: 'timestamp', 1: 'load', 2: 'wind', 8: 'solar'}
df = pd.read_excel(
io=filepath,
# read the last sheet (in some years,
# there are hidden sheets that would cause errors)
sheet_name=-1,
header=None,
skiprows=skip,
index_col=None,
usecols=cols.keys(),
names=cols.values()
)
if dataset_name in ['wind_solar_1', 'wind_solar_2']:
# in 2009 there is a row below the table for the sums that we don't
# want to read in
df = df[df['date'].notnull()]
df.index = pd.to_datetime(
df['date'].astype(int).astype(str) + ' ' +
df['hour'].astype(int).astype(str).str.replace('00', '') + ':00',
dayfirst=False,
infer_datetime_format=True)
else:
# in 2011 there is a row below the table for the sums that we don't
# want to read in
df = df[((df['timestamp'].notnull()) &
(df['timestamp'].astype(str) != 'Tot summa GWh'))]
df.index = pd.to_datetime(df['timestamp'], dayfirst=True)
# The timestamp ("Tid" in the original) gives the time without
# daylight savings time adjustments (normaltid). To convert to UTC,
# one hour has to be deducted
df.index = df.index - timedelta(hours=1) # + pd.offsets.Hour(-1)
return df
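# Worked example (illustrative): Swedish "normaltid" is CET without a DST
# adjustment (UTC+1 all year round), so e.g. 12:00 normaltid corresponds to
# 11:00 UTC, which is exactly what deducting one hour above yields:
#   pd.Timestamp('2015-07-01 12:00') - timedelta(hours=1)
#   == pd.Timestamp('2015-07-01 11:00')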
def read_apg(filepath):
'''Read a file from APG into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
encoding='latin_1',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['Von']},
dayfirst=True,
decimal=',',
thousands='.',
        # The format of the raw hour-column is normally 01:00:00, 02:00:00 etc.
        # throughout the year, but 3A:00:00, 3B:00:00 for the (possibly
        # DST-transgressing) 3rd hour of every day in October. We truncate the
        # hours column after 2 characters and strip the letters, which are only
        # there to indicate the order during the fall DST-transition.
converters={'Von': lambda x: str(x).replace('A', '').replace('B', '')}
)
    # Correct column names (assumed: strip non-breaking spaces)
    df.rename(columns=lambda x: x.replace('\xa0', ' '), inplace=True)
# DST-handling
df.index = df.index.tz_localize('Europe/Vienna', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_rte(filepath):
'''Read a file from RTE into a DataFrame'''
cols = ['Date', 'Heure', 'Consommation (MW)', 'Prévision J-1 (MW)',
'Eolien (MW)', 'Solaire (MW)']
df = pd.read_csv(
filepath,
sep=';',
encoding='utf-8',
header=0,
index_col='timestamp',
        # there is also a column with UTC timestamps, but it is incorrect
parse_dates={'timestamp': ['Date', 'Heure']},
dayfirst=True,
usecols=cols
)
    # filter out quarter-hourly observations and sort the index
df = df.loc[df.index.minute.isin([0, 30]), :]
df.sort_index(axis='index', inplace=True)
# DST handling
# drop 1 hour after spring dst as it contains inconsistent data (copy of
# hour before). The 1 hour will later be interpolated
dst_transitions_spring = [
dd for d in pytz.timezone('Europe/Paris')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3
for dd in (d.replace(hour=2, minute=0), d.replace(hour=2, minute=30))]
df = df.loc[~df.index.isin(dst_transitions_spring)]
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.zeros(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Paris', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
return df
def read_GB(filepath):
'''Read a file from National Grid or Elexon into a DataFrame'''
time_cols = {
'#Settlement Date': 'date', # Elexon
'Settlement Period': 'pos', # Elexon
'SETTLEMENT_DATE': 'date', # National Grid
'SETTLEMENT_PERIOD': 'pos' # National Grid
}
df = pd.read_csv(
filepath,
header=0,
usecols=None,
dayfirst=True
)
df.rename(columns=time_cols, inplace=True)
for i, row in df.iterrows():
# there must not be more than 50 half-hours in a day
if row['pos'] > 50:
logger.warning('%s th half-hour at %s, position %s',
row['pos'], row['date'], i)
# On the day in March when summertime begins, shift the data forward by
# 1 hour, beginning with the 5th half-hour, so the index runs again
# up to 48
elif (row['pos'] == 46 and (
(i == len(df.index) - 1) or (df['pos'][i + 1] == 1))):
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 3)].index
df.loc[slicer, 'pos'] = df['pos'] + 2
# Instead of having the half-hours' index run up to 50, we want
# to have it set back by 1 hour beginning from the 5th
# half-hour, ending at 48
elif row['pos'] == 50: # True when summertime ends in October
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 5)].index
df.loc[slicer, 'pos'] = df['pos'] - 2
# Compute timestamp from position and generate datetime-index
df['hour'] = (np.trunc((df['pos'] - 1) / 2)).astype(int).astype(str)
df['minute'] = (((df['pos'] - 1) % 2) * 30).astype(int).astype(str)
df.index = pd.to_datetime(
df['date'] + ' ' + df['hour'] + ':' + df['minute'], dayfirst=True)
# DST-handling
df.index = df.index.tz_localize('Europe/London', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def terna_file_to_initial_dataframe(filepath):
'''
Parse the xml or read excel directly,
returning the data from the file in a simple-index dataframe.
    Some files are formatted as xml, some are pure excel files.
This function handles both cases.
Parameters:
----------
filepath: str
The path of the file to process
Returns:
----------
df: pandas.DataFrame
A pandas dataframe containing the data from the specified file.
'''
# First, we'll try to parse the file as if it is xml.
try:
excelHandler = ExcelHandler()
parse(filepath, excelHandler)
# Create the dataframe from the parsed data
df = pd.DataFrame(excelHandler.tables[0][2:],
columns=excelHandler.tables[0][1])
# Convert the "Generation [MWh]"-column to numeric
df['Generation [MWh]'] = pd.to_numeric(df['Generation [MWh]'])
except:
# In the case of an exception, treat the file as excel.
try:
df = pd.read_excel(filepath, header=1)
except xlrd.XLRDError:
df = pd.DataFrame()
return df
def read_terna(filepath, filedate, param_dict, headers):
'''
Read a file from Terna into a dataframe
Parameters:
----------
filepath: str
The path of the file to read.
    filedate:
        The date of the file to read.
    param_dict: dict
        Source-specific parameters (e.g. the column mapping).
    headers:
        Levels for the MultiIndex.
Returns:
----------
df: pandas.DataFrame
A pandas multi-index dataframe containing the data from the specified
file.
'''
# Reading the file into a pandas dataframe
df = terna_file_to_initial_dataframe(filepath)
if df.empty:
return df
# Rename columns to match conventions
renamer = {
'Date/Hour': 'timestamp',
'Bidding Area': 'region',
'Type': 'variable',
'Generation [MWh]': 'values'
}
df.rename(columns=renamer, inplace=True)
    # Cast the timestamp column to datetime and set it as the index
df.index =
|
pd.to_datetime(df['timestamp'])
|
pandas.to_datetime
|
#!/usr/bin/env python
# coding: utf-8
# #NUMBER 1: DATA PREPARATION
# In[4]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
print("Done importing libraries")
# In[5]:
#path_to_file= "C:/Users/necbanking/Desktop/2.1 modular/fifa_AI/"
df=pd.read_csv("players_20.csv")
df_19=pd.read_csv("players_19.csv")
df.head()
# In[6]:
df_19.head()
# In[7]:
df = df.drop('dob', axis =1)
df = df.drop('weight_kg', axis =1)
df = df.drop('international_reputation', axis =1)
df = df.drop('real_face', axis =1)
df = df.drop('release_clause_eur', axis =1)
df = df.drop('player_tags', axis =1)
df = df.drop('team_jersey_number', axis =1)
df = df.drop('loaned_from', axis =1)
df = df.drop('joined', axis =1)
df = df.drop('contract_valid_until', axis =1)
df = df.drop('nation_position', axis =1)
df = df.drop('nation_jersey_number', axis =1)
df = df.drop('player_traits', axis =1)
df = df.drop('sofifa_id', axis =1)
df = df.drop('long_name', axis =1)
# In[8]:
df_19 = df_19.drop('dob', axis =1)
df_19 = df_19.drop('weight_kg', axis =1)
df_19 = df_19.drop('international_reputation', axis =1)
df_19 = df_19.drop('real_face', axis =1)
df_19 = df_19.drop('release_clause_eur', axis =1)
df_19 = df_19.drop('player_tags', axis =1)
df_19 = df_19.drop('team_jersey_number', axis =1)
df_19 = df_19.drop('loaned_from', axis =1)
df_19 = df_19.drop('joined', axis =1)
df_19 = df_19.drop('contract_valid_until', axis =1)
df_19 = df_19.drop('nation_position', axis =1)
df_19 = df_19.drop('nation_jersey_number', axis =1)
df_19 = df_19.drop('player_traits', axis =1)
df_19 = df_19.drop('sofifa_id', axis =1)
df_19 = df_19.drop('long_name', axis =1)
# #NUMBER 2: CORRELATION
# In[9]:
#splitting data
train_data, test_data=train_test_split(df,test_size=0.25)
print("Length of training data is: " + str(len(train_data)))
print("Length of test data is: " + str(len(test_data)))
# In[10]:
#selecting features
target_feature='overall'
# find features that are correlated to the overall column
feature_corr=train_data.corr(method='pearson')[target_feature]
feature_corr=feature_corr.sort_values(ascending=False)
# print the top 20 correlations with the target feature
print(feature_corr[1:21])
corr_matrix = df.corr()
corr_matrix['overall'].sort_values(ascending=False)
##
# #NUMBER 3: REGRESSION MODEL
#
# In[11]:
# Training the regression model
features=corr_matrix['overall'].sort_values(ascending=False)
features = ['potential', 'value_eur', 'wage_eur', 'attacking_short_passing',
            'skill_long_passing', 'age', 'skill_ball_control', 'skill_curve',
            'skill_moves', 'attacking_volleys']
X_train=df[features]
y_train=df['overall']
r = LinearRegression()
r.fit(X_train,y_train )
print(r.score(X_train,y_train))
# In[12]:
# copy the top correlated features (13 of them) to be used by the model
features=feature_corr[1:14].index.tolist()
print(features)
# In[13]:
#training the model
x_train=train_data[features]
y_train=train_data[target_feature]
#replace all empty cells with zero
x_train.fillna(0,inplace=True)
#using the LinearRegression method to build the model
model=LinearRegression().fit(x_train,y_train)
#print score
print("Score:"+str(model.score(x_train,y_train)))
# #NUMBER 4: A PROCESS OF OPTIMISATION
# In[14]:
# test the model using the 25% test split of the players_20.csv (df) dataframe
#sort test data first
test_data=test_data.sort_values([target_feature], ascending=False)
x_test=test_data[features]
x_test.fillna(0,inplace=True)
y_test=test_data[target_feature]
#start predicting
y_predict=model.predict(x_test)
#add new column called predicted
test_data['predicted']=y_predict
rating=((y_predict-y_test)/y_test*100)
# add a new column called difference (percentage deviation from the true rating)
test_data['difference']=rating
test_data[["short_name","overall","predicted","difference"]]
# In[16]:
# preprocessing features
df_19['potential'] = pd.to_numeric(df_19['potential'],errors='coerce')
df_19['value_eur'] =
|
pd.to_numeric(df_19['value_eur'],errors='coerce')
|
pandas.to_numeric
|
# -----------------------------------------------------------------PORTRAY----------------------------------------------------------------------#
# AUTHORS
# <NAME> -> <EMAIL>
# <NAME> -> <EMAIL>
# TEAM ILLUMINATI
# ----------------------------------------------------------------IMAGE ANALYSER----------------------------------------------------------------#
'''DISABLE WARNINGS'''
import os
import warnings
import gc
import sys
import yake
import urllib.request
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import json
import glob
import pickle
import random
from pathlib import Path
import pickle
import cv2
import editdistance
import string
from sklearn.preprocessing import MinMaxScaler
import io
import itertools
import networkx as nx
import nltk
import re
import networkx
from rake_nltk import Rake
from nltk.tokenize import word_tokenize, sent_tokenize
import numpy as np
import numpy as np
import pandas as pd
import itertools
from tqdm import tqdm
from imgaug import augmenters as iaa
from sklearn.model_selection import StratifiedKFold, KFold
import mrcnn
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
import keras.layers
from mrcnn.model import log
from mrcnn.model import log, BatchNorm
COCO_WEIGHTS_PATH = 'mask_rcnn_coco.h5'
warnings.filterwarnings("ignore", category=DeprecationWarning)
# ------------------------------------------------------------SETTING UP THE IMAGE MODEL AND LOADING WEIGHTS------------------------------------------------------------
'''SET CONFIGURATIONS FOR MODEL'''
NUM_CATS = 46
IMAGE_SIZE = 512
class FashionConfig(Config):
NAME = "fashion"
NUM_CLASSES = NUM_CATS + 1
GPU_COUNT = 1
IMAGES_PER_GPU = 4
BACKBONE = 'resnet50'
IMAGE_MIN_DIM = IMAGE_SIZE
IMAGE_MAX_DIM = IMAGE_SIZE
IMAGE_RESIZE_MODE = 'none'
RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)
STEPS_PER_EPOCH = 1000
VALIDATION_STEPS = 200
config = FashionConfig()
'''LOAD LABELS FOR IMAGE SEGMENTATION'''
with open("label_descriptions.json") as f:
label_descriptions = json.load(f)
label_names = [x['name'] for x in label_descriptions['categories']]
'''Helper Functions For Image Analysis'''
def resize_image(image_path):
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)
return img
def to_rle(bits):
rle = []
pos = 0
for bit, group in itertools.groupby(bits):
group_list = list(group)
if bit:
rle.extend([pos, sum(group_list)])
pos += len(group_list)
return rle
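# Worked example (illustrative only; not used by the pipeline): to_rle encodes
# the True runs of a flattened mask as [start_index, run_length, ...] pairs
# with zero-based start positions, e.g. a run of two set bits starting at
# index 1 followed by a single set bit at index 5:
assert to_rle([0, 1, 1, 0, 0, 1]) == [1, 2, 5, 1]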
def refine_masks(masks, rois):
areas = np.sum(masks.reshape(-1, masks.shape[-1]), axis=0)
mask_index = np.argsort(areas)
union_mask = np.zeros(masks.shape[:-1], dtype=bool)
for m in mask_index:
masks[:, :, m] = np.logical_and(masks[:, :, m], np.logical_not(union_mask))
union_mask = np.logical_or(masks[:, :, m], union_mask)
for m in range(masks.shape[-1]):
mask_pos = np.where(masks[:, :, m]==True)
if np.any(mask_pos):
y1, x1 = np.min(mask_pos, axis=1)
y2, x2 = np.max(mask_pos, axis=1)
rois[m, :] = [y1, x1, y2, x2]
return masks, rois
augmentation = iaa.Sequential([
iaa.Fliplr(0.5) # only horizontal flip here
])
'''Model Setup And Download'''
model_path = 'mask_rcnn_fashion_0008.h5'
class InferenceConfig(FashionConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
model = modellib.MaskRCNN(mode='inference',
config=inference_config,
model_dir='../Mask_RCNN/')
assert model_path != '', "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
print("MODEL LOADED")
print()
'''Main Functions for Image Analysis -> Download, Save and Run Predictions'''
def main(df):
'''
    This function utilises the loaded model to extract features from the images
df -> Final Dataframe with ranks
    feature_list -> list of predicted feature labels per image (label names come from label_descriptions.json).
'''
feature_list = []
missing_count = 0
os.chdir('static/Images/')
for i in range(len(df)):
image_url = df["Image_Link"][i]
save_name = df["Name"][i] + '.jpg'
urllib.request.urlretrieve(image_url, save_name)
for i in tqdm(range(len(df))):
labels = []
path = df["Name"][i] + '.jpg'
try:
image = resize_image(path)
result = model.detect([image])[0]
except:
print(df["Name"][i])
feature_list.append([1])
continue
if result['masks'].size > 0:
masks, _ = refine_masks(result['masks'], result['rois'])
for m in range(masks.shape[-1]):
mask = masks[:, :, m].ravel(order='F')
rle = to_rle(mask)
label = result['class_ids'][m] - 1
labels.append(label)
feature_list.append(list(set(labels)))
else:
feature_list.append([1])
missing_count += 1
for i in range(len(feature_list)):
for j in range(len(feature_list[i])):
feature_list[i][j] = label_names[feature_list[i][j]]
df["Feature"] = pd.Series(feature_list)
os.chdir('..')
os.chdir('..')
return df
def cleanresults(df):
'''
    A simple function to remove unwanted data after ranking products.
'''
del df["Discount"], df["Rating"], df["Number of Ratings"], df["Reviews"], df["Current Views"]
# lis = getanalysis(df)
# df["Keywords"] = pd.Series(lis)
return df
print("SETUP COMPLETE")
print()
# --------------------------------------------------------------DATA SCRAPER--------------------------------------------------------------------------#
# Setting up the Chrome instance
options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument('--disable-extensions')
class DataCollectionEcomm:
'''
The main class instance for Data Scraping.
All inputs are provided through a unique pkl file generated by the code previously for each website. The PKL file contains XPaths for each element.
DataFrame Description:
NAME | BRAND | PRICE | DISCOUNT | IMAGE LINK | RATING | NUMBER OF RATINGS | REVIEWS | CURRENT VIEWS | DESCRIPTION
'''
def __init__(self, base_site, search, path, query = ['T-Shirt']):
        self.browser = self.generateBrowser()
self.links = []
self.base_site = base_site
self.path = path
self.search = search
self.query = query
self.df = pd.DataFrame(columns=["Name", "Brand", "Price", "Discount", "Image_Link", "Rating", "Number of Ratings", "Reviews", "Current Views", "Description"])
def getalllinkstoproduct(self, query):
'''
Gathers Links to all related Products.
'''
self.browser.find_element_by_xpath(self.search["search_box"]).click()
self.browser.implicitly_wait(5)
self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(query)
self.browser.implicitly_wait(10)
self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(Keys.ENTER)
temps = []
for i in range(1,1000):
lis =self.browser.find_elements_by_css_selector(self.search["product_selector"] + str(i) + self.search["product_selector_no"])
if (not lis):
break
temps.append(lis[0].get_attribute('href'))
self.browser.get(self.base_site)
self.browser.implicitly_wait(5)
return temps
    def generateBrowser(self):
'''
Generates Browser Instance
'''
self.browser = webdriver.Chrome(options=options)
return self.browser
def getproductdata(self):
'''
Uses selectors from pkl file to extract data.
'''
self.browser.implicitly_wait(3)
Product_Name = self.browser.find_element_by_xpath(self.path["p_name"]).text
try:
Product_Brand = self.browser.find_element_by_xpath(self.path["p_brand"]).text
except:
Product_Brand = Product_Name
try:
Product_Price = self.browser.find_element_by_xpath(self.path["p_price"]).text
except:
Product_Price = "Out Of Stock"
try:
Product_Disc = self.browser.find_element_by_xpath(self.path["p_disc"]).text[:3]
print(1)
except:
Product_Disc = 'NULL'
try:
Product_Image = self.browser.find_element_by_xpath(self.path["p_img"]).get_attribute("src")
except:
Product_Image = self.browser.find_element_by_xpath(self.path["p_img2"]).get_attribute("src")
'''
Using EC for dynamic websites.
Comment out in case of static.
'''
for second in range(0,50):
self.browser.execute_script("window.scrollBy(0,300)", "")
time.sleep(5)
try:
self.browser.find_element_by_id(self.path["p_rev"])
break
except:
continue
Product_Reviews = []
try:
Product_Rating = self.browser.find_element_by_xpath(self.path["p_rat"]).text
except:
Product_Rating = "None"
print("Help - STOP")
try:
Product_NumRatings = self.browser.find_element_by_xpath(self.path["p_numrat"]).text
except:
Product_NumRatings = "Zero"
print("Help - STOP")
try:
Curr_Views = self.browser.find_element_by_xpath(self.path["p_curr"]).text
except:
Curr_Views = "0"
print('Help')
try:
Product_Desc = self.browser.find_element_by_xpath("//*[@id='product-page-selling-statement']").text
except:
Product_Desc = ""
print("Help")
reviews = self.browser.find_elements_by_class_name("_2k-Kq")
for x in reviews:
subject = x.find_element_by_class_name("_3P2YP").text
text = x.find_element_by_class_name("_2wSBV").text
stars = x.find_element_by_class_name("_3tZR1").value_of_css_property('width')[:-2]
Product_Reviews.append([subject, text, stars])
self.df = self.df.append({'Name': Product_Name, 'Brand': Product_Brand, "Price": Product_Price, "Discount": Product_Disc, "Image_Link": Product_Image, "Rating": Product_Rating, "Number of Ratings": Product_NumRatings, "Reviews": Product_Reviews, "Current Views": Curr_Views, "Description": Product_Desc}, ignore_index=True)
def helper(self, link):
self.browser.get(link)
def main_1(self):
self.browser.get(self.base_site)
self.browser.delete_all_cookies()
temp = []
time.sleep(10)
for i in self.query:
link = self.getalllinkstoproduct(i)
temp += link
link_set = set(temp)
self.links = list(link_set)
return self.links
def main_2(self):
for i in tqdm(range(len(self.links))):
self.helper(self.links[i])
time.sleep(5)
self.getproductdata()
'''FOR SHEIN'''
# 1. Comment out:
# for second in range(0,50):
# self.browser.execute_script("window.scrollBy(0,300)", "")
# time.sleep(5)
# try:
# self.browser.find_element_by_id(self.path["p_rev"])
# break
# except:
# continue
# 2. Change:
# self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(query)
# self.browser.implicitly_wait(10)
# self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(Keys.ENTER)
# 3. Change:
# reviews = self.browser.find_elements_by_class_name('common-reviews__list-item-detail')
# for i in range(len(reviews)):
# subject = ''
# text = reviews[i].find_element_by_class_name("rate-des").text
# stars = Product_Rating
# Product_Reviews.append([subject, text, stars])
# --------------------------------------------------------------Review Weights-------------------------------------------------------------------#
class WeightingReviews:
def __init__(self, df):
self.df = df
self.k = 0.3
def setup_environment(self):
"""Download required resources."""
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
print('Completed resource downloads.')
def filter_for_tags(self, tagged, tags=['NN', 'JJ', 'NNP']):
"""Semantic Filter Based on POS."""
return [item for item in tagged if item[1] in tags]
def normal(self, tagged):
return [(item[0].replace('.', ' '), item[1]) for item in tagged]
def unique_ever(self, iterable, key=None):
'''
Extracts only unique nodes for graph.
'''
seen = set()
seen_add = seen.add
if key is None:
for element in [x for x in iterable if x not in seen]:
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def build_graph(self, nodes):
"""Return a networkx graph instance.
nodes-> List of hashables that represent the nodes of a graph.
"""
gr = nx.Graph()
gr.add_nodes_from(nodes)
nodePairs = list(itertools.combinations(nodes, 2))
for pair in nodePairs:
firstString = pair[0]
secondString = pair[1]
levDistance = editdistance.eval(firstString, secondString)
gr.add_edge(firstString, secondString, weight=levDistance)
return gr
def extract_key_phrases(self, text):
'''
        Main function to extract key phrases by building a Levenshtein distance graph.
text-> Text to run on
'''
word_tokens = nltk.word_tokenize(text)
tagged = nltk.pos_tag(word_tokens)
textlist = [x[0] for x in tagged]
tagged = self.filter_for_tags(tagged)
tagged = self.normal(tagged)
unique_word_set = self.unique_ever([x[0] for x in tagged])
word_set_list = list(unique_word_set)
graph = self.build_graph(word_set_list)
calculated_page_rank = nx.pagerank(graph, weight='weight')
keyphrases = sorted(calculated_page_rank, key=calculated_page_rank.get,
reverse=True)
one_third = 50
keyphrases = keyphrases[0:50]
modified_key_phrases = set([])
dealt_with = set([])
i = 0
j = 1
while j < len(textlist):
first = textlist[i]
second = textlist[j]
if first in keyphrases and second in keyphrases:
keyphrase = first + ' ' + second
modified_key_phrases.add(keyphrase)
dealt_with.add(first)
dealt_with.add(second)
else:
if first in keyphrases and first not in dealt_with:
modified_key_phrases.add(first)
if j == len(textlist) - 1 and second in keyphrases and \
second not in dealt_with:
modified_key_phrases.add(second)
i = i + 1
j = j + 1
return modified_key_phrases
def raking(self, text):
'''
Using Python Module RAKE to supplement TextRank
'''
r = Rake(min_length=1, max_length=3)
r.extract_keywords_from_text(text)
ans = r.get_ranked_phrases_with_scores()
return ans
def calcweight(self, text, final):
'''
Calculating weights based on frequency of keywords/phrases.
final-> Final chosen keywords.
'''
count = 0
words = word_tokenize(text)
for i in words:
if i in final:
count += 1
weight = (count/len(final)) * 100
return weight
def main_weights(self):
text = ""
for i in self.df["Reviews"]:
for j in i:
                text = text + " " + j  # join reviews with a space so tokens stay separated
pattern = '[0-9]'
text = re.sub(pattern, ' ', text)
result_rake = self.raking(text)
final = []
for i in result_rake:
if (i[0] > 8):
lis = nltk.word_tokenize(i[1])
final += lis
result_textrank = self.extract_key_phrases(text)
final += result_textrank
resulting = []
for i in self.df["Reviews"]:
lis = []
if (not i):
lis.append(self.k)
resulting.append(lis)
continue
for text, score in i.items():
weight_factor = self.calcweight(text, final)
a = weight_factor + self.k
lis.append(a)
resulting.append(lis)
self.df["Weights"] = pd.Series(resulting)
return self.df
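# Hedged usage sketch (toy input, not part of the scraping flow): build_graph
# connects every pair of candidate words with an edge weighted by their
# Levenshtein distance, which extract_key_phrases then feeds into pagerank.
def _example_levenshtein_graph():
    wr = WeightingReviews(pd.DataFrame())
    gr = wr.build_graph(['shirt', 'short', 'shoes'])
    # 'shirt' -> 'short' needs a single substitution, so the edge weight is 1
    assert gr['shirt']['short']['weight'] == 1
    return gr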
# --------------------------------------------------------------Pre Processor-------------------------------------------------------------------#
class PreProcessEcomm:
'''
    PreProcess class. It utilises multiple helper functions to clean data. As data extracted
    from different websites comes in different formats, it brings them to a common format.
'''
def __init__(self, df):
self.df = df
def simplify(self, rev):
reviews = []
if (type(rev) != str):
for i in rev:
text = i[0] + i[1] + ' ' + i[2]
reviews.append(text)
return reviews
temp = rev.split(']')
reviews = []
for i in temp:
if i != '':
reviews.append(i)
return reviews
def clean(self, rev):
lis = []
for i in rev:
i = re.sub(r'[^\w\s]','',i)
i = i.replace("\n", " ")
lis.append(i)
return lis
def clean2(self, rev):
try:
i = re.sub(r'[^\w\s]','',rev)
i = i.replace("\n", " ")
return i
except:
return ""
def reviewtodict(self):
lis = []
for i in self.df["Reviews"]:
a = {}
for j in i:
try:
score = int(j[-2:])
text = j[:len(j) - 2]
a[text] = score
except:
score = 0
text = j[:len(j) - 2]
a[text] = score
lis.append(a)
self.df["Reviews"] = pd.Series(lis)
return
def ratings(self, s):
x = s[:3]
try:
return float(x)
except:
return 0
def num_ratings(self, s):
try:
x = re.findall(r'\d+', s)
return int(x[0])
except:
return 0
def curr_views(self, s):
try:
x = re.findall(r'\d+', s)[0]
ans = int(x)
return ans
except:
return 0
def price(self, s):
try:
x = re.findall('[\$\£\€](\d+(?:\.\d{1,2})?)', s)
return float(x[0])
except:
if (s == 'Out Of Stock'):
return 0
s = s[1:]
return float(s[:4])
def discount(self, s):
if (s == 0):
return 0
elif (s == None):
return 0
else:
return int(re.findall(r'\d+', s)[0])
def main_pre(self):
self.df['Reviews']= self.df.Reviews.apply(self.simplify)
self.df['Reviews']= self.df.Reviews.apply(self.clean)
self.df['Discount'] = self.df['Discount'].fillna(0)
self.df["Rating"] = self.df["Rating"].apply(self.ratings)
self.df["Number of Ratings"] = self.df["Number of Ratings"].apply(self.num_ratings)
self.df["Current Views"] = self.df["Current Views"].apply(self.curr_views)
self.df["Price"] = self.df["Price"].apply(self.price)
self.df["Discount"] = self.df["Discount"].apply(self.discount)
self.df['Description'] = self.df.Description.apply(self.clean2)
self.reviewtodict()
return self.df
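# Hedged sanity checks for the parsers above (hypothetical input strings,
# added for illustration; not called anywhere in the pipeline).
def _example_price_and_discount_parsing():
    pre = PreProcessEcomm(pd.DataFrame())
    assert pre.price('£24.99') == 24.99    # currency symbol stripped
    assert pre.price('Out Of Stock') == 0  # unavailable items are priced at 0
    assert pre.discount('15% off') == 15   # first integer found in the string
    assert pre.discount(None) == 0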
# --------------------------------------------------------------PORTRAY - ECOMMERCE--------------------------------------------------------------#
scaler = MinMaxScaler(feature_range = (0,10))
class PORTRAY_E:
'''
PORTRAY RANKING ALGORITHM
It utilises :
1. Weights of the Reviews
2. Star Rating
3. Number of Views and Number of ratings.
4. Price and Discount
To rank the products.
'''
def __init__(self, df):
self.df = df
def normalize(self):
column_names_to_normalize = ['RSCORE']
x = self.df[column_names_to_normalize].values
x_scaled = scaler.fit_transform(x)
df_temp = pd.DataFrame(x_scaled, columns=column_names_to_normalize, index = self.df.index)
self.df[column_names_to_normalize] = df_temp
def r1score(self):
mean_revs = self.df["Number of Ratings"].mean()
mean_views = self.df["Current Views"].mean()
r1scores = []
for i in range(len(self.df)):
rating = self.df["Rating"][i]
views = self.df["Current Views"][i]
            count = self.df["Number of Ratings"][i]
factor = (views + count) / 2
r1 = factor*rating
r1scores.append(r1)
self.df["R1SCORE"] = pd.Series(r1scores)
mean_dist = self.df["R1SCORE"].mean()
self.df["R1SCORE"] = self.df["R1SCORE"] / mean_dist
return
def r2score(self):
r2scores = []
for i in range(len(self.df)):
currdict = self.df["Reviews"][i]
weights = self.df["Weights"][i]
if (not currdict):
r2scores.append(weights[0])
continue
j = 0
r2 = 0
for key, val in currdict.items():
r2 = r2 + (val*weights[j])
j += 1
r2scores.append(r2/10)
self.df["R2SCORE"] =
|
pd.Series(r2scores)
|
pandas.Series
|
#!/usr/bin/env python3
#libraries
import pandas as pd
import numpy as np
import re
import os
pd.set_option('display.max_rows',200)
pd.set_option('display.max_columns',200)
import matplotlib.pyplot as plt
import seaborn as sns
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import timedelta
import warnings
warnings.filterwarnings("ignore")
def read_sql_table(table_name):
db = pymysql.connect(host='localhost', user="###",passwd="####" )
cur = db.cursor()
sql="SELECT * FROM {} ".format(table_name)
in_data = pd.read_sql(sql,db)
return in_data
uspa_subset = read_sql_table("recoms.uspa_subset")
uspa_subset['mobile'] = uspa_subset['mobile'].astype(str)
billing_data = uspa_subset[['first_name','mobile','bill_date','Bill_time','bill_date_time','bill_amount','bill_discount','total_quantity']]
billing_data['key'] = billing_data['mobile'] + ' ' + billing_data['bill_date'].astype(str)
billing_data.drop_duplicates('key',inplace=True)
billing_avg = billing_data.groupby(['mobile']).agg({'bill_amount':'mean','bill_discount':'mean',
'total_quantity':'mean','key':'count',
'bill_date':'max'}).reset_index()
billing_avg_temp = billing_data.groupby(['mobile']).agg({'bill_date':'min'}).reset_index()
billing_avg.rename(columns = {'bill_amount':'average_bill_amount','bill_discount':'average_bill_discount',
'total_quantity':'quantities_per_visit','key':'visit_count','bill_date':'last_visit_date'},inplace=True)
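# Hedged toy illustration (synthetic rows, not USPA data) of the groupby/agg
# rollup above: one row per customer with the mean bill amount, the number of
# distinct visits and the most recent visit date.
def _example_billing_rollup():
    toy = pd.DataFrame({
        'mobile': ['1', '1', '2'],
        'bill_amount': [100.0, 300.0, 50.0],
        'key': ['1 2020-01-01', '1 2020-02-01', '2 2020-01-15'],
        'bill_date': pd.to_datetime(['2020-01-01', '2020-02-01', '2020-01-15'])})
    rolled = toy.groupby('mobile').agg(
        {'bill_amount': 'mean', 'key': 'count', 'bill_date': 'max'}).reset_index()
    assert rolled.loc[rolled['mobile'] == '1', 'bill_amount'].iloc[0] == 200.0
    assert rolled.loc[rolled['mobile'] == '1', 'key'].iloc[0] == 2
    return rolled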
def visit_freq_bins(visit_counts,frequency=45):
visit_count_bin = visit_counts//frequency
if (visit_counts > 359):
formatted_bin = '> 1 year'
else:
formatted_bin = str(visit_count_bin * frequency) + "-" + str((visit_count_bin +1) * frequency)
return formatted_bin
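# Hedged check of the binning above (assumes the default 45-day bin width):
# 100 days falls into the '90-135' bucket, anything above 359 is '> 1 year'.
assert visit_freq_bins(100) == '90-135'
assert visit_freq_bins(400) == '> 1 year'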
billing_avg_temp.rename(columns={'bill_date':'first_visit_date'},inplace=True)
billing_avg = pd.merge(billing_avg,billing_avg_temp,left_on = 'mobile',right_on = 'mobile',how='left')
billing_avg['last_visit_date'] =
|
pd.to_datetime(billing_avg['last_visit_date'])
|
pandas.to_datetime
|
'data engineering'
#%%
import numpy as np
import matplotlib.pyplot as pyplot
import seaborn as sns
import pandas as pd
# import xgboost as xgb
from scipy.stats import skew
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.preprocessing import LabelEncoder, RobustScaler
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from scipy.special import boxcox1p
from xgboost.sklearn import XGBRegressor
from sklearn.metrics import mean_squared_error
rawTrain = pd.read_csv('house-pricing/data/train.csv')
# Remove outlier rows (only from the training set)
rawTrain = rawTrain.drop(rawTrain[(rawTrain['GrLivArea'] > 4000) & (rawTrain['SalePrice'] < 300000)].index)
rawTrainX = rawTrain.drop(['SalePrice', 'Id'], axis=1)
rawTrainY = rawTrain['SalePrice']
rawTest =
|
pd.read_csv('house-pricing/data/test.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from nyc_ccci_etl.commons.configuration import get_database_connection_parameters
import json
from datetime import datetime
class InspectionsTransformer:
def __init__(self, year, month, day):
self.date_filter = "{}_{}_{}t00:00:00.000".format(str(year).zfill(2), str(month).zfill(2), str(day).zfill(2))
host, database, user, password = get_database_connection_parameters()
engine_string = "postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}".format(
user = user,
password = password,
host = host,
port = 5432,
database = database,
)
self.engine = create_engine(engine_string)
def execute(self):
df = pd.read_sql("select * from clean.inspections where inspectiondate='{}'".format(self.date_filter), self.engine)
tabla_4 = df.loc[:, ['dc_id', 'inspectiondate', 'regulationsummary', 'violationcategory', 'healthcodesubsection', 'violationstatus', 'inspectionsummaryresult', 'borough']]
tabla_4['inspectionsummaryresult'] = tabla_4['inspectionsummaryresult'].astype('str')
        # Changed from .split('_-_', 1) to .split('___', 1) because clean.py replaces '-' with '_'
df_2 = pd.DataFrame(tabla_4.inspectionsummaryresult.str.split('___',1).tolist(), columns= ['reason', 'result'])
df_2['result'] = df_2['result'].astype('str')
df_3 = pd.DataFrame(df_2.result.str.split(';_',1).tolist(), columns = ['result_1', 'result_2'])
df_2 = df_2.drop(df_2.columns[[1]], axis=1)
df_2 = df_2.join(df_3)
tabla_4 = tabla_4.join(df_2)
        tabla_4 = tabla_4.drop(['inspectionsummaryresult'], axis = 1)  # drop inspection_summary_result
tabla_4.reason.value_counts(dropna=False)
tabla_4 = tabla_4.loc[tabla_4['reason'] == 'initial_annual_inspection']
tabla_4['result_2'] = tabla_4['result_2'].fillna('NR')
categorias = ["result_1", "result_2"]
df_4 = pd.get_dummies(tabla_4[categorias])
tabla_4 = tabla_4.join(df_4)
        tabla_4 = tabla_4.drop(['result_1', 'result_2'], axis = 1)  # drop variables we no longer need
tabla_4['inspectiondate'] = tabla_4['inspectiondate'].astype('str')
tabla_4['inspectiondate'] = pd.to_datetime(tabla_4.inspectiondate, infer_datetime_format=False, format='%Y_%m_%dt00:00:00.000')
tabla_4['inspection_year'] = tabla_4['inspectiondate'].dt.year
tabla_4['inspection_month'] = tabla_4['inspectiondate'].dt.month
tabla_4['inspection_day_name'] = tabla_4['inspectiondate'].dt.day_name()
tabla_4 = tabla_4.drop(tabla_4.loc[tabla_4['inspection_day_name']== 'Saturday'].index)
tabla_4 = tabla_4.drop(tabla_4.loc[tabla_4['inspection_day_name']== 'Sunday'].index)
dias = {"Monday":'1', "Tuesday":'2', "Wednesday":'3', "Thursday":'4',"Friday":'5'}
tabla_4['inspection_day_name'] = tabla_4['inspection_day_name'].map(dias)
tabla_4['inspection_day_name'] = tabla_4['inspection_day_name'].astype(float)
tabla_4.rename(columns={'dc_id':'center_id'}, inplace=True)
tabla_4.sort_values(['inspectiondate'], ascending=[False], inplace=True)
categorias = ["violationcategory"]
df_5 = pd.get_dummies(tabla_4[categorias])
encoded_columns = ["violationcategory_public_health_hazard", "violationcategory_critical", "violationcategory_general"]
for col in encoded_columns:
if col not in df_5.columns:
df_5[col] = 0
tabla_4 = tabla_4.join(df_5)
        tabla_4 = tabla_4.drop(['violationcategory'], axis = 1)  # drop variables we no longer need
df_6 = tabla_4.loc[tabla_4['inspection_year']!=2020.0]
df_7 = pd.DataFrame(df_6.groupby(["center_id"], sort=False)["inspectiondate"].max().reset_index())
year = str(datetime.now().year)
month = str(datetime.now().month)
day = str(datetime.now().day)
fechas = year + "-" + month + "-" + day
df_7["today"] = pd.to_datetime(fechas)
df_7['dias_ultima_inspeccion'] = df_7['today'] - df_7['inspectiondate']
df_7['dias_ultima_inspeccion'] = df_7['dias_ultima_inspeccion'].dt.days
tabla_4 = pd.merge(tabla_4, df_7, left_on='center_id', right_on='center_id', how='left')
tabla_4 = tabla_4.rename(columns = {'inspectiondate_x':'inspectiondate'})
tabla_4 = tabla_4.drop(['today', 'inspectiondate_y'], axis = 1)
df_8 = pd.DataFrame(df_6.groupby(["center_id"], sort=False)["violationcategory_public_health_hazard"].sum().reset_index())
df_8 = df_8.rename(columns = {'violationcategory_public_health_hazard':'violaciones_hist_salud_publica'})
tabla_4 = pd.merge(tabla_4, df_8, left_on='center_id', right_on='center_id', how='left')
df_9 = tabla_4.loc[tabla_4['inspection_year']==2019.0]
df_10 = pd.DataFrame(df_9.groupby(["center_id"], sort=False)["violationcategory_public_health_hazard"].sum().reset_index())
df_10 = df_10.rename(columns = {'violationcategory_public_health_hazard':'violaciones_2019_salud_publica'})
tabla_4 = pd.merge(tabla_4, df_10, left_on='center_id', right_on='center_id', how='left')
df_11 = pd.DataFrame(df_6.groupby(["center_id"], sort=False)["violationcategory_critical"].sum().reset_index())
df_11 = df_11.rename(columns = {'violationcategory_critical':'violaciones_hist_criticas'})
tabla_4 = pd.merge(tabla_4, df_11, left_on='center_id', right_on='center_id', how='left')
df_12 = pd.DataFrame(df_9.groupby(["center_id"], sort=False)["violationcategory_critical"].sum().reset_index())
df_12 = df_12.rename(columns = {'violationcategory_critical':'violaciones_2019_criticas'})
tabla_4 = pd.merge(tabla_4, df_12, left_on='center_id', right_on='center_id', how='left')
df_13 = pd.merge(df_8, df_11)
df_13['total'] = df_13['violaciones_hist_salud_publica'] + df_13['violaciones_hist_criticas']
df_14 = pd.DataFrame(df_6.groupby(["center_id"], sort=False)["reason"].count().reset_index())
df_15 = pd.merge(df_13, df_14)
df_15['ratio_violaciones_hist'] = df_15['total'] / df_15['reason']
df_15 = df_15.drop(['violaciones_hist_salud_publica', 'violaciones_hist_criticas', 'total', 'reason'], axis = 1)
tabla_4 = pd.merge(tabla_4, df_15, left_on='center_id', right_on='center_id', how='left')
df_16 = pd.merge(df_10, df_12)
df_16['total'] = df_16['violaciones_2019_salud_publica'] + df_16['violaciones_2019_criticas']
df_17 = pd.DataFrame(df_9.groupby(["center_id"], sort=False)["reason"].count().reset_index())
df_18 = pd.merge(df_16, df_17)
df_18['ratio_violaciones_2019'] = df_18['total'] / df_18['reason']
df_18 = df_18.drop(['violaciones_2019_salud_publica', 'violaciones_2019_criticas', 'total', 'reason'], axis = 1)
tabla_4 = pd.merge(tabla_4, df_18, left_on='center_id', right_on='center_id', how='left')
df_19 = pd.DataFrame(df_6.groupby(["borough"], sort=False)[["violationcategory_critical", "violationcategory_general", "violationcategory_public_health_hazard"]].sum().reset_index())
df_19['prom_violaciones_hist_borough'] = df_19[['violationcategory_critical', 'violationcategory_general', 'violationcategory_public_health_hazard']].mean(axis=1)
df_19 = df_19.drop(['violationcategory_critical', 'violationcategory_general', 'violationcategory_public_health_hazard'], axis = 1)
tabla_4 = pd.merge(tabla_4, df_19, left_on='borough', right_on='borough', how='left')
df_20 = pd.DataFrame(df_9.groupby(["borough"], sort=False)[["violationcategory_critical", "violationcategory_general", "violationcategory_public_health_hazard"]].sum().reset_index())
df_20['prom_violaciones_2019_borough'] = df_20[['violationcategory_critical', 'violationcategory_general', 'violationcategory_public_health_hazard']].mean(axis=1)
df_20 = df_20.drop(['violationcategory_critical', 'violationcategory_general', 'violationcategory_public_health_hazard'], axis = 1)
tabla_4 = pd.merge(tabla_4, df_20, left_on='borough', right_on='borough', how='left')
df_21 = pd.DataFrame(df_6.groupby(["center_id"], sort=False)[["violationcategory_critical", "violationcategory_general", "violationcategory_public_health_hazard"]].sum().reset_index())
df_21['total'] = df_21['violationcategory_critical'] + df_21['violationcategory_general'] + df_21['violationcategory_public_health_hazard']
df_21['ratio_violaciones_hist_sp'] = df_21['violationcategory_public_health_hazard'] / df_21['total']
df_21 = df_21.drop(['violationcategory_critical', 'violationcategory_general', 'violationcategory_public_health_hazard', 'total'], axis = 1)
tabla_4 =
|
pd.merge(tabla_4, df_21, left_on='center_id', right_on='center_id', how='left')
|
pandas.merge
|
import unittest
from unittest import mock
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.util.testing import assert_frame_equal
import tests.test_data as td
from shift_detector.checks.statistical_checks import numerical_statistical_check, categorical_statistical_check
from shift_detector.checks.statistical_checks.categorical_statistical_check import CategoricalStatisticalCheck
from shift_detector.checks.statistical_checks.numerical_statistical_check import NumericalStatisticalCheck
from shift_detector.checks.statistical_checks.text_metadata_statistical_check import TextMetadataStatisticalCheck
from shift_detector.detector import Detector
from shift_detector.precalculations.store import Store
from shift_detector.precalculations.text_metadata import NumCharsMetadata, NumWordsMetadata, \
DistinctWordsRatioMetadata, LanguagePerParagraph, UnknownWordRatioMetadata, StopwordRatioMetadata, LanguageMetadata
from shift_detector.utils.visualization import PlotData
class TestTextMetadataStatisticalCheck(unittest.TestCase):
def setUp(self):
self.poems = td.poems
self.phrases = td.phrases
def test_significant_metadata(self):
pvalues = pd.DataFrame([[0.001, 0.2]], columns=['num_chars', 'distinct_words_ratio'], index=['pvalue'])
result = TextMetadataStatisticalCheck(significance=0.01).significant_metadata_names(pvalues)
self.assertIn('num_chars', result)
self.assertNotIn('distinct_words_ratio', result)
def test_not_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': list(reversed(self.poems))})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck().run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(0, len(result.shifted_columns))
self.assertEqual(0, len(result.explanation))
def test_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': self.phrases})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck([NumCharsMetadata(), NumWordsMetadata(),
DistinctWordsRatioMetadata(), LanguagePerParagraph()]
).run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(1, len(result.shifted_columns))
self.assertEqual(1, len(result.explanation))
def test_compliance_with_detector(self):
df1 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
df2 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
detector = Detector(df1=df1, df2=df2, log_print=False)
detector.run(TextMetadataStatisticalCheck())
column_index = pd.MultiIndex.from_product([['text'], ['distinct_words', 'num_chars', 'num_words']],
names=['column', 'metadata'])
solution = pd.DataFrame([[1.0, 1.0, 1.0]], columns=column_index, index=['pvalue'])
self.assertEqual(1, len(detector.check_reports[0].examined_columns))
self.assertEqual(0, len(detector.check_reports[0].shifted_columns))
self.assertEqual(0, len(detector.check_reports[0].explanation))
assert_frame_equal(solution, detector.check_reports[0].information['test_results'])
def test_language_can_be_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], language='fr')
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertEqual('fr', mdtype.language)
def test_infer_language_is_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], infer_language=True)
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertTrue(mdtype.infer_language)
def test_figure_function_is_collected(self):
df1 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
df2 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
metadata_names = ['num_chars', 'num_words']
cols = pd.MultiIndex.from_product([df1.columns, metadata_names], names=['column', 'metadata'])
check = TextMetadataStatisticalCheck()
pvalues = pd.DataFrame(columns=cols, index=['pvalue'])
for solution, num_sig_metadata in [(1, 2), (1, 1), (0, 0)]:
p = [0.001] * num_sig_metadata + [0.05] * (2 - num_sig_metadata)
pvalues[('text', 'num_chars')] = p[0]
pvalues[('text', 'num_words')] = p[1]
with self.subTest(solution=solution, pvalues=pvalues):
result = check.metadata_figure(pvalues=pvalues, df1=df1, df2=df2)
self.assertEqual(solution, len(result))
@mock.patch('shift_detector.checks.statistical_checks.text_metadata_statistical_check.plt')
def test_all_plot_functions_are_called_and_plot_is_shown(self, mock_plt):
plot_data = [PlotData(MagicMock(), 1), PlotData(MagicMock(), 2), PlotData(MagicMock(), 3)]
TextMetadataStatisticalCheck.plot_all_metadata(plot_data)
mock_plt.figure.assert_called_with(figsize=(12.0, 30.0), tight_layout=True)
for func, rows in plot_data:
self.assertTrue(func.called)
mock_plt.show.assert_called_with()
def test_column_tuples_are_handled_by_numerical_visualization(self):
columns = ['text']
metadata_names = ['num_chars']
cols = pd.MultiIndex.from_product([columns, metadata_names], names=['column', 'metadata'])
df1 = pd.DataFrame(columns=cols)
df2 = pd.DataFrame(columns=cols)
df1[('text', 'num_chars')] = [1, 2, 3]
df2[('text', 'num_chars')] = [3, 2, 1]
mock_figure = MagicMock(autospec=Figure)
mock_axes = MagicMock(autospec=Axes)
with mock.patch.object(numerical_statistical_check.vis, 'plot_binned_ratio_histogram'):
NumericalStatisticalCheck.overlayed_hist_plot(mock_figure, mock_axes, ('text', 'num_chars'), df1, df2)
mock_axes.legend.assert_called_with(["DS 1", "DS 2"], fontsize='x-small')
mock_axes.set_title.assert_called_with("Column: '('text', 'num_chars')' (Histogram)")
with mock.patch.object(numerical_statistical_check.vis, 'plot_cumulative_step_ratio_histogram',
return_value=(np.array([0]), np.array([0]))):
NumericalStatisticalCheck.cumulative_hist_plot(mock_figure, mock_axes, ('text', 'num_chars'), df1, df2)
mock_axes.legend.assert_called_with(["DS 1", "DS 2", 'maximal distance = 0'],
fontsize='x-small')
mock_axes.set_title.assert_called_with("Column: '('text', 'num_chars')' (Cumulative Distribution)")
def test_column_tuples_are_handled_by_categorical_visualization(self):
columns = ['text']
metadata_names = ['category']
cols = pd.MultiIndex.from_product([columns, metadata_names], names=['column', 'metadata'])
df1 = pd.DataFrame(columns=cols)
df2 = pd.DataFrame(columns=cols)
        df1[('text', 'category')] = ['latin'] * 3
        df2[('text', 'category')] = ['arabic'] * 3
mock_figure = MagicMock(autospec=Figure)
mock_axes = MagicMock(autospec=Axes)
with mock.patch.object(categorical_statistical_check.vis, 'plot_categorical_horizontal_ratio_histogram',
return_value=mock_axes):
CategoricalStatisticalCheck.paired_total_ratios_plot(mock_figure, mock_axes, ('text', 'category'), df1, df2)
mock_axes.set_title.assert_called_once_with("Column: '('text', 'category')'", fontsize='x-large')
def test_correct_visualization_is_chosen_categorical(self):
with mock.patch.object(CategoricalStatisticalCheck, 'column_plot') as mock_plot:
figure = MagicMock(spec=Figure)
tile = MagicMock()
TextMetadataStatisticalCheck.metadata_plot(figure, tile, 'text', LanguageMetadata(), None, None)
self.assertTrue(mock_plot.called)
def test_correct_visualization_is_chosen_numerical(self):
with mock.patch.object(NumericalStatisticalCheck, 'column_plot') as mock_plot:
figure = MagicMock(spec=Figure)
tile = MagicMock()
TextMetadataStatisticalCheck.metadata_plot(figure, tile, 'text', NumCharsMetadata(), None, None)
self.assertTrue(mock_plot.called)
def test_correct_number_of_plot_data(self):
df1 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
df2 =
|
pd.DataFrame.from_dict({'text': ['blub'] * 10})
|
pandas.DataFrame.from_dict
|
import os
import pandas as pd
from _pytest.capture import CaptureFixture
from pytest_mock import MockerFixture
import nlpland.data.check as check_
from nlpland.constants import (
ABSTRACT_SOURCE_ANTHOLOGY,
ABSTRACT_SOURCE_RULE,
COLUMN_ABSTRACT,
COLUMN_ABSTRACT_SOURCE,
)
def test_print_null_values(capfd: CaptureFixture) -> None:
test_df =
|
pd.DataFrame({"col": ["1", "2", "34", "2", None, pd.NA]})
|
pandas.DataFrame
|
from datetime import datetime
from functools import lru_cache
from typing import Union, Callable, Tuple
import dateparser
import pandas as pd
from dateutil.relativedelta import relativedelta
from numpy.distutils.misc_util import as_list
from wetterdienst.dwd.metadata import Parameter, TimeResolution, PeriodType
from wetterdienst.dwd.metadata.column_names import (
DWDMetaColumns,
DWDOrigDataColumns,
DWDDataColumns,
)
from wetterdienst.dwd.metadata.column_types import (
DATE_FIELDS_REGULAR,
DATE_FIELDS_IRREGULAR,
QUALITY_FIELDS,
INTEGER_FIELDS,
STRING_FIELDS,
)
from wetterdienst.dwd.metadata.datetime import DatetimeFormat
from wetterdienst.dwd.metadata.parameter import TIME_RESOLUTION_PARAMETER_MAPPING
from wetterdienst.dwd.metadata.time_resolution import (
TIME_RESOLUTION_TO_DATETIME_FORMAT_MAPPING,
)
from wetterdienst.exceptions import InvalidParameter
def check_parameters(
parameter: Parameter, time_resolution: TimeResolution, period_type: PeriodType
) -> bool:
"""
    Function to check whether the given combination of parameter, time resolution
    and period type is a valid DWD dataset combination.
"""
check = TIME_RESOLUTION_PARAMETER_MAPPING.get(time_resolution, {}).get(
parameter, []
)
if period_type not in check:
return False
return True
def coerce_field_types(
df: pd.DataFrame, time_resolution: TimeResolution
) -> pd.DataFrame:
"""
A function used to create a unique dtype mapping for a given list of column names.
This function is needed as we want to ensure the expected dtypes of the returned
DataFrame as well as for mapping data after reading it from a stored .h5 file. This
is required as we want to store the data in this file with the same format which is
a string, thus after reading data back in the dtypes have to be matched.
Args:
df: the station_data gathered in a pandas.DataFrame
time_resolution: time resolution of the data as enumeration
Return:
station data with converted dtypes
"""
for column in df.columns:
# Station ids are handled separately as they are expected to not have any nans
if column == DWDMetaColumns.STATION_ID.value:
df[column] = df[column].astype(int)
elif column in DATE_FIELDS_REGULAR:
df[column] = pd.to_datetime(
df[column],
format=TIME_RESOLUTION_TO_DATETIME_FORMAT_MAPPING[time_resolution],
)
elif column in DATE_FIELDS_IRREGULAR:
df[column] = pd.to_datetime(
df[column], format=DatetimeFormat.YMDH_COLUMN_M.value
)
elif column in QUALITY_FIELDS or column in INTEGER_FIELDS:
df[column] = pd.to_numeric(df[column], errors="coerce").astype(
pd.Int64Dtype()
)
elif column in STRING_FIELDS:
df[column] = df[column].astype(pd.StringDtype())
else:
df[column] = df[column].astype(float)
return df
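# Hedged sketch (toy data, not DWD station data) of the dtype coercion above:
# quality/integer columns are run through pd.to_numeric(errors='coerce') and
# cast to the nullable Int64 dtype so missing values survive string storage.
def _example_coerce_quality_column():
    toy = pd.Series(["1", "3", ""])
    coerced = pd.to_numeric(toy, errors="coerce").astype(pd.Int64Dtype())
    assert str(coerced.dtype) == "Int64"
    assert pd.isna(coerced.iloc[2])
    return coerced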
def parse_enumeration_from_template(
enum_: Union[str, Parameter, TimeResolution, PeriodType],
enum_template: Union[Parameter, TimeResolution, PeriodType, Callable],
) -> Union[Parameter, TimeResolution, PeriodType]:
"""
Function used to parse an enumeration(string) to a enumeration based on a template
    :param enum_: Enumeration as string or Enum
:param enum_template: Base enumeration from which the enumeration is parsed
:return: Parsed enumeration from template
:raises InvalidParameter: if no matching enumeration found
"""
try:
return enum_template[enum_.upper()]
except (KeyError, AttributeError):
try:
return enum_template(enum_)
except ValueError:
raise InvalidParameter(
f"{enum_} could not be parsed from {enum_template.__name__}."
)
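# Hedged illustration using a local stand-in enum (not a real wetterdienst
# enumeration): the parser first tries the member name (upper-cased) and then
# falls back to the member value; anything else raises InvalidParameter.
def _example_parse_enumeration_from_template():
    from enum import Enum
    class _Resolution(Enum):
        MINUTES_10 = "10_minutes"
        DAILY = "daily"
    # resolved via the member name (upper-cased)
    assert parse_enumeration_from_template("daily", _Resolution) is _Resolution.DAILY
    # resolved via the fallback on the member value
    assert parse_enumeration_from_template("10_minutes", _Resolution) is _Resolution.MINUTES_10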
@lru_cache(maxsize=None)
def create_humanized_column_names_mapping(
time_resolution: TimeResolution, parameter: Parameter
) -> dict:
"""
    Function to create an extended humanized column names mapping. The function
    takes care of the special cases of quality columns; therefore it requires the
    time resolution and parameter.
Args:
time_resolution: time resolution enumeration
parameter: parameter enumeration
Returns:
dictionary with mappings extended by quality columns mappings
"""
column_name_mapping = {
orig_column.value: humanized_column.value
for orig_column, humanized_column in zip(
DWDOrigDataColumns[time_resolution.name][parameter.name],
DWDDataColumns[time_resolution.name][parameter.name],
)
}
return column_name_mapping
def parse_enumeration(template, values):
return list(
map(lambda x: parse_enumeration_from_template(x, template), as_list(values))
)
def parse_datetime(date_string: str) -> datetime:
"""
Function used mostly for client to parse given date
Args:
date_string: the given date as string
Returns:
any kind of datetime
"""
# Tries out any given format of DatetimeFormat enumeration
return dateparser.parse(
date_string, date_formats=[dt_format.value for dt_format in DatetimeFormat]
)
def mktimerange(
time_resolution: TimeResolution,
date_from: Union[datetime, str],
date_to: Union[datetime, str] = None,
) -> Tuple[datetime, datetime]:
"""
Compute appropriate time ranges for monthly and annual time resolutions.
This takes into account to properly floor/ceil the date_from/date_to
values to respective "begin of month/year" and "end of month/year" values.
Args:
time_resolution: time resolution as enumeration
date_from: datetime string or object
date_to: datetime string or object
Returns:
Tuple of two Timestamps: "date_from" and "date_to"
"""
if date_to is None:
date_to = date_from
if time_resolution == TimeResolution.ANNUAL:
date_from = pd.to_datetime(date_from) + relativedelta(month=1, day=1)
date_to = pd.to_datetime(date_to) + relativedelta(month=12, day=31)
elif time_resolution == TimeResolution.MONTHLY:
date_from = pd.to_datetime(date_from) + relativedelta(day=1)
date_to =
|
pd.to_datetime(date_to)
|
pandas.to_datetime
|
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
# self.whatever to access them in the rest of the script, runs before other scripts
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_message(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
# Isolated test case: there is no longer a sliceByTime function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
# Isolated test case: there is no longer a sliceByTime function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
# the function should identify this invalid input and return None after printing
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_metrics_findFirstTimeAboveVel_1(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [-0.000051, -0.000051, -0.000041, -0.000066, -0.000111, -0.000158, -0.000194, -0.000207, 0.000016, 0.000107, 0.000198]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_2(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_3(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_4(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeOutside_1(self):
pass
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
#result = metrics.common.findFirstTimeOutside(data_object)
#expected_result = 0
#self.assertEqual(expected_result, result)
#err: NameError: name 'pos' is not defined --------------------------------------------------------!!!!!!!!!
def test_metrics_colMean_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
import pandas as pd
df = pd.DataFrame(columns=['p', 'Solvability'])
record = pd.read_csv('Question_4.csv')
x = 0
p = 0.01
ans = 0
for index, row in record.iterrows():
#print(row['p'], row['Solvable'])
x = x + 1
if row['Solvable']:
ans = ans + 1
if x==100:
d = pd.DataFrame([[p, ans]], columns=['p', 'Solvability'])
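# Hedged sketch (not part of the original script; assumes pandas >= 0.25 and the
# 'p'/'Solvable' columns of Question_4.csv): the same per-100-row counts can be
# computed without the manual counters via
#
#   summary = (record.groupby(record.index // 100)
#                    .agg(p=('p', 'first'), Solvability=('Solvable', 'sum')))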
import os, sys
import logging
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.ndimage import label
from .utils import watershed_tissue_sections, get_spot_adjacency_matrix
# Read in a series of Loupe annotation files and return the set of all unique categories.
# NOTE: "Undefined" annotations (and empty strings) are excluded.
def unique_annots_loupe(loupe_files):
all_annots = []
for fh in loupe_files:
df = pd.read_csv(fh, header=0, sep=",")
for a in df.iloc[:,1].values:
if isinstance(a,str) and len(a)>0 and a.lower() != "undefined":
all_annots.append(a)
return sorted(list(set(all_annots)))
# Annotation matrix from a Loupe annotation file
def read_annot_matrix_loupe(loupe_file, position_file, unique_annots):
annots = pd.read_csv(loupe_file, header=0, sep=",")
positions = pd.read_csv(position_file, index_col=0, header=None,
names=["in_tissue", "array_row", "array_col", "pixel_row", "pixel_col"])
annot_matrix = np.zeros((len(unique_annots), len(annots['Barcode'])), dtype=int)
positions_list = []
for i,b in enumerate(annots['Barcode']):
xcoor = positions.loc[b,'array_col']
ycoor = positions.loc[b,'array_row']
positions_list.append('%d_%d' % (xcoor, ycoor))
if annots.iloc[i,1] in unique_annots:
annot_matrix[unique_annots.index(annots.iloc[i,1]),i] = 1
annot_frame = pd.DataFrame(annot_matrix, index=unique_annots, columns=positions_list)
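# Hedged usage sketch (file names below are placeholders, not from the original source):
#
#   loupe_files = ["sampleA_annotations.csv", "sampleB_annotations.csv"]
#   categories = unique_annots_loupe(loupe_files)
#   annot_mat = read_annot_matrix_loupe(loupe_files[0],
#                                       "sampleA_tissue_positions_list.csv",
#                                       categories)
#   # annot_mat: one-hot DataFrame, rows = annotation categories,
#   #            columns = "<array_col>_<array_row>" spot positions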
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 10:37:55 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from itertools import combinations
import os
import utils
utils.start(__file__)
#==============================================================================
PREF = 'f002_'
os.system(f'rm ../feature/t*_{PREF}*')
# =============================================================================
# load
# =============================================================================
cat = ['NAME_CONTRACT_TYPE',
'CODE_GENDER',
'FLAG_OWN_CAR',
'FLAG_OWN_REALTY',
'NAME_TYPE_SUITE',
'NAME_INCOME_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_FAMILY_STATUS',
'NAME_HOUSING_TYPE',
'OCCUPATION_TYPE',
'WEEKDAY_APPR_PROCESS_START',
'ORGANIZATION_TYPE',
'FONDKAPREMONT_MODE',
'HOUSETYPE_MODE',
'WALLSMATERIAL_MODE',
'EMERGENCYSTATE_MODE']
train = utils.load_train(cat)
test = utils.load_test(cat)
train_row = train.shape[0]
trte = pd.concat([train, test])
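# Hedged sketch of the usual continuation (the rest of this script is truncated
# here): train and test are concatenated so the categorical levels are encoded
# consistently, then split back on train_row, e.g.
#
#   le = LabelEncoder()
#   for c in cat:
#       trte[c] = le.fit_transform(trte[c].astype(str))
#   train_enc = trte.iloc[:train_row]
#   test_enc = trte.iloc[train_row:]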
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import subprocess
import errno, os, pty
import shlex
from subprocess import Popen, PIPE
from ConfigReader import configuration
import mysql.connector
from mysql.connector import errorcode
from common.Singleton import Singleton
from common import constants as constant
from common.Exceptions import *
from DBImportConfig import export_config
from DBImportOperation import common_operations
from DBImportOperation import atlas_operations
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import time
class operation(object, metaclass=Singleton):
def __init__(self, connectionAlias=None, targetSchema=None, targetTable=None):
logging.debug("Executing export_operations.__init__()")
self.connectionAlias = connectionAlias
self.targetSchema = targetSchema
self.targetTable = targetTable
self.hiveDB = None
self.hiveTable = None
self.hiveExportTempDB = None
self.hiveExportTempTable = None
self.tempTableNeeded = None
self.sqoopSize = None
self.sqoopRows = None
self.sqoopMappers = None
self.globalHiveConfigurationSet = False
self.atlasOperation = atlas_operations.atlasOperation()
if connectionAlias == None and targetSchema == None and targetTable == None:
self.export_config = export_config.config()
self.common_operations = common_operations.operation()
else:
try:
self.export_config = export_config.config(connectionAlias=connectionAlias, targetSchema=targetSchema, targetTable=targetTable)
self.common_operations = common_operations.operation()
self.export_config.getExportConfig()
self.export_config.common_config.lookupConnectionAlias(connection_alias=self.connectionAlias)
self.hiveDB = self.export_config.hiveDB
self.hiveTable = self.export_config.hiveTable
self.hiveExportTempDB = self.export_config.hiveExportTempDB
self.hiveExportTempTable = self.export_config.hiveExportTempTable
self.checkHiveDB(self.hiveDB)
self.checkHiveTable(self.hiveDB, self.hiveTable)
# self.export_config.setHiveTable(hiveDB=self.hiveDB, hiveTable=self.hiveTable)
self.common_operations.setHiveTable(Hive_DB=self.hiveDB, Hive_Table=self.hiveTable)
self.hiveTableIsTransactional = self.common_operations.isHiveTableTransactional(hiveDB=self.hiveDB, hiveTable=self.hiveTable)
self.hiveTableIsView = self.common_operations.isHiveTableView(hiveDB=self.hiveDB, hiveTable=self.hiveTable)
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
try:
self.export_config.remove_temporary_files()
except:
pass
raise
sys.exit(1)
logging.debug("Executing export_operations.__init__() - Finished")
def runStage(self, stage):
self.export_config.setStage(stage)
if self.export_config.common_config.getConfigValue(key = "export_stage_disable") == True:
logging.error("Stage execution disabled from DBImport configuration")
self.export_config.remove_temporary_files()
sys.exit(1)
tempStage = self.export_config.getStage()
if stage == tempStage:
return True
else:
return False
def setStage(self, stage, force=False):
self.export_config.setStage(stage, force=force)
def getStage(self):
return self.export_config.getStage()
def clearStage(self):
self.export_config.clearStage()
def saveRetryAttempt(self, stage):
self.export_config.saveRetryAttempt(stage)
def setStageOnlyInMemory(self):
self.export_config.setStageOnlyInMemory()
def convertStageStatisticsToJSON(self):
self.export_config.convertStageStatisticsToJSON()
def saveStageStatistics(self):
self.export_config.saveStageStatistics()
def saveIncrPendingValues(self):
self.export_config.saveIncrPendingValues()
def resetIncrMinMaxValues(self, maxValue):
self.export_config.resetIncrMinMaxValues(maxValue=maxValue)
def resetMaxValueFromTarget(self):
self.export_config.resetMaxValueFromTarget()
def removeHiveLocks(self):
if self.export_config.common_config.getConfigValue(key = "hive_remove_locks_by_force") == True:
self.common_operations.removeHiveLocksByForce(self.hiveExportTempDB, self.hiveExportTempTable)
def checkHiveTable(self, hiveDB, hiveTable):
if self.common_operations.checkHiveTable(hiveDB, hiveTable) == False:
logging.error("Hive table '%s' cant be found in '%s' database"%(hiveTable, hiveDB))
self.export_config.remove_temporary_files()
sys.exit(1)
def updateAtlasWithExportData(self):
if self.atlasOperation.checkAtlasSchema() == True:
targetSchema = self.export_config.targetSchema
targetTable = self.export_config.targetTable
if self.export_config.common_config.jdbc_servertype in (constant.ORACLE, constant.DB2_UDB):
targetSchema = self.export_config.targetSchema.upper()
targetTable = self.export_config.targetTable.upper()
if self.export_config.common_config.jdbc_servertype in (constant.POSTGRESQL):
targetSchema = self.export_config.targetSchema.lower()
targetTable = self.export_config.targetTable.lower()
configObject = self.export_config.common_config.getAtlasDiscoverConfigObject()
self.atlasOperation.setConfiguration(configObject)
startStopDict = self.export_config.stage.getStageStartStop(stage = self.export_config.exportTool)
# Fetch the remote system schema again as it might have been updated in the export
self.export_config.common_config.getJDBCTableDefinition(source_schema = targetSchema, source_table = targetTable, printInfo=False)
self.atlasOperation.source_columns_df = self.export_config.common_config.source_columns_df
self.atlasOperation.source_keys_df = self.export_config.common_config.source_keys_df
try:
self.atlasOperation.updateAtlasWithRDBMSdata(schemaName = targetSchema, tableName = targetTable)
self.atlasOperation.updateAtlasWithExportLineage(
hiveDB=self.hiveDB,
hiveTable=self.hiveTable,
hiveExportTempDB=self.export_config.hiveExportTempDB,
hiveExportTempTable=self.export_config.hiveExportTempTable,
targetSchema=targetSchema,
targetTable=targetTable,
tempTableNeeded=self.export_config.tempTableNeeded,
startStopDict=startStopDict,
fullExecutedCommand=self.export_config.fullExecutedCommand,
exportTool=self.export_config.exportTool)
except:
pass
def checkHiveDB(self, hiveDB):
try:
self.common_operations.checkHiveDB(hiveDB)
except databaseNotFound as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
self.export_config.remove_temporary_files()
raise
def getHiveTableSchema(self):
try:
self.export_config.updateLastUpdateFromHive()
self.export_config.saveColumnData(columnsDF = self.common_operations.getHiveColumns(self.hiveDB, self.hiveTable, includeType=True, includeComment=True, includeIdx=True))
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when reading and/or processing Hive table schema")
self.export_config.remove_temporary_files()
sys.exit(1)
def dropJDBCTable(self):
try:
if self.export_config.truncateTargetTable == True:
if self.export_config.common_config.checkJDBCTable(schema=self.targetSchema, table=self.targetTable) == True:
logging.info("Dropping Target Table")
self.export_config.common_config.dropJDBCTable(schema=self.targetSchema, table=self.targetTable)
else:
logging.warning("Nothing to drop. Target table does not exists")
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when truncating target table")
self.export_config.remove_temporary_files()
sys.exit(1)
def truncateJDBCTable(self, force=False):
if self.export_config.truncateTargetTable == True or force == True:
logging.info("Truncating Target Table")
self.export_config.common_config.truncateJDBCTable(schema=self.targetSchema, table=self.targetTable)
def createTargetTable(self):
try:
if self.export_config.checkJDBCTable() == False:
self.export_config.createTargetTable()
except SQLerror as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when creating the target table")
self.export_config.remove_temporary_files()
sys.exit(1)
def updateTargetTable(self):
try:
self.export_config.updateTargetTable()
except SQLerror as errMsg:
if "ORA-22859: invalid modification of columns" in str(errMsg):
# We get this message from Oracle when we try to change a column type that is not supported
if self.export_config.exportIsIncremental == False:
try:
logging.info("There is a column type change that is not supported by Oracle. But because this export is a full export, we will drop the Target table and recreate it automatically")
self.dropJDBCTable()
self.createTargetTable()
self.export_config.updateTargetTable()
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when updating the target table")
self.export_config.remove_temporary_files()
sys.exit(1)
else:
logging.error("There is a column type change required on the target table but Oracle doesnt support that change. Drop the table or disable the column is the only option. As this is an incremental export, we cant do that automatically as it might result in loss of data")
self.export_config.remove_temporary_files()
sys.exit(1)
else:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when updating the target table")
self.export_config.remove_temporary_files()
sys.exit(1)
def getJDBCTableRowCount(self):
try:
self.export_config.getJDBCTableRowCount()
except:
logging.exception("Fatal error when reading source table row count")
self.export_config.remove_temporary_files()
sys.exit(1)
def discoverAndAddTablesFromHive(self, dbalias, schema, dbFilter=None, tableFilter=None, addDBToTable=False, addCustomText=None, addCounterToTable=False, counterStart=None):
""" This is the main function to search for tables/view in Hive and add them to export_tables """
logging.debug("Executing export_operations.discoverAndAddTablesFromHive()")
errorDuringAdd = False
sourceDF = self.common_operations.getHiveTables(dbFilter=dbFilter, tableFilter=tableFilter)
if len(sourceDF) == 0:
print("There are no tables in the source database that we dont already have in DBImport")
self.export_config.remove_temporary_files()
sys.exit(0)
exportDF = self.export_config.getExportTables(dbalias=dbalias, schema=schema)
mergeDF = pd.merge(sourceDF, exportDF, on=None, how='outer', indicator='Exist')
mergeDF['targetTable'] = mergeDF['hiveTable']
discoveredTables = len(mergeDF.loc[mergeDF['Exist'] == 'left_only'])
if addCounterToTable == True or addDBToTable == True or addCustomText != None:
for index, row in mergeDF.iterrows():
if mergeDF.loc[index, 'Exist'] == 'left_only':
mergeDF.loc[index, 'targetTable'] = "_%s"%(mergeDF.loc[index, 'targetTable'])
if addCounterToTable == True:
if counterStart == None:
counterStart = "1"
numberLength=len(counterStart)
try:
startValue = int(counterStart)
except ValueError:
logging.error("The value specified for --counterStart must be a number")
self.export_config.remove_temporary_files()
sys.exit(1)
for index, row in mergeDF.iterrows():
if mergeDF.loc[index, 'Exist'] == 'left_only':
zeroToAdd = ""
while len(zeroToAdd) < (numberLength - len(str(startValue))):
zeroToAdd += "0"
mergeDF.loc[index, 'targetTable'] = "%s%s%s"%(zeroToAdd, startValue, mergeDF.loc[index, 'targetTable'])
startValue += 1
if addDBToTable == True:
for index, row in mergeDF.iterrows():
if mergeDF.loc[index, 'Exist'] == 'left_only':
mergeDF.loc[index, 'targetTable'] = "%s%s"%(mergeDF.loc[index, 'hiveDB'], mergeDF.loc[index, 'targetTable'])
if addCustomText != None:
for index, row in mergeDF.iterrows():
if mergeDF.loc[index, 'Exist'] == 'left_only':
mergeDF.loc[index, 'targetTable'] = "%s%s"%(addCustomText.lower().strip(), mergeDF.loc[index, 'targetTable'])
if discoveredTables == 0:
print("There are no tables in the source database that we dont already have in DBImport")
self.export_config.remove_temporary_files()
sys.exit(0)
# At this stage, we have discovered tables in the source system that we dont know about in DBImport
print("The following tables and/or views have been discovered in Hive and not found as export tables in DBImport")
print("")
print("%-20s %-40s %-30s %-20s %s"%("Hive DB", "Hive Table", "Connection Alias", "Schema", "Table/View"))
print("=============================================================================================================================")
for index, row in mergeDF.loc[mergeDF['Exist'] == 'left_only'].iterrows():
# if addSchemaToTable == True:
# hiveTable = "%s_%s"%(row['schema'].lower().strip(), row['table'].lower().strip())
# else:
# hiveTable = row['table'].lower()
# print("%-20s%-40s%-30s%-20s%s"%(hiveDB, hiveTable, dbalias, row['schema'], row['table']))
print("%-20s %-40s %-30s %-20s %s"%(row['hiveDB'], row['hiveTable'], dbalias, schema, row['targetTable']))
answer = input("Do you want to add these exports to DBImport? (y/N): ")
if answer == "y":
print("")
for index, row in mergeDF.loc[mergeDF['Exist'] == 'left_only'].iterrows():
# for index, row in mergeDFLeftOnly.iterrows():
# if addSchemaToTable == True:
# hiveTable = "%s_%s"%(row['schema'].lower().strip(), row['table'].lower().strip())
# else:
# hiveTable = row['table'].lower()
addResult = self.export_config.addExportTable(
dbalias=dbalias,
schema=schema,
table=row['targetTable'],
hiveDB=row['hiveDB'],
hiveTable=row['hiveTable'])
if addResult == False:
errorDuringAdd = True
if errorDuringAdd == False:
print("All tables saved successfully in DBImport")
else:
print("")
print("Not all tables was saved to DBImport. Please check log and output")
else:
print("")
print("Aborting")
logging.debug("Executing export_operations.discoverAndAddTablesFromHive() - Finished")
def clearValidationData(self):
try:
self.export_config.clearValidationData()
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when clearing validation data from previous exports")
self.export_config.remove_temporary_files()
sys.exit(1)
def isExportTempTableNeeded(self):
try:
self.tempTableNeeded = self.export_config.isExportTempTableNeeded(hiveTableIsTransactional = self.hiveTableIsTransactional, hiveTableIsView = self.hiveTableIsView)
except invalidConfiguration as errMsg:
logging.error(errMsg)
self.export_config.remove_temporary_files()
sys.exit(1)
except:
logging.exception("Fatal error when clearing row counts from previous exports")
self.export_config.remove_temporary_files()
sys.exit(1)
return self.tempTableNeeded
def getIncrMaxvalueFromHive(self, column=None, hiveDB=None, hiveTable=None):
logging.debug("Executing export_operations.getIncrMaxvalueFromHive()")
if hiveDB == None: hiveDB = self.hiveDB
if hiveTable == None: hiveTable = self.hiveTable
if column == None: column = self.export_config.incr_column
query = "select max(`%s`) from `%s`.`%s`"%(column, hiveDB, hiveTable)
resultDF = self.common_operations.executeHiveQuery(query)
maxValue = resultDF.loc[0][0]
logging.debug("Maxvalue: %s"%(maxValue))
logging.debug("Executing export_operations.getIncrMaxvalueFromHive() - Finished")
return maxValue
def updateStatisticsOnExportedTable(self,):
if self.common_operations.isHiveTableView(hiveDB = self.hiveDB, hiveTable = self.hiveTable) == False:
logging.info("Updating the Hive statistics on the exported table")
self.common_operations.updateHiveTableStatistics(self.hiveDB, self.hiveTable)
def createExportTempTable(self):
logging.debug("Executing export_operations.createExportTempTable()")
if self.common_operations.checkHiveTable(self.hiveExportTempDB, self.hiveExportTempTable) == False:
# Target table does not exist. We just create it in that case
logging.info("Creating Export Temp table %s.%s in Hive"%(self.hiveExportTempDB, self.hiveExportTempTable))
if self.export_config.exportTool == "sqoop":
columnsDF = self.export_config.getColumnsFromConfigDatabase(excludeColumns=True, getReplacedColumnTypes=True)
else:
columnsDF = self.export_config.getColumnsFromConfigDatabase(excludeColumns=True, getReplacedColumnTypes=False)
# columnsDF = self.export_config.getColumnsFromConfigDatabase(excludeColumns=True)
query = "create table `%s`.`%s` ("%(self.hiveExportTempDB, self.hiveExportTempTable)
firstLoop = True
for index, row in columnsDF.iterrows():
targetColumnName = row['targetColumnName']
if targetColumnName != None and targetColumnName.strip() != "":
columnName = targetColumnName
else:
columnName = row['hiveColumnName']
if firstLoop == False: query += ", "
query += "`%s` %s"%(columnName, row['columnType'])
if row['comment'] != None:
query += " COMMENT \"%s\""%(row['comment'])
firstLoop = False
query += ") STORED AS ORC TBLPROPERTIES ('orc.compress'='ZLIB')"
self.common_operations.executeHiveQuery(query)
self.common_operations.reconnectHiveMetaStore()
logging.debug("Executing export_operations.createExportTempTable() - Finished")
def connectToHive(self,):
logging.debug("Executing export_operations.connectToHive()")
try:
self.common_operations.connectToHive()
except Exception as ex:
logging.error(ex)
self.export_config.remove_temporary_files()
sys.exit(1)
if self.globalHiveConfigurationSet == False:
self.globalHiveConfigurationSet = True
if self.export_config.hiveJavaHeap != None:
query = "set hive.tez.container.size=%s"%(self.export_config.hiveJavaHeap)
self.common_operations.executeHiveQuery(query)
logging.debug("Executing export_operations.connectToHive() - Finished")
def remove_temporary_files(self):
self.export_config.remove_temporary_files()
def checkTimeWindow(self):
self.export_config.checkTimeWindow()
def updateExportTempTable(self):
""" Update the Export Temp table based on the column information in the configuration database """
logging.debug("Executing export_operations.updateExportTempTable()")
hiveDB = self.hiveExportTempDB
hiveTable = self.hiveExportTempTable
columnsHive = self.common_operations.getHiveColumns(hiveDB, hiveTable, includeType=True, includeIdx=False)
if self.export_config.exportTool == "sqoop":
columnsConfig = self.export_config.getColumnsFromConfigDatabase(excludeColumns=True, getReplacedColumnTypes=True)
else:
columnsConfig = self.export_config.getColumnsFromConfigDatabase(excludeColumns=True, getReplacedColumnTypes=False)
columnsConfig.rename(columns={'hiveColumnName':'name', 'columnType':'type'}, inplace=True)
for index, row in columnsConfig.iterrows():
targetColumnName = row['targetColumnName']
if targetColumnName != None and targetColumnName.strip() != "":
columnsConfig.loc[index, 'name'] = targetColumnName  # .loc instead of chained .iloc[...][...] so the assignment actually updates the frame
columnsConfig.drop('targetColumnName', axis=1, inplace=True)
# Check for missing columns
columnsConfigOnlyName = columnsConfig.filter(['name'])
columnsHiveOnlyName = columnsHive.filter(['name'])
columnsMergeOnlyName = pd.merge(columnsConfigOnlyName, columnsHiveOnlyName, on=None, how='outer', indicator='Exist')
columnsConfigCount = len(columnsConfigOnlyName)
columnsHiveCount = len(columnsHiveOnlyName)
columnsMergeLeftOnlyCount = len(columnsMergeOnlyName.loc[columnsMergeOnlyName['Exist'] == 'left_only'])
columnsMergeRightOnlyCount = len(columnsMergeOnlyName.loc[columnsMergeOnlyName['Exist'] == 'right_only'])
logging.debug("columnsConfigOnlyName")
logging.debug(columnsConfigOnlyName)
logging.debug("================================================================")
logging.debug("columnsHiveOnlyName")
logging.debug(columnsHiveOnlyName)
logging.debug("================================================================")
logging.debug("columnsMergeOnlyName")
logging.debug(columnsMergeOnlyName)
logging.debug("")
if columnsConfigCount == columnsHiveCount and columnsMergeLeftOnlyCount > 0:
# The number of columns in config and Hive is the same, but there is a difference in name. This is most likely because of a rename of one or more of the columns
# To handle this, we try a rename. This might fail if the column types were also changed to an incompatible type
# The logic here is to
# 1. get all columns from mergeDF that exists in Left_Only
# 2. Get the position in configDF with that column name
# 3. Get the column in the same position from HiveDF
# 4. Check if that column name exists in the mergeDF with Right_Only. If it does, the column was just renamed
for index, row in columnsMergeOnlyName.loc[columnsMergeOnlyName['Exist'] == 'left_only'].iterrows():
rowInConfig = columnsConfig.loc[columnsConfig['name'] == row['name']].iloc[0]
indexInConfig = columnsConfig.loc[columnsConfig['name'] == row['name']].index.item()
rowInHive = columnsHive.iloc[indexInConfig]
if len(columnsMergeOnlyName.loc[(columnsMergeOnlyName['Exist'] == 'right_only') & (columnsMergeOnlyName['name'] == rowInHive["name"])]) > 0:
# This is executed if the column is renamed and exists in the same position
logging.debug("Name in config: %s"%(rowInConfig["name"]))
logging.debug("Type in config: %s"%(rowInConfig["type"]))
logging.debug("Index in config: %s"%(indexInConfig))
logging.debug("--------------------")
logging.debug("Name in Hive: %s"%(rowInHive["name"]))
logging.debug("Type in Hive: %s"%(rowInHive["type"]))
logging.debug("======================================")
logging.debug("")
query = "alter table `%s`.`%s` change column `%s` `%s` %s"%(hiveDB, hiveTable, rowInHive['name'], rowInConfig['name'], rowInConfig['type'])
self.common_operations.executeHiveQuery(query)
self.export_config.logHiveColumnRename(rowInConfig['name'], rowInHive["name"], hiveDB=hiveDB, hiveTable=hiveTable)
if rowInConfig["type"] != rowInHive["type"]:
self.export_config.logHiveColumnTypeChange(rowInConfig['name'], rowInConfig['type'], previous_columnType=rowInHive["type"], hiveDB=hiveDB, hiveTable=hiveTable)
else:
if columnsMergeLeftOnlyCount == 1 and columnsMergeRightOnlyCount == 1:
# So the columns are not in the same position, but it's only one column that changed. In that case, we just rename that one column
rowInMergeLeft = columnsMergeOnlyName.loc[columnsMergeOnlyName['Exist'] == 'left_only'].iloc[0]
rowInMergeRight = columnsMergeOnlyName.loc[columnsMergeOnlyName['Exist'] == 'right_only'].iloc[0]
rowInConfig = columnsConfig.loc[columnsConfig['name'] == rowInMergeLeft['name']].iloc[0]
rowInHive = columnsHive.loc[columnsHive['name'] == rowInMergeRight['name']].iloc[0]
logging.debug(rowInConfig["name"])
logging.debug(rowInConfig["type"])
logging.debug("--------------------")
logging.debug(rowInHive["name"])
logging.debug(rowInHive["type"])
query = "alter table `%s`.`%s` change column `%s` `%s` %s"%(hiveDB, hiveTable, rowInHive['name'], rowInConfig['name'], rowInHive['type'])
self.common_operations.executeHiveQuery(query)
self.export_config.logHiveColumnRename(rowInConfig['name'], rowInHive["name"], hiveDB=hiveDB, hiveTable=hiveTable)
self.common_operations.reconnectHiveMetaStore()
columnsHive = self.common_operations.getHiveColumns(hiveDB, hiveTable, includeType=True, includeIdx=False)
columnsHiveOnlyName = columnsHive.filter(['name'])
columnsMergeOnlyName = pd.merge(columnsConfigOnlyName, columnsHiveOnlyName, on=None, how='outer', indicator='Exist')
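# Hedged, self-contained sketch of the rename-detection idea used above
# (illustrative column names only):
#
#   config = pd.DataFrame({'name': ['id', 'customer_name']})  # desired columns
#   hive = pd.DataFrame({'name': ['id', 'cust_name']})        # current Hive columns
#   merged = pd.merge(config, hive, on=None, how='outer', indicator='Exist')
#   # rows marked 'left_only' exist only in the config (new names),
#   # rows marked 'right_only' exist only in Hive (old names); pairing them by
#   # position yields the rename candidates that the code above turns into
#   # "alter table ... change column" statements.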
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas_datareader.data as web
import pandas as pd
import datetime
import time
import talib
from seven import bs_k_data_stock, pro_daily_stock, json_to_str
from MplVisualIf import MplVisualIf, MplTypesDraw, DefTypesPool
from MultiGraphIf import MultiGraphIf
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# display options
pd.set_option('display.expand_frame_repr', False)  # False: do not wrap wide frames
pd.set_option('display.max_rows', 20)  # maximum number of rows to display
pd.set_option('display.max_columns', 10)  # maximum number of columns to display
pd.set_option('precision', 2)  # number of digits after the decimal point
app = MplVisualIf()
########################################### basic ################################################################
def draw_kline_chart(stock_dat):
# draw the candlestick (K-line) chart
"""
fig = plt.figure(figsize=(14, 7), dpi=100, facecolor="white") # 创建fig对象
graph_kline = fig.add_subplot(1, 1, 1) # 创建子图
mpf.candlestick2_ochl(graph_kline, stock_dat.Open, stock_dat.Close, stock_dat.High, stock_dat.Low, width=0.5,
colorup='r', colordown='g') # 绘制K线走势
ohlc = list(zip(np.arange(0,len(stock_dat.index)),stock_dat.Open,stock_dat.Close,stock_dat.High,stock_dat.Low)) # 使用zip方法生成数据列表
mpf.candlestick_ochl(graph_kline, ohlc, width=0.2, colorup='r', colordown='g', alpha=1.0) # 绘制K线走势
graph_kline.set_title(u"000651 格力电器-日K线")
graph_kline.set_xlabel("日期")
graph_kline.set_ylabel(u"价格")
graph_kline.set_xlim(0, len(stock_dat.index)) # 设置x轴的范围
graph_kline.set_xticks(range(0, len(stock_dat.index), 15)) # X轴刻度设定 每15天标一个日期
graph_kline.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] \
for index in graph_kline.get_xticks()]) # 标签设置为日期
fig.autofmt_xdate(rotation=45) # 避免x轴日期刻度标签的重叠 将每个ticker标签倾斜45度
plt.show()
"""
layout_dict = {'figsize': (12, 6),
'index': stock_dat.index,
'draw_kind': {'ochl':
{'Open': stock_dat.Open,
'Close': stock_dat.Close,
'High': stock_dat.High,
'Low': stock_dat.Low
}
},
'title': u"000651 格力电器-日K线",
'ylabel': u"价格"}
app.fig_output(**layout_dict)
def draw_volume_chart(stock_dat):
# draw the volume chart
bar_red = np.where(stock_dat.Open < stock_dat.Close, stock_dat.Volume, 0)  # volume bars for up days (red)
bar_green = np.where(stock_dat.Open > stock_dat.Close, stock_dat.Volume, 0)  # volume bars for down days (green)
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'bar':
{'bar_red': bar_red,
'bar_green': bar_green
}
},
'title': u"000651 格力电器-成交量",
'ylabel': u"成交量"}
app.fig_output(**layout_dict)
def draw_sma_chart(stock_dat):
# draw the simple moving average (SMA) chart
stock_dat['SMA20'] = stock_dat.Close.rolling(window=20).mean()
stock_dat['SMA30'] = stock_dat.Close.rolling(window=30).mean()
stock_dat['SMA60'] = stock_dat.Close.rolling(window=60).mean()
"""
fig = plt.figure(figsize=(14, 5), dpi=100, facecolor="white") # 创建fig对象
graph_sma = fig.add_subplot(1,1,1) # 创建子图
graph_sma.plot(np.arange(0, len(stock_dat.index)), stock_dat['SMA20'],'black', label='SMA20',lw=1.0)
graph_sma.plot(np.arange(0, len(stock_dat.index)), stock_dat['SMA30'],'green',label='SMA30', lw=1.0)
graph_sma.plot(np.arange(0, len(stock_dat.index)), stock_dat['SMA60'],'blue',label='SMA60', lw=1.0)
graph_sma.legend(loc='best')
graph_sma.set_title(u"000651 格力电器-均线")
graph_sma.set_ylabel(u"价格")
graph_sma.set_xlim(0,len(stock_dat.index)) # 设置x轴的范围
graph_sma.set_xticks(range(0,len(stock_dat.index),15)) # X轴刻度设定 每15天标一个日期
graph_sma.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] \
for index in graph_sma.get_xticks()]) # 标签设置为日期
fig.autofmt_xdate(rotation=45) # 避免x轴日期刻度标签的重叠 将每个ticker标签倾斜45度
plt.show()
"""
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'line':
{'SMA20': stock_dat.SMA20,
'SMA30': stock_dat.SMA30,
'SMA60': stock_dat.SMA60
}
},
'title': u"000651 格力电器-均线",
'ylabel': u"价格",
'xlabel': u"日期",
'xticks': 15,
'legend': u'best',
'xticklabels': '%Y-%m-%d'}
app.fig_output(**layout_dict)
def draw_kdj_chart(stock_dat):
# draw the KDJ chart
low_list = stock_dat['Low'].rolling(9, min_periods=1).min()
high_list = stock_dat['High'].rolling(9, min_periods=1).max()
rsv = (stock_dat['Close'] - low_list) / (high_list - low_list) * 100
stock_dat['K'] = rsv.ewm(com=2, adjust=False).mean()
stock_dat['D'] = stock_dat['K'].ewm(com=2, adjust=False).mean()
stock_dat['J'] = 3 * stock_dat['K'] - 2 * stock_dat['D']
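# KDJ recap (standard definition, matching the code above):
#   RSV = (Close - min(Low, 9)) / (max(High, 9) - min(Low, 9)) * 100
#   K   = EWM(RSV, com=2, adjust=False)   i.e. K_t = 2/3 * K_{t-1} + 1/3 * RSV_t
#   D   = EWM(K, com=2, adjust=False)
#   J   = 3 * K - 2 * D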
"""
fig = plt.figure(figsize=(14, 5), dpi=100, facecolor="white")#创建fig对象
graph_kdj = fig.add_subplot(1,1,1) #创建子图
graph_kdj.plot(np.arange(0, len(stock_dat.index)), stock_dat['K'], 'blue', label='K') # K
graph_kdj.plot(np.arange(0, len(stock_dat.index)), stock_dat['D'], 'g--', label='D') # D
graph_kdj.plot(np.arange(0, len(stock_dat.index)), stock_dat['J'], 'r-', label='J') # J
graph_kdj.legend(loc='best', shadow=True, fontsize='10')
graph_kdj.set_ylabel(u"KDJ")
graph_kdj.set_xlabel("日期")
graph_kdj.set_xlim(0, len(stock_dat.index)) # 设置x轴的范围
graph_kdj.set_xticks(range(0, len(stock_dat.index), 15)) # X轴刻度设定 每15天标一个日期
graph_kdj.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] \
for index in graph_kdj.get_xticks()]) # 标签设置为日期
fig.autofmt_xdate(rotation=45) # 避免x轴日期刻度标签的重叠 将每个ticker标签倾斜45度
plt.show()
"""
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'line':
{'K': stock_dat.K,
'D': stock_dat.D,
'J': stock_dat.J
}
},
'title': u"000651 格力电器-KDJ",
'ylabel': u"KDJ",
'legend': u'best'}
app.fig_output(**layout_dict)
def draw_kdj1_chart(stock_dat):
# draw the KDJ chart (explicit for-loop implementation)
xd = 9 - 1
date = stock_dat.index.to_series()
RSV = pd.Series(np.zeros(len(date) - xd), index=date.index[xd:])
Kvalue = pd.Series(0.0, index=RSV.index)
Dvalue = pd.Series(0.0, index=RSV.index)
Kvalue[0], Dvalue[0] = 50, 50
for day_ind in range(xd, len(stock_dat.index)):
RSV[date[day_ind]] = (stock_dat.Close[day_ind] - stock_dat.Low[day_ind - xd:day_ind + 1].min()) / \
(stock_dat.High[day_ind - xd:day_ind + 1].max() - stock_dat.Low[
day_ind - xd:day_ind + 1].min()) * 100
if day_ind > xd:
index = day_ind - xd
Kvalue[index] = 2.0 / 3 * Kvalue[index - 1] + RSV[date[day_ind]] / 3
Dvalue[index] = 2.0 / 3 * Dvalue[index - 1] + Kvalue[index] / 3
stock_dat['RSV'] = RSV
stock_dat['K'] = Kvalue
stock_dat['D'] = Dvalue
stock_dat['J'] = 3 * Kvalue - 2 * Dvalue
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'line':
{'K': stock_dat.K,
'D': stock_dat.D,
'J': stock_dat.J
}
},
'title': u"000651 格力电器-KDJ",
'ylabel': u"KDJ",
'legend': u'best'}
app.fig_output(**layout_dict)
def draw_macd_chart(stock_dat):
# draw the MACD chart
macd_dif = stock_dat['Close'].ewm(span=12, adjust=False).mean() - stock_dat['Close'].ewm(span=26,
adjust=False).mean()
macd_dea = macd_dif.ewm(span=9, adjust=False).mean()
macd_bar = 2 * (macd_dif - macd_dea)
bar_red = np.where(macd_bar > 0, macd_bar, 0)  # histogram bars above zero (red)
bar_green = np.where(macd_bar < 0, macd_bar, 0)  # histogram bars below zero (green)
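# MACD recap (matching the computation above):
#   DIF = EMA(Close, span=12) - EMA(Close, span=26)
#   DEA = EMA(DIF, span=9)
#   BAR = 2 * (DIF - DEA)   # histogram, split into red/green halves for plotting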
# macd_dif, macd_dea, macd_bar = talib.MACD(stock_dat['Close'].values, fastperiod=12, slowperiod=26, signalperiod=9)
"""
fig = plt.figure(figsize=(14, 5), dpi=100, facecolor="white") # 创建fig对象
graph_macd = fig.add_subplot(1,1,1) # 创建子图
graph_macd.plot(np.arange(0, len(stock_dat.index)), macd_dif, 'red', label='macd dif') # dif
graph_macd.plot(np.arange(0, len(stock_dat.index)), macd_dea, 'blue', label='macd dea') # dea
graph_macd.bar(np.arange(0, len(stock_dat.index)), bar_red, facecolor='red')
graph_macd.bar(np.arange(0, len(stock_dat.index)), bar_green, facecolor='green')
graph_macd.legend(loc='best',shadow=True, fontsize ='10')
graph_macd.set_ylabel(u"MACD")
graph_macd.set_xlabel("日期")
graph_macd.set_xlim(0,len(stock_dat.index)) # 设置x轴的范围
graph_macd.set_xticks(range(0,len(stock_dat.index),15)) # X轴刻度设定 每15天标一个日期
graph_macd.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] for index in graph_macd.get_xticks()]) # 标签设置为日期
fig.autofmt_xdate(rotation=45) # 避免x轴日期刻度标签的重叠 将每个ticker标签倾斜45度
plt.show()
"""
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'bar':
{'bar_red': bar_red,
'bar_green': bar_green
},
'line':
{'macd dif': macd_dif,
'macd dea': macd_dea
}
},
'title': u"000651 格力电器-MACD",
'ylabel': u"MACD",
'legend': u'best'}
app.fig_output(**layout_dict)
########################################### advance ################################################################
def draw_cross_annotate(stock_dat):
# annotate golden crosses and death crosses of the moving averages
# graph_sma.legend(loc='upper left')
# graph_range = stock_dat.High.max() - stock_dat.Low.min()
# graph_sma.set_ylim(stock_dat.Low.min() - graph_range * 0.25, stock_dat.High.max()) # 设置y轴的范围
# compute the moving averages
stock_dat['Ma20'] = stock_dat.Close.rolling(window=20).mean() # pd.rolling_mean(stock_dat.Close,window=20)
stock_dat['Ma30'] = stock_dat.Close.rolling(window=30).mean() # pd.rolling_mean(stock_dat.Close,window=30)
# take the sign of the difference between the short- and long-term moving averages
list_diff = np.sign(stock_dat['Ma20'] - stock_dat['Ma30'])
# print(list_diff)
list_signal = np.sign(list_diff - list_diff.shift(1))
# print(list_signal)
down_cross = stock_dat[list_signal < 0]
up_cross = stock_dat[list_signal > 0]
# iterate over the crossings and annotate golden/death cross markers on the chart
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'line':
{'SMA-20': stock_dat.Ma20,
'SMA-30': stock_dat.Ma30
},
'annotate':
{u'死叉':
{'andata': down_cross,
'va': 'top',
'xy_y': 'Ma20',
'xytext': (-30, -stock_dat['Ma20'].mean() * 0.5),
'fontsize': 8,
'arrow': dict(facecolor='green', shrink=0.1)
},
u'金叉':
{'andata': up_cross,
'va': 'bottom',
'xy_y': 'Ma20',
'xytext': (-30, stock_dat['Ma20'].mean() * 0.5),
'fontsize': 8,
'arrow': dict(facecolor='red', shrink=0.1)
}
}
},
'title': u"000651 格力电器-均线交叉",
'ylabel': u"价格",
'xlabel': u"日期",
'legend': u'best'}
app.fig_output(**layout_dict)
def apply_gap(changeRatio, preLow, preHigh, Low, High, threshold):
jump_power = 0
if (changeRatio > 0) and ((Low - preHigh) > threshold):
# upward gap: (today's low - yesterday's high) / threshold
jump_power = (Low - preHigh) / threshold  # positive
elif (changeRatio < 0) and ((preLow - High) > threshold):
# downward gap: (today's high - yesterday's low) / threshold
jump_power = (High - preLow) / threshold  # negative
return jump_power
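# Worked example with illustrative numbers (threshold = 0.5):
#   apply_gap(+3.0, preLow=39.0, preHigh=40.0, Low=41.0, High=42.0, threshold=0.5)
#   # -> (41.0 - 40.0) / 0.5 = 2.0   (upward gap)
#   apply_gap(-3.0, preLow=40.0, preHigh=41.0, Low=38.0, High=39.0, threshold=0.5)
#   # -> (39.0 - 40.0) / 0.5 = -2.0  (downward gap)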
def draw_gap_annotate(stock_dat):
# draw the candlestick chart
# detect price gaps
jump_threshold = stock_dat.Close.median() * 0.01 # gap threshold: median close price * 0.01
stock_dat['changeRatio'] = stock_dat.Close.pct_change() * 100 # percentage change (close - prev close) / prev close * 100%, used to classify up/down gaps
stock_dat['preLow'] = stock_dat.Low.shift(1) # previous day's low
stock_dat['preHigh'] = stock_dat.High.shift(1) # previous day's high
stock_dat = stock_dat.assign(jump_power=0)
# for kl_index in np.arange(0, df_stockload.shape[0]):
# today = df_stockload.iloc[kl_index]
# note: A value is trying to be set on a copy of a slice from a DataFrame
# involve change the value of df_stockload but iloc just copy the dataframe
stock_dat['jump_power'] = stock_dat.apply(lambda row: apply_gap(row['changeRatio'],
row['preLow'],
row['preHigh'],
row['Low'],
row['High'],
jump_threshold),
axis=1)
up_jump = stock_dat[(stock_dat.changeRatio > 0) & (stock_dat.jump_power > 0)]
down_jump = stock_dat[(stock_dat.changeRatio < 0) & (stock_dat.jump_power < 0)]
layout_dict = {'figsize': (14, 7),
'index': stock_dat.index,
'draw_kind': {'ochl': # candlestick chart
{'Open': stock_dat.Open,
'Close': stock_dat.Close,
'High': stock_dat.High,
'Low': stock_dat.Low
},
'annotate':
{u'up':
{'andata': up_jump,
'va': 'top',
'xy_y': 'preHigh',
'xytext': (0, -stock_dat['Close'].mean() * 0.5),
'fontsize': 8,
'arrow': dict(facecolor='red', shrink=0.1)
},
u'down':
{'andata': down_jump,
'va': 'bottom',
'xy_y': 'preLow',
'xytext': (0, stock_dat['Close'].mean() * 0.5),
'fontsize': 8,
'arrow': dict(facecolor='green', shrink=0.1)
}
}
},
'title': u"000651 格力电器-跳空缺口",
'ylabel': u"价格"}
app.fig_output(**layout_dict)
print(up_jump.filter(['jump_power', 'preClose', 'changeRatio', 'Close', 'Volume'])) # upward gaps, columns shown in this order
"""
jump_power changeRatio Close Volume
Date
2018-10-22 1.07 3.83 40.11 8.51e+07
2019-01-09 1.58 3.22 37.51 1.06e+08
2019-04-09 11.48 10.00 51.93 1.08e+07
2019-04-10 6.40 9.99 57.12 3.23e+08
"""
print(down_jump.filter(['jump_power', 'preClose', 'changeRatio', 'Close', 'Volume'])) # downward gaps, columns shown in this order
"""
jump_power changeRatio Close Volume
Date
2018-10-08 -1.22 -5.65 37.93 7.15e+07
"""
format = lambda x: '%.2f' % x
up_jump = up_jump[(np.abs(up_jump.changeRatio) > 2) & (up_jump.Volume > up_jump.Volume.median())] # abs() takes the absolute value
up_jump = up_jump.applymap(format) # values become str after formatting
print(up_jump.filter(['jump_power', 'preClose', 'changeRatio', 'Close', 'Volume'])) # show only these columns, in this order
"""
jump_power changeRatio Close Volume
Date
2019-01-09 1.58 3.22 37.51 105806077.00
2019-04-10 6.40 9.99 57.12 322875034.00
"""
down_jump = down_jump[
(np.abs(down_jump.changeRatio) > 2) & (down_jump.Volume > down_jump.Volume.median())] # abs() takes the absolute value
down_jump = down_jump.applymap(format) # values become str after formatting
print(down_jump.filter(['jump_power', 'preClose', 'changeRatio', 'Close', 'Volume'])) # show only these columns, in this order
"""
Empty DataFrame
Columns: [jump_power, changeRatio, Close, Volume]
Index: []
"""
def draw_kweek_chart(stock_dat):
# resample to weekly bars
# rule='W' weekly; how=last() last day of period; closed='right' right-closed; label='right' right label
# print(stock_dat.resample('W', closed='right', label='right').last().head())
Freq_T = 'W-FRI'
# print(stock_dat.resample(Freq_T, closed='right', label='right').last().head())
# weekly Close equals the Close of the last trading day of the week
week_dat = stock_dat.resample(Freq_T, closed='right', label='right').last()
# weekly Open equals the Open of the first trading day of the week
week_dat.Open = stock_dat.Open.resample(Freq_T, closed='right', label='right').first()
# weekly High equals the maximum High within the week
week_dat.High = stock_dat.High.resample(Freq_T, closed='right', label='right').max()
# weekly Low equals the minimum Low within the week
week_dat.Low = stock_dat.Low.resample(Freq_T, closed='right', label='right').min()
# weekly Volume equals the sum of Volume over the week
week_dat.Volume = stock_dat.Volume.resample(Freq_T, closed='right', label='right').sum()
# print(week_dat.head())
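# Hedged alternative (equivalent for the OHLCV columns above, in a single pass):
#
#   week_dat = stock_dat.resample(Freq_T, closed='right', label='right').agg(
#       {'Open': 'first', 'High': 'max', 'Low': 'min', 'Close': 'last', 'Volume': 'sum'})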
layout_dict = {'figsize': (14, 7),
'index': week_dat.index,
'draw_kind': {'ochl':
{'Open': week_dat.Open,
'Close': week_dat.Close,
'High': week_dat.High,
'Low': week_dat.Low
}
},
'title': u"000651 格力电器-周K线",
'ylabel': u"价格"}
app.fig_output(**layout_dict)
def draw_fibonacci_chart(stock_dat):
# golden-ratio (Fibonacci retracement) levels
Fib_max = stock_dat.Close.max()
Fib_maxid = stock_dat.index.get_loc(stock_dat.Close.idxmax())
Fib_min = stock_dat.Close.min()
Fib_minid = stock_dat.index.get_loc(stock_dat.Close.idxmin())
Fib_382 = (Fib_max - Fib_min) * 0.382 + Fib_min
Fib_618 = (Fib_max - Fib_min) * 0.618 + Fib_min
print(u'黄金分割0.382:{}'.format(round(Fib_382, 2)))
print(u'黄金分割0.618:{}'.format(round(Fib_618, 2)))
# 黄金分割0.382:46.88
# 黄金分割0.618:53.8
max_df = stock_dat[stock_dat.Close == stock_dat.Close.max()]
min_df = stock_dat[stock_dat.Close == stock_dat.Close.min()]
print(max_df)
print(min_df)
# graph_kline.legend(['0.382', '0.618'], loc='upper left')
# draw candlestick chart + support/resistance levels
layout_dict = {'figsize': (14, 7),
'index': stock_dat.index,
'draw_kind': {'ochl': # candlestick chart
{'Open': stock_dat.Open,
'Close': stock_dat.Close,
'High': stock_dat.High,
'Low': stock_dat.Low
},
'hline':
{'Fib_382':
{'pos': Fib_382,
'c': 'r'
},
'Fib_618':
{'pos': Fib_618,
'c': 'g'
}
},
'annotate':
{u'max':
{'andata': max_df,
'va': 'top',
'xy_y': 'High',
'xytext': (-30, stock_dat.Close.mean()),
'fontsize': 8,
'arrow': dict(facecolor='red', shrink=0.1)
},
u'min':
{'andata': min_df,
'va': 'bottom',
'xy_y': 'Low',
'xytext': (-30, -stock_dat.Close.mean()),
'fontsize': 8,
'arrow': dict(facecolor='green', shrink=0.1)
}
}
},
'title': u"000651 格力电器-支撑/阻力位",
'ylabel': u"价格",
'legend': u'best'}
app.fig_output(**layout_dict)
########################################### talib ################################################################
def draw_tasma_chart(stock_dat):
# plot SMAs computed with talib
stock_dat['SMA20'] = talib.SMA(stock_dat.Close, timeperiod=20)
stock_dat['SMA30'] = talib.SMA(stock_dat.Close, timeperiod=30)
stock_dat['SMA60'] = talib.SMA(stock_dat.Close, timeperiod=60)
stock_dat['SMA20'].fillna(method='bfill', inplace=True)
stock_dat['SMA30'].fillna(method='bfill', inplace=True)
stock_dat['SMA60'].fillna(method='bfill', inplace=True)
# stock_dat['Ma20'] = talib.MA(stock_dat.Close, timeperiod=20, matype=0)
# stock_dat['Ma30'] = talib.MA(stock_dat.Close, timeperiod=30, matype=1)
# stock_dat['Ma60'] = talib.MA(stock_dat.Close, timeperiod=60, matype=2)
"""
fig = plt.figure(figsize=(14, 5), dpi=100, facecolor="white")  # create the figure object
graph_sma = fig.add_subplot(1,1,1)  # create the subplot
graph_sma.plot(np.arange(0, len(stock_dat.index)), stock_dat['Ma20'],'black', label='M20',lw=1.0)
graph_sma.plot(np.arange(0, len(stock_dat.index)), stock_dat['Ma30'],'green',label='M30', lw=1.0)
graph_sma.plot(np.arange(0, len(stock_dat.index)), stock_dat['Ma60'],'blue',label='M60', lw=1.0)
graph_sma.legend(loc='best')
graph_sma.set_title(u"000651 格力电器-MA-talib")
graph_sma.set_ylabel(u"价格")
graph_sma.set_xlim(0,len(stock_dat.index))  # set the x-axis range
graph_sma.set_xticks(range(0,len(stock_dat.index),15))  # x-axis ticks: one date label every 15 days
graph_sma.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] for index in graph_sma.get_xticks()])  # use dates as tick labels
fig.autofmt_xdate(rotation=45)  # rotate each tick label 45 degrees to avoid overlapping date labels
plt.show()
"""
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'line':
{'SMA20': stock_dat.SMA20,
'SMA30': stock_dat.SMA30,
'SMA60': stock_dat.SMA60
}
},
'title': u"000651 格力电器-SMA-talib",
'ylabel': u"价格",
'legend': u'best'}
app.fig_output(**layout_dict)
def draw_tamacd_chart(stock_dat):
# plot MACD computed with talib
macd_dif, macd_dea, macd_bar = talib.MACD(stock_dat['Close'].values, fastperiod=12, slowperiod=26, signalperiod=9)
# RuntimeWarning: invalid value encountered in greater
# RuntimeWarning: invalid value encountered in less
# solve the problem
macd_dif[np.isnan(macd_dif)], macd_dea[np.isnan(macd_dea)], macd_bar[np.isnan(macd_bar)] = 0, 0, 0
bar_red = np.where(macd_bar > 0, 2 * macd_bar, 0)  # bars where the MACD histogram > 0
bar_green = np.where(macd_bar < 0, 2 * macd_bar, 0)  # bars where the MACD histogram < 0
"""
fig = plt.figure(figsize=(14, 5), dpi=100, facecolor="white")  # create the figure object
graph_macd = fig.add_subplot(1,1,1)  # create the subplot
graph_macd.plot(np.arange(0, len(stock_dat.index)), macd_dif, 'red', label='macd dif') # dif
graph_macd.plot(np.arange(0, len(stock_dat.index)), macd_dea, 'blue', label='macd dea') # dea
graph_macd.bar(np.arange(0, len(stock_dat.index)), bar_red, facecolor='red')
graph_macd.bar(np.arange(0, len(stock_dat.index)), bar_green, facecolor='green')
graph_macd.legend(loc='best',shadow=True, fontsize ='10')
graph_macd.set_ylabel(u"MACD-talib")
graph_macd.set_xlabel("日期")
graph_macd.set_xlim(0,len(stock_dat.index))  # set the x-axis range
graph_macd.set_xticks(range(0,len(stock_dat.index),15))  # x-axis ticks: one date label every 15 days
graph_macd.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] for index in graph_macd.get_xticks()])  # use dates as tick labels
fig.autofmt_xdate(rotation=45)  # rotate each tick label 45 degrees to avoid overlapping date labels
plt.show()
"""
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'bar':
{'bar_red': bar_red,
'bar_green': bar_green
},
'line':
{'macd dif': macd_dif,
'macd dea': macd_dea
}
},
'title': u"000651 格力电器-MACD-talib",
'ylabel': u"MACD",
'legend': u'best'}
app.fig_output(**layout_dict)
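# Hedged cross-check sketch (not in the original): MACD can be recomputed with pandas ewm.
# talib seeds its EMAs differently, so the earliest values will not match talib.MACD exactly.
def macd_with_pandas(close, fast=12, slow=26, signal=9):
    dif = close.ewm(span=fast, adjust=False).mean() - close.ewm(span=slow, adjust=False).mean()
    dea = dif.ewm(span=signal, adjust=False).mean()
    bar = dif - dea  # histogram; the chart above scales it by 2 before plotting
    return dif, dea, bar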
def draw_takdj_chart(stock_dat):
# plot KDJ computed with talib
stock_dat['K'], stock_dat['D'] = talib.STOCH(stock_dat.High.values, stock_dat.Low.values, stock_dat.Close.values, \
fastk_period=9, slowk_period=3, slowk_matype=0, slowd_period=3,
slowd_matype=0)
stock_dat['K'].fillna(0, inplace=True), stock_dat['D'].fillna(0, inplace=True)
stock_dat['J'] = 3 * stock_dat['K'] - 2 * stock_dat['D']
"""
fig = plt.figure(figsize=(14, 5), dpi=100, facecolor="white")  # create the figure object
graph_kdj = fig.add_subplot(1,1,1)  # create the subplot
graph_kdj.plot(np.arange(0, len(stock_dat.index)), stock_dat['K'], 'blue', label='K') # K
graph_kdj.plot(np.arange(0, len(stock_dat.index)), stock_dat['D'], 'g--', label='D') # D
graph_kdj.plot(np.arange(0, len(stock_dat.index)), stock_dat['J'], 'r-', label='J') # J
graph_kdj.legend(loc='best', shadow=True, fontsize='10')
graph_kdj.set_ylabel(u"KDJ-talib")
graph_kdj.set_xlabel("日期")
graph_kdj.set_xlim(0, len(stock_dat.index))  # set the x-axis range
graph_kdj.set_xticks(range(0, len(stock_dat.index), 15))  # x-axis ticks: one date label every 15 days
graph_kdj.set_xticklabels([stock_dat.index.strftime('%Y-%m-%d')[index] for index in graph_kdj.get_xticks()])  # use dates as tick labels
fig.autofmt_xdate(rotation=45)  # rotate each tick label 45 degrees to avoid overlapping date labels
plt.show()
"""
layout_dict = {'figsize': (14, 5),
'index': stock_dat.index,
'draw_kind': {'line':
{'K': stock_dat.K,
'D': stock_dat.D,
'J': stock_dat.J
}
},
'title': u"000651 格力电器-KDJ-talib",
'ylabel': u"KDJ",
'legend': u'best'}
app.fig_output(**layout_dict)
def draw_takpattern_annotate(stock_dat):
# talib candlestick pattern: Dark Cloud Cover (CDLDARKCLOUDCOVER)
# CDL2CROWS = talib.CDL2CROWS(stock_dat.Open.values, stock_dat.High.values, stock_dat.Low.values,stock_dat.Close.values)
# CDLHAMMER = talib.CDLHAMMER(stock_dat.Open.values, stock_dat.High.values, stock_dat.Low.values,stock_dat.Close.values)
# CDLMORNINGSTAR = talib.CDLMORNINGSTAR(stock_dat.Open.values, stock_dat.High.values, stock_dat.Low.values,stock_dat.Close.values)
CDLDARKCLOUDCOVER = talib.CDLDARKCLOUDCOVER(stock_dat.Open.values, stock_dat.High.values, stock_dat.Low.values,
stock_dat.Close.values)
# draw the candlestick chart
pattern = stock_dat[(CDLDARKCLOUDCOVER == 100) | (CDLDARKCLOUDCOVER == -100)]
layout_dict = {'figsize': (14, 7),
'index': stock_dat.index,
                   'draw_kind': {'ochl':  # candlestick chart
{'Open': stock_dat.Open,
'Close': stock_dat.Close,
'High': stock_dat.High,
'Low': stock_dat.Low
},
'annotate':
{u'CDLDARKCLOUDCOVER':
{'andata': pattern,
'va': 'bottom',
'xy_y': 'High',
'xytext': (0, stock_dat['Close'].mean()),
'fontsize': 8,
'arrow': dict(arrowstyle='->', facecolor='blue',
connectionstyle="arc3,rad=.2")
}
}
},
'title': u"000651 格力电器-日K线-CDLDARKCLOUDCOVER",
'ylabel': u"价格"}
app.fig_output(**layout_dict)
def talib_speed_example():
# compare the speed difference between talib and pandas
close_price = np.random.random(1000000)
    df_random = pd.DataFrame(close_price, columns=['close_price'])  # api: pandas.DataFrame
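    # Hedged completion sketch: the original example is truncated above. A typical comparison
    # times talib.SMA against the equivalent pandas rolling mean; the 20-period window is an
    # illustrative choice, not taken from the original.
    import time  # local import used only by this sketch
    start = time.time()
    talib.SMA(close_price, timeperiod=20)
    print('talib.SMA: {:.4f} s'.format(time.time() - start))
    start = time.time()
    df_random['close_price'].rolling(window=20).mean()
    print('pandas rolling mean: {:.4f} s'.format(time.time() - start))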
#!/usr/bin/env python
r"""Aggregate, create, and save spiral plots.
"""
import pdb # noqa: F401
import logging
import numpy as np
import pandas as pd
import matplotlib as mpl
from datetime import datetime
from numbers import Number
from collections import namedtuple
from numba import njit, prange
from matplotlib import pyplot as plt
from . import base
from . import labels as labels_module
InitialSpiralEdges = namedtuple("InitialSpiralEdges", "x,y")
# SpiralMeshData = namedtuple("SpiralMeshData", "x,y")
SpiralMeshBinID = namedtuple("SpiralMeshBinID", "id,fill,visited")
SpiralFilterThresholds = namedtuple(
"SpiralFilterThresholds", "density,size", defaults=(False,)
)
@njit(parallel=True)
def get_counts_per_bin(bins, x, y):
nbins = bins.shape[0]
cell_count = np.full(nbins, 0, dtype=np.int64)
for i in prange(nbins):
x0, x1, y0, y1 = bins[i]
left = x >= x0
right = x < x1
bottom = y >= y0
top = y < y1
chk_cell = left & right & bottom & top
cell_count[i] = chk_cell.sum()
return cell_count
@njit(parallel=True)
def calculate_bin_number_with_numba(mesh, x, y):
fill = -9999
zbin = np.full(x.size, fill, dtype=np.int64)
nbins = mesh.shape[0]
bin_visited = np.zeros(nbins, dtype=np.int64)
for i in prange(nbins):
x0, x1, y0, y1 = mesh[i]
# Assume that largest x- and y-edges are extended by larger of 1% and 0.01
# so that we can just naively use < instead of a special case of <=.
# At time of writing (20200418), `SpiralPlot.initialize_mesh` did this.
tk = (x >= x0) & (x < x1) & (y >= y0) & (y < y1)
zbin[tk] = i
bin_visited[i] += 1
return zbin, fill, bin_visited
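# Hedged usage sketch (illustrative values, not part of the original module): both numba
# kernels take a mesh of shape (nbins, 4) with rows [x0, x1, y0, y1] plus flat x/y arrays.
# _mesh = np.array([[0.0, 1.0, 0.0, 1.0], [1.0, 2.0, 0.0, 1.0]])
# _x, _y = np.array([0.5, 1.5, 3.0]), np.array([0.5, 0.5, 0.5])
# get_counts_per_bin(_mesh, _x, _y)               # -> array([1, 1])
# calculate_bin_number_with_numba(_mesh, _x, _y)  # -> (array([0, 1, -9999]), -9999, array([1, 1]))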
class SpiralMesh(object):
def __init__(self, x, y, initial_xedges, initial_yedges, min_per_bin=250):
self.set_data(x, y)
self.set_min_per_bin(min_per_bin)
self.set_initial_edges(initial_xedges, initial_yedges)
self._cell_filter_thresholds = SpiralFilterThresholds(density=False, size=False)
@property
def bin_id(self):
return self._bin_id
@property
def cat(self):
r""":py:class:`pd.Categorical` version of `bin_id`, with fill bin removed."""
return self._cat
@property
def data(self):
return self._data
@property
def initial_edges(self):
return self._initial_edges
@property
def mesh(self):
return self._mesh
@property
def min_per_bin(self):
return self._min_per_bin
@property
def cell_filter_thresholds(self):
return self._cell_filter_thresholds
@property
def cell_filter(self):
r"""Build a boolean :py:class:`Series` selecting mesh cells that meet
density and area criteria specified by `mesh_cell_filter_thresholds`.
Notes
----
Neither `density` nor `size` convert log-scale edges into linear scale.
Doing so would overweight the area of mesh cells at larger values on a given axis.
"""
density = self.cell_filter_thresholds.density
size = self.cell_filter_thresholds.size
x = self.mesh[:, [0, 1]]
y = self.mesh[:, [2, 3]]
dx = x[:, 1] - x[:, 0]
dy = y[:, 1] - y[:, 0]
dA = dx * dy
tk = np.full_like(dx, True, dtype=bool)
if size:
size_quantile = np.quantile(dA, size)
tk_size = dA < size_quantile
tk = tk & (tk_size)
if density:
cnt = np.bincount(self.bin_id.id, minlength=self.mesh.shape[0])
assert cnt.shape == tk.shape
cell_density = cnt / dA
density_quantile = np.quantile(cell_density, density)
tk_density = cell_density > density_quantile
tk = tk & tk_density
return tk
def set_cell_filter_thresholds(self, **kwargs):
r"""Set or update the :py:meth:`mesh_cell_filter_thresholds`.
Parameters
----------
density: scalar
The density quantile above which we want to select bins, e.g.
above the 0.01 quantile. This ensures that each bin meets some
sufficient fill factor.
size: scalar
The size quantile below which we want to select bins, e.g.
below the 0.99 quantile. This ensures that the bin isn't so large
that it will appear as an outlier.
"""
density = kwargs.pop("density", False)
size = kwargs.pop("size", False)
if len(kwargs.keys()):
extra = "\n".join(["{}: {}".format(k, v) for k, v in kwargs.items()])
raise KeyError("Unexpected kwarg\n{}".format(extra))
self._cell_filter_thresholds = SpiralFilterThresholds(
density=density, size=size
)
def set_initial_edges(self, xedges, yedges):
self._initial_edges = InitialSpiralEdges(xedges, yedges)
def set_data(self, x, y):
data = pd.concat({"x": x, "y": y}, axis=1)
self._data = data # SpiralMeshData(x, y)
def set_min_per_bin(self, new):
self._min_per_bin = int(new)
def initialize_bins(self):
# Leaves initial edges altered when we change maximum edge.
xbins = self.initial_edges.x
ybins = self.initial_edges.y
# # Account for highest bin = 0 already done in `SpiralPlot2D.initialize_mesh`.
# xbins[-1] = np.max([0.01, 1.01 * xbins[-1]])
# ybins[-1] = np.max([0.01, 1.01 * ybins[-1]])
left = xbins[:-1]
right = xbins[1:]
bottom = ybins[:-1]
top = ybins[1:]
nx = left.size
ny = bottom.size
mesh = np.full((nx * ny, 4), np.nan, dtype=np.float64)
for x0, x1, i in zip(left, right, range(nx)):
for y0, y1, j in zip(bottom, top, range(ny)):
# NOTE: i*ny+j means go to i'th row, which has
# nrow * number of bins passed. Then go
# to j'th bin because we have to traverse
# to the j'th y-bin too.
mesh[(i * ny) + j] = [x0, x1, y0, y1]
mesh = np.array(mesh)
# pdb.set_trace()
self.initial_mesh = np.array(mesh)
return mesh
@staticmethod
def process_one_spiral_step(bins, x, y, min_per_bin):
# print("Processing spiral step", flush=True)
# start0 = datetime.now()
cell_count = get_counts_per_bin(bins, x, y)
bins_to_replace = cell_count > min_per_bin
nbins_to_replace = bins_to_replace.sum()
if not nbins_to_replace:
return None, 0
xhyh = 0.5 * (bins[:, [0, 2]] + bins[:, [1, 3]])
def split_this_cell(idx):
x0, x1, y0, y1 = bins[idx]
xh, yh = xhyh[idx]
# Reduce calls to `np.array`.
# Just return a list here.
split_cell = [
[x0, xh, y0, yh],
[xh, x1, y0, yh],
[xh, x1, yh, y1],
[x0, xh, yh, y1],
]
return split_cell
new_cells = bins_to_replace.sum() * [None]
for i, idx in enumerate(np.where(bins_to_replace)[0]):
new_cells[i] = split_this_cell(idx)
new_cells = np.vstack(new_cells)
bins[bins_to_replace] = np.nan
# stop = datetime.now()
# print(f"Done Building replacement grid cells (dt={stop-start1})", flush=True)
# print(f"Done Processing spiral step (dt={stop-start0})", flush=True)
return new_cells, nbins_to_replace
@staticmethod
def _visualize_logged_stats(stats_str):
from matplotlib import pyplot as plt
stats = [[y.strip() for y in x.split(" ") if y] for x in stats_str.split("\n")]
stats.pop(1) # Remove column underline row
stats = np.array(stats)
index = pd.Index(stats[1:, 0].astype(int), name="Step")
n_replaced = stats[1:, 1].astype(int)
dt = pd.to_timedelta(stats[1:, 2]).total_seconds()
dt_unit = "s"
if dt.max() > 60:
dt /= 60
dt_unit = "m"
if dt.max() > 60:
dt /= 60
dt_unit = "H"
if dt.max() > 24:
dt /= 24
dt_unit = "D"
dt_key = f"Elapsed [{dt_unit}]"
stats = pd.DataFrame({dt_key: dt, "N Divisions": n_replaced}, index=index)
# stats = pd.Series(stats[1:, 1].astype(int), index=stats[1:, 0].astype(int), name=stats[0, 1])
# stats.index.name = stats[0, 0]
fig, ax = plt.subplots()
tax = ax.twinx()
x = stats.index
k = f"Elapsed [{dt_unit}]"
ax.plot(x, stats.loc[:, k], label=k, marker="+", ms=8)
k = "N Divisions"
tax.plot(x, stats.loc[:, k], label=k, c="C1", ls="--", marker="x", ms=8)
tax.grid(False)
ax.set_xlabel("Step Number")
ax.set_ylabel(dt_key)
tax.set_ylabel("N Divisions")
h0, l0 = ax.get_legend_handles_labels()
h1, l1 = tax.get_legend_handles_labels()
ax.legend(
h0 + h1,
l0 + l1,
title=fr"$\Delta t = {stats.loc[:, dt_key].sum():.0f} \, {dt_unit}$",
)
ax.set_yscale("log")
tax.set_yscale("log")
return ax, tax, stats
def generate_mesh(self):
logger = logging.getLogger("__main__")
start = datetime.now()
logger.warning(f"Generating {self.__class__.__name__} at {start}")
x = self.data.x.values
y = self.data.y.values
min_per_bin = self.min_per_bin
# max_bins = int(1e5)
initial_bins = self.initialize_bins()
# To reduce memory needs, only process data in mesh.
x0 = initial_bins[:, 0].min()
x1 = initial_bins[:, 1].max()
y0 = initial_bins[:, 2].min()
y1 = initial_bins[:, 3].max()
tk_data_in_mesh = (
(x0 <= x)
& (x <= x1)
& (y0 <= y)
& (y <= y1)
& np.isfinite(x)
& np.isfinite(y)
)
x = x[tk_data_in_mesh]
y = y[tk_data_in_mesh]
initial_cell_count = get_counts_per_bin(initial_bins, x, y)
# initial_cell_count = self.get_counts_per_bin_loop(initial_bins, x, y)
bins_to_replace = initial_cell_count > min_per_bin
nbins_to_replace = bins_to_replace.sum()
# raise ValueError
list_of_bins = [initial_bins]
active_bins = initial_bins
logger.warning(
"""
Step N Elapsed Time
====== ======= =============="""
)
step_start = datetime.now()
step = 0
while nbins_to_replace > 0:
active_bins, nbins_to_replace = self.process_one_spiral_step(
active_bins, x, y, min_per_bin
)
now = datetime.now()
# if not(step % 10):
logger.warning(f"{step:>6} {nbins_to_replace:>7} {(now - step_start)}")
list_of_bins.append(active_bins)
step += 1
step_start = now
list_of_bins = [b for b in list_of_bins if b is not None]
final_bins = np.vstack(list_of_bins)
valid_bins = np.isfinite(final_bins).all(axis=1)
final_bins = final_bins[valid_bins]
stop = datetime.now()
# logger.warning(f"Complete at {stop}")
logger.warning(f"\nCompleted {self.__class__.__name__} at {stop}")
logger.warning(f"Elasped time {stop - start}")
logger.warning(f"Split bin threshold {min_per_bin}")
logger.warning(
f"Generated {final_bins.shape[0]} bins for {x.size} spectra (~{x.size/final_bins.shape[0]:.3f} spectra per bin)\n"
)
self._mesh = final_bins
# return final_bins
def calculate_bin_number(self):
logger = logging.getLogger(__name__)
logger.warning(
f"Calculating {self.__class__.__name__} bin_number at {datetime.now()}"
)
x = self.data.loc[:, "x"].values
y = self.data.loc[:, "y"].values
mesh = self.mesh
nbins = mesh.shape[0]
start = datetime.now()
zbin, fill, bin_visited = calculate_bin_number_with_numba(mesh, x, y)
stop = datetime.now()
logger.warning(f"Elapsed time {stop - start}")
# return calculate_bin_number_with_numba_broadcast(mesh, x, y, fill)
# if ( verbose > 0 and
# (i % verbose == 0) ):
# print(i+1, end=", ")
if (zbin == fill).any():
# if (zbin < 0).any():
# pdb.set_trace()
logger.warning(
f"""`zbin` contains {(zbin == fill).sum()} ({100 * (zbin == fill).mean():.1f}%) fill values that are outside of mesh.
They will be replaced by NaNs and excluded from the aggregation.
"""
)
# raise ValueError(msg % (zbin == fill).sum())
# Set fill bin to zero
is_fill = zbin == fill
# zbin[~is_fill] += 1
# zbin[is_fill] = -1
# print(zbin.min())
# zbin += 1
# print(zbin.min())
# `minlength=nbins` forces us to include empty bins at the end of the array.
bin_frequency = np.bincount(zbin[~is_fill], minlength=nbins)
n_empty = (bin_frequency == 0).sum()
logger.warning(
f"""Largest bin population is {bin_frequency.max()}
{n_empty} of {nbins} bins ({100 * n_empty / nbins:.1f}%) are empty
"""
)
if not bin_visited.all():
    logger.warning(f"{(bin_visited == 0).sum()} bins went unvisited.")
if (bin_visited > 1).any():
    logger.warning(f"{(bin_visited > 1).sum()} bins visited more than once.")
if nbins - bin_frequency.shape[0] != 0:
raise ValueError(
f"{nbins - bin_frequency.shape[0]} mesh cells do not have an associated z-value"
)
# zbin = _pd.Series(zbin, index=self.data.index, name="zbin")
# # Pandas groupby will treat NaN as not belonging to a bin.
# zbin.replace(fill, _np.nan, inplace=True)
bin_id = SpiralMeshBinID(zbin, fill, bin_visited)
self._bin_id = bin_id
return bin_id
def place_spectra_in_mesh(self):
self.generate_mesh()
bin_id = self.calculate_bin_number()
return bin_id
def build_cat(self):
bin_id = self.bin_id.id
fill = self.bin_id.fill
# Integer number corresponds to the order over
# which the mesh was traversed.
    cat = pd.Categorical(bin_id, ordered=False)  # api: pandas.Categorical
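    # Hedged completion sketch: the original method is truncated above. Based on the `cat`
    # property docstring ("with fill bin removed"), a plausible continuation drops the fill
    # category and caches the result.
    if fill in cat.categories:
        cat = cat.remove_categories([fill])
    self._cat = cat
    return cat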
# encoding: utf-8
"""
plot.py
~~~~~~~
Functionality for creating performance summary plots.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__created__ = "2018-06-08"
__copyright__ = "Copyright 2018 <NAME>"
__license__ = "MIT https://opensource.org/licenses/MIT"
# standard imports
# third party imports
import random
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.colors import CSS4_COLORS
from sklearn.metrics import accuracy_score
from sklearn.metrics.scorer import check_scoring, _PredictScorer, _ProbaScorer
from sklearn.base import clone as clone_model
from sklearn.model_selection import cross_val_score
# local imports
from atnlp.eval.table import topic_labelling_summary_table
# globals
COLORS = ['red', 'blue', 'green', 'orange', 'magenta', 'yellow', 'brown']
random.seed(42)
COLORS += random.sample(list(CSS4_COLORS), len(CSS4_COLORS))
def create_awesome_plot_grid(nminor, ncol=5, maj_h=2, maj_w=3,
min_xlabel=None, min_ylabel=None,
maj_xlabel=None, maj_ylabel=None,
grid=True):
"""Returns an awesome plot grid
The grid includes a specified number (*nminor*) of minor
plots (unit size in the grid) and a single major plot
whose size can be specified in grid units (*maj_h* and *maj_w*).
The major plot is located top-right. If either dimension
is 0 the major plot is omitted.
The minor plots are tiled from left-to-right, top-to-bottom
on a grid of width *ncol* and will be spaced around the major
plot.
The grid will look something like this
.. code-block:: text
#----#----#----#---------#
| | | | |
| | | | |
#----#----#----# |
| | | | |
| | | | |
#----#----#----#----#----#
| | | | | |
| | | | | |
#----#----#----#----#----#
| | |
| | | -->
#----#----#
:param nminor: number of minor plots
:param ncol: width of grid (in grid units)
:param maj_h: height of major plot (in grid units)
:param maj_w: width of major plot (in grid units)
:param min_xlabel: x-axis label of minor plots
:param min_ylabel: y-axis label of minor plots
:param maj_xlabel: x-axis label of major plot
:param maj_ylabel: y-axis label of major plot
:param grid: draw grid lines (if True)
:return: tuple (figure, major axis, minor axes (flat list), minor axes (2D list))
"""
assert maj_w <= ncol, "Major fig cannot be wider than grid!"
def pad_coord(ipad):
"""Return x-y coordinate for ith element"""
i = int(np.floor(ipad / ncol))
j = ipad % ncol
return (i, j)
def in_main(ipad):
"""Return True if ith element within major plot space"""
(i, j) = pad_coord(ipad)
if j >= ncol - maj_w and i < maj_h: return True
return False
# derived quantities
n = maj_w * maj_h + nminor
nrow = int(np.ceil(n / ncol))
if maj_h and nminor <= ncol - maj_w:
ncol = maj_w + nminor
if maj_w:
nrow = max(nrow, maj_h)
# create figure
f = plt.figure(figsize=(16 * ncol / 5, 16 * nrow / 5))
# create major axis
if maj_h and maj_w:
ax_maj = plt.subplot2grid((nrow, ncol), (0, ncol - maj_w), colspan=maj_w, rowspan=maj_h)
if maj_xlabel: ax_maj.set_xlabel(maj_xlabel)
if maj_ylabel: ax_maj.set_ylabel(maj_ylabel)
ax_maj.tick_params(top=True, right=True,
labeltop=True, labelright=True,
labelleft=False, labelbottom=False,
grid_linestyle='-.')
ax_maj.grid(grid)
else:
ax_maj = None
# create minor axes
ax_min = []
ax_min_ij = [[None] * ncol for _ in range(nrow)]  # independent rows; repeating one inner list would alias every row
ipad = 0
imin = 0
while imin < nminor:
if not in_main(ipad):
(i, j) = pad_coord(ipad)
ax0 = ax_min[0] if ax_min else None
ax = plt.subplot2grid((nrow, ncol), (i, j), sharex=ax0, sharey=ax0)
ax.i = i
ax.j = j
ax.tick_params(top=True, right=True, grid_linestyle='-.')
ax.grid(grid)
# add top labels
if i == 0:
ax.tick_params(labeltop=True)
# add right labels
if j == ncol - 1: ax.tick_params(labelright=True)
# remove inner left labels
if j > 0:
ax.tick_params(labelleft=False)
# set y-titles
elif min_ylabel:
ax.set_ylabel(min_ylabel)
# set x-titles
if min_xlabel: ax.set_xlabel(min_xlabel)
# remove inner bottom labels
if i > 0 and ax_min_ij[i - 1][j]:
ax_min_ij[i - 1][j].tick_params(labelbottom=False)
ax_min_ij[i - 1][j].set_xlabel("")
ax_min.append(ax)
ax_min_ij[i][j] = ax
imin += 1
ipad += 1
return (f, ax_maj, ax_min, ax_min_ij)
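# Hedged usage sketch (illustrative only): lay out 7 minor panels around the default 2x3
# major panel, then draw into the returned axes.
# f, ax_maj, ax_min, ax_min_ij = create_awesome_plot_grid(7, min_xlabel='x', min_ylabel='y')
# ax_maj.plot([0, 1], [0, 1]); ax_min[0].plot([0, 1], [1, 0])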
def binary_classification_accuracy_overlays(classifiers, X_train, y_train, X_test, y_test):
"""Create overlays of binary classification accuracy for multiple classifiers
:param classifiers: list of tuples (name, classifier)
:param X_train: training data
:param y_train: binary training labels
:param X_test: testing data
:param y_test: binary testing labels
:return: tuple (figure, axis)
"""
acc_train = [accuracy_score(y_train, c.predict(X_train))
for (_,c) in classifiers]
acc_test = [accuracy_score(y_test, c.predict(X_test))
for (_,c) in classifiers]
acc_cv = [c.cv_results_['mean_test_score'][c.best_index_]
for (_,c) in classifiers]
acc_err_cv = [c.cv_results_['std_test_score'][c.best_index_]
for (_,c) in classifiers]
names = [n for (n,_) in classifiers]
ypos = np.arange(len(classifiers))
fig, ax = plt.subplots()
ax.barh(ypos, acc_cv, xerr=acc_err_cv, align='center',
color='g', label='cv', alpha=0.5)
ax.set_yticks(ypos)
ax.set_yticklabels(names)
ax.set_xlabel('Accuracy')
ax.scatter(acc_train, ypos, color='red', label='train')
ax.scatter(acc_test, ypos, color='b', label='test')
ax.invert_yaxis()
ax.legend()
xmin = 0.98 * min(acc_train+acc_test+acc_cv)
xmax = 1.02 * max(acc_train+acc_test+acc_cv)
ax.set_xlim(xmin,xmax)
return (fig, ax)
def topic_labelling_scatter_plots(Y_true, Y_pred, sample_min=None, thresholds=None):
"""Create scatter plots comparing precision, recall and number of samples
:param Y_true: ground truth topic labels (one-hot format)
:param Y_pred: topic predictions (one-hot format)
:param sample_min: minimum number of examples per topic
:param thresholds: list of thresholds per category (optional)
:return: tuple (figure, list of axes)
"""
table = topic_labelling_summary_table(Y_true, Y_pred, sample_min, thresholds)
# Make scatter plots
f = plt.figure(figsize=(20,5))
ax1 = plt.subplot(1,3,1)
ax1.scatter(table['recall'], table['precision'])
plt.xlabel('recall')
plt.ylabel('contamination')
ax2 = plt.subplot(1,3,2)
ax2.scatter(table['samples'], table['recall'])
ax2.set_xscale('log')
plt.xlabel('samples')
plt.ylabel('recall')
ax3 = plt.subplot(1,3,3)
ax3.scatter(table['samples'], table['precision'])
ax3.set_xscale('log')
plt.xlabel('samples')
plt.ylabel('contamination')
return (f, (ax1, ax2, ax3))
def topic_labelling_barchart(Y_true, Y_preds, model_names):
"""Create topic labelling barchart
The figure includes a 1x4 grid of bar charts, illustrating
the number of samples, precision, recall and f1 scores for
each topic. The scores are overlayed for each model.
:param Y_true: ground truth topic labels (one-hot format)
:param Y_preds: topic predictions for each model (list of one-hot formats)
:param model_names: topic labelling model names
:return: tuple (figure, list of axes)
"""
n = len(model_names)
tables = [topic_labelling_summary_table(Y_true, Y_preds[i]) for i in range(n)]
topics = tables[0]['topic']
samples = tables[0]['samples']
# y-axis
ypos = np.arange(len(samples))
# figure
plt.close('all')
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True, figsize=(16, 0.25 * len(samples)))
# samples subfig
ax1.set_xlabel('Samples')
ax1.barh(ypos, samples, align='center',
color='g', label='Samples', alpha=0.25)
ax1.set_yticks(ypos)
ax1.set_yticklabels(topics)
ax1.invert_yaxis()
# precision
ax2.set_xlabel('Precision')
ax2.set_xlim((-0.05, 1.05))
for i in range(n):
ax2.scatter(tables[i]['precision'], ypos, color=COLORS[i],
label=model_names[i], alpha=0.5)
# recall
ax3.set_xlabel('Recall')
ax3.set_xlim((-0.05, 1.05))
for i in range(n):
ax3.scatter(tables[i]['recall'], ypos, color=COLORS[i],
label=model_names[i], alpha=0.5)
# f1
ax4.set_xlabel('F1')
ax4.set_xlim((-0.05, 1.05))
for i in range(n):
ax4.scatter(tables[i]['f1'], ypos, color=COLORS[i],
label=model_names[i], alpha=0.5)
ax4.legend(loc='center left', bbox_to_anchor=(1, 1))
gridlines = []
for ax in [ax1, ax2, ax3, ax4]:
ax.grid()
gridlines += ax.get_xgridlines() + ax.get_ygridlines()
for line in gridlines:
line.set_linestyle('-.')
return (f, (ax1, ax2, ax3, ax4))
def topic_labelling_barchart_cv(models, model_names, model_inputs, Y, cv=10):
"""Create topic labelling barchart with k-fold cross-validation
Figure layout is the same as in :func:`topic_labelling_barchart`.
K-fold cross-validation is used to estimate uncertainties on the metrics.
:param models: list of topic labelling models
:param model_names: list of model names
:param model_inputs: list of input data for models
:param Y: ground truth topic labels (one-hot format)
:param cv: number of folds for cross-validation
:return: tuple (figure, list of axes)
"""
n = len(models)
samples = np.array([sum(Y[cat]) for cat in Y.columns])
order = np.argsort(samples)[::-1]
samples = samples[order]
topics = Y.columns[order]
def get_cv_scores(scoring, model, X):
scores = np.array([cross_val_score(model.estimators_[i], X, Y[cat], scoring=scoring, cv=cv)
for (i, cat) in enumerate(Y.columns[order])])
smed = np.median(scores, axis=1)
smin = np.min(scores, axis=1)
smax = np.max(scores, axis=1)
err = np.column_stack([np.abs(smin - smed), np.abs(smax - smed)])
return [smed, err]
precision = [get_cv_scores('precision', m, X) for (m, X) in zip(models, model_inputs)]
recall = [get_cv_scores('recall', m, X) for (m, X) in zip(models, model_inputs)]
f1 = [get_cv_scores('f1', m, X) for (m, X) in zip(models, model_inputs)]
# y-axis
ypos = np.arange(len(samples))
# figure
plt.close('all')
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True, figsize=(16, 0.25 * len(samples)))
# samples subfig
ax1.set_xlabel('Samples')
ax1.barh(ypos, samples, align='center',
color='g', label='Samples', alpha=0.25)
ax1.set_yticks(ypos)
ax1.set_yticklabels(topics)
ax1.invert_yaxis()
# precision
ax2.set_xlabel('Precision')
ax2.set_xlim((-0.05, 1.05))
for i in range(n):
(med, err) = precision[i]
ax2.errorbar(med, ypos, xerr=err.T, color=COLORS[i],
fmt='o', capsize=5, label=model_names[i], alpha=0.5)
# recall
ax3.set_xlabel('Recall')
ax3.set_xlim((-0.05, 1.05))
for i in range(n):
(med, err) = recall[i]
ax3.errorbar(med, ypos, xerr=err.T, color=COLORS[i],
fmt='o', capsize=5, label=model_names[i], alpha=0.5)
# f1
ax4.set_xlabel('F1')
ax4.set_xlim((-0.05, 1.05))
for i in range(n):
(med, err) = f1[i]
ax4.errorbar(med, ypos, xerr=err.T, color=COLORS[i],
fmt='o', capsize=5, label=model_names[i], alpha=0.5)
ax4.legend(loc='center left', bbox_to_anchor=(1, 1))
gridlines = []
for ax in [ax1, ax2, ax3, ax4]:
ax.grid()
gridlines += ax.get_xgridlines() + ax.get_ygridlines()
for line in gridlines:
line.set_linestyle('-.')
return (f, (ax1, ax2, ax3, ax4))
def background_composition_pie(Y_true, Y_score, topic, threshold, min_topic_frac=0.05):
"""Create a pie chart illustrating the major background contributions for given label
Background topics contributing less than *min_topic_frac* will be merged into a
single contribution called "Other".
A bar chart is also included illustrating the overall topic composition.
:param Y_true: ground truth topic labels (one-hot format)
:param Y_score: topic probability predictions (shape: samples x topics)
:param topic: name of topic to investigate
:param threshold: threshold above which to investigate background contributions
:param min_topic_frac: minimum background sample fraction
:return: tuple (figure, list of axes)
"""
ix = Y_true.columns.get_loc(topic)
y_score = Y_score[:, ix]
topics = np.array([t for t in Y_true.columns if t != topic])
composition = np.array([np.sum(Y_true[t][y_score > threshold]) for t in topics])
# combine contributions less than 5%
tot = np.sum(composition)
mask = (composition < tot * min_topic_frac)
other = np.sum(composition[mask])
topics = np.array(topics[~mask].tolist() + ["Other"])
composition = np.array(composition[~mask].tolist() + [other])
# sort
topics = topics[np.argsort(composition)]
composition = np.sort(composition)
# make fig
fig = plt.figure(figsize=(15, 5))
# Plot 1: bar
ax1 = plt.subplot(1, 2, 1)
ypos = np.arange(len(composition))
ax1.barh(ypos, composition, align='center')
ax1.set_yticks(ypos)
ax1.set_yticklabels(topics)
ax1.set_xlabel('Samples')
# Plot 2: pie
ax2 = plt.subplot(1, 2, 2)
ax2.pie(composition, labels=topics, autopct='%1.1f%%', startangle=90)
plt.axis('equal')
return (fig, (ax1, ax2))
def get_multimodel_sample_size_dependence(models, datasets, labels, sample_fracs, scoring=None, cat_scoring=None):
"""Return performance metrics vs training sample size
Fractions of data (*sample_fracs*) are randomly sampled from the training dataset
and used to train the models, which are always evaluated on the full testing datasets.
:param models: list of topic labelling models
:param datasets: list of input data for models (each is (training, testing) tuple)
:param labels: tuple (train, test) of ground truth topic labels (one-hot format)
:param sample_fracs: list of sample fractions to scan
:param scoring: sklearn scorer or scoring name for topic averaged metric
:param cat_scoring: sklearn scorer or scoring name for individual topic metric
:return: tuple (entries per step, averaged model scores for each step, model scores for each topic for each step)
"""
# inputs
(Y_train, Y_test) = labels
train_size = len(Y_train)
test_size = len(Y_test)
train_indices = np.arange(train_size)
categories = Y_train.columns
# check input dataset size compatibility
assert np.all(np.array([X.shape[0] for (X, _) in datasets]) == train_size), \
"Model training sample sizes are incompatible!"
assert np.all(np.array([X.shape[0] for (_, X) in datasets]) == test_size), \
"Model testing sample sizes are incompatible!"
# values to fill
entries = []
scores = [] if scoring is not None else None
cat_scores = [] if cat_scoring is not None else None
for frac in sample_fracs:
# sub-sampling
subsample_size = int(frac * train_size)
np.random.seed(42)
rand_indices = np.random.choice(train_indices, subsample_size, replace=False)
Y_train_sub = Y_train.iloc[rand_indices]
# account for active categories (ie have at least 1 True and 1 False label)
active_cats = [cat for cat in categories if len(Y_train_sub[cat].unique()) == 2]
if len(active_cats) == 0:
print("no active categories, skipping frac: ", frac)
continue
print("frac: {}, samples: {}, active cats: {}".format(frac, subsample_size, len(active_cats)))
Y_train_sub = Y_train_sub[active_cats]
Y_test_sub = Y_test[active_cats]
# evaluate model
model_scores = []
cat_model_scores = []
for (model, (X_train, X_test)) in zip(models, datasets):
# print ("evaluating model...")
X_train_sub = X_train[rand_indices]
# train
model_tmp = clone_model(model)
model_tmp.fit(X_train_sub, Y_train_sub)
# predict/eval overall
scorer = Y_test_pred = None
if scoring is not None:
scorer = check_scoring(model_tmp, scoring)
if isinstance(scorer, _PredictScorer):
Y_test_pred = model_tmp.predict(X_test)
elif isinstance(scorer, _ProbaScorer):
Y_test_pred = model_tmp.predict_proba(X_test)
else:
assert False, "Scorer not supported"
model_scores.append(scorer._score_func(Y_test_sub, Y_test_pred, **scorer._kwargs))
# predict/eval per category
if cat_scoring is not None:
cat_scorer = check_scoring(model_tmp.estimators_[0], cat_scoring)
if scoring is not None and type(scorer) == type(cat_scorer):
Y_test_pred_cat = Y_test_pred
else:
if isinstance(cat_scorer, _PredictScorer):
Y_test_pred_cat = model_tmp.predict(X_test)
elif isinstance(cat_scorer, _ProbaScorer):
Y_test_pred_cat = model_tmp.predict_proba(X_test)
else:
assert False, "Category Scorer not supported"
# eval
cat_score = []
for cat in categories:
if cat not in active_cats:
s = 0.0
else:
icat = np.where(Y_test_sub.columns == cat)[0][0]
s = cat_scorer._score_func(Y_test_sub[cat],
Y_test_pred_cat[:, icat],
**cat_scorer._kwargs)
cat_score.append(s)
cat_model_scores.append(cat_score)
# Note: this is typically how to call the scorer (but we hacked to avoid multiple prediction)
# score = scorer(model_tmp, X_test, Y_test_sub)
entries.append(subsample_size)
if scoring is not None:
scores.append(model_scores)
if cat_scoring is not None:
cat_scores.append(cat_model_scores)
entries = np.array(entries)
if scoring is not None:
scores = np.array(scores).T
if cat_scoring is not None:
cat_scores = np.array(cat_scores).T
return (entries, scores, cat_scores)
def multimodel_sample_size_dependence_graph(models, model_names, datasets, labels, sample_fracs, scoring=None,
cat_scoring=None):
"""Create graph of performance metric vs training sample size
Fractions of data (*sample_fracs*) are randomly sampled from the training dataset
and used to train the models, which are always evaluated on the full testing datasets.
:param models: list of topic labelling models
:param model_names: list of model names
:param datasets: list of input data for models (each is (training, testing) tuple)
:param labels: tuple (train, test) of ground truth topic labels (one-hot format)
:param sample_fracs: list of sample fractions to scan
:param scoring: sklearn scorer or scoring name for topic averaged metric
:param cat_scoring: sklearn scorer or scoring name for individual topic metric
:return: tuple (figure, major axis, minor axes (flat list), minor axes (2D list))
"""
(entries, scores, cat_scores) = get_multimodel_sample_size_dependence(
models, datasets, labels, sample_fracs, scoring=scoring, cat_scoring=cat_scoring)
# set figure configuration
if scoring is None:
maj_w = maj_h = 0  # use 0 rather than None: create_awesome_plot_grid compares maj_w to ncol and treats 0 as "no major plot"
else:
maj_w = 3
maj_h = 2
if cat_scoring is None:
ncat = 0
else:
ncat = cat_scores.shape[0]
plt.close('all')
(f, ax_maj, ax_min, ax_min_ij) = create_awesome_plot_grid(
ncat, maj_w=maj_w, maj_h=maj_h, min_xlabel="Train sample size", min_ylabel="Score")
# plot main figure
if scoring:
ax = ax_maj
for j in range(len(models)):
ax.plot(entries, scores[j], color=COLORS[j], label=model_names[j])
ax.set_title("Overall", pad=25)
ax.legend()
# plot grid with categories
if cat_scoring:
# get category sample fractions
categories = labels[0].columns
cfracs = np.array([np.sum(labels[0][cat]) / len(labels[0]) for cat in categories])
# sort categories by size
order = np.argsort(cfracs)[::-1]
categories = categories[order]
cfracs = cfracs[order]
cat_scores = cat_scores[order]
# plot subfigs
for i in range(len(categories)):
ax = ax_min[i]
for j in range(len(models)):
ax.plot(entries, cat_scores[i, j], color=COLORS[j], label=model_names[j])
pad = 25 if ax.i == 0 else None
ax.set_title("{} ({:.1f}% frac)".format(categories[i], 100. * cfracs[i]), pad=pad)
if not scoring: ax_min[0].legend()
return (f, ax_maj, ax_min, ax_min_ij)
def topic_correlation_matrix(Y):
"""Create MxM correlation matrix for M topics
Each column represents a given ground truth topic label.
Each row represents the relative frequency with which other
ground truth labels co-occur.
:param Y: ground truth topic labels (one-hot format)
:return: tuple (figure, axis)
"""
d = np.array([np.sum(Y[Y[t]], axis=0) / np.sum(Y[t]) for t in Y.columns]) * 100
d = d.T
df = pd.DataFrame(d, columns=Y.columns)
df['topic'] = Y.columns
df = df.set_index('topic')
fig, ax = plt.subplots(figsize=(11, 11))
graph = sns.heatmap(df, annot=True, fmt=".0f", cbar=False, cmap="Blues", linewidths=0.2)
ax.xaxis.tick_top() # x axis on top
ax.xaxis.set_label_position('top')
ax.set_xlabel('Chosen label')
ax.set_ylabel('Coincidence of other labels with chosen label [%]')
_ = plt.xticks(rotation=90)
return (fig, ax)
def topic_migration_matrix(Y_true, Y_pred):
"""Create MxM migration matrix for M topics
Each column represents a given ground truth topic label.
Each row represents the relative frequency with which
predicted labels are assigned.
:param Y_true: ground truth topic labels (one-hot format)
:param Y_pred: topic predictions (one-hot format)
:return: tuple (figure, axis)
"""
d = np.array([np.sum(Y_pred[Y_true[t]], axis=0) / np.sum(Y_true[t]) for t in Y_true.columns]) * 100
d = d.T
    df = pd.DataFrame(d, columns=Y_true.columns)  # api: pandas.DataFrame
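    # Hedged completion sketch: the original function is truncated above. By analogy with
    # topic_correlation_matrix, a plausible continuation renders the migration matrix as the
    # same style of heatmap.
    df['topic'] = Y_true.columns
    df = df.set_index('topic')
    fig, ax = plt.subplots(figsize=(11, 11))
    sns.heatmap(df, annot=True, fmt=".0f", cbar=False, cmap="Blues", linewidths=0.2)
    ax.xaxis.tick_top()
    ax.xaxis.set_label_position('top')
    ax.set_xlabel('True label')
    ax.set_ylabel('Frequency of predicted labels per true label [%]')
    _ = plt.xticks(rotation=90)
    return (fig, ax)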
# data handling
import numpy as np
import pandas as pd
# plotting
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# various models and data-processing utilities
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import precision_score
import warnings
data = r'E:\OpenSourceDatasetCode\Dataset\Titiannic Disaster'
train_df = pd.read_csv(data + r'\train.csv')
test_df = pd.read_csv(data + r'\test.csv')
combine_df = pd.concat([train_df, test_df])
# NameLength
train_df.groupby(train_df.Name.apply(lambda x: len(x)))['Survived'].mean().plot()
combine_df['Name_Len'] = combine_df['Name'].apply(lambda x: len(x))
combine_df['Name_Len'] = pd.qcut(combine_df['Name_Len'], 5)
combine_df.groupby(combine_df['Name'].apply(lambda x: x.split(', ')[1]).apply(lambda x: x.split('.')[0]))[
'Survived'].mean().plot()
# Title
combine_df['Title'] = combine_df['Name'].apply(lambda x: x.split(', ')[1]).apply(lambda x: x.split('.')[0])
combine_df['Title'] = combine_df['Title'].replace(
['Don', 'Dona', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir', 'Dr'], 'Mr')
combine_df['Title'] = combine_df['Title'].replace(['Mlle', 'Ms'], 'Miss')
combine_df['Title'] = combine_df['Title'].replace(['the Countess', 'Mme', 'Lady', 'Dr'], 'Mrs')
df = pd.get_dummies(combine_df['Title'], prefix='Title')
combine_df = pd.concat([combine_df, df], axis=1)
combine_df['Fname'] = combine_df['Name'].apply(lambda x: x.split(',')[0])
combine_df['Familysize'] = combine_df['SibSp'] + combine_df['Parch']
# families in which a female died
dead_female_Fname = list(set(combine_df[(combine_df.Sex == 'female') & (combine_df.Age >= 12)
& (combine_df.Survived == 0) & (combine_df.Familysize >= 1)]['Fname'].values))
# families in which a male survived
survive_male_Fname = list(set(combine_df[(combine_df.Sex == 'male') & (combine_df.Age >= 12)
& (combine_df.Survived == 1) & (combine_df.Familysize >= 1)]['Fname'].values))
combine_df['Dead_female_family'] = np.where(combine_df['Fname'].isin(dead_female_Fname), 0, 1)
combine_df['Survive_male_family'] = np.where(combine_df['Fname'].isin(survive_male_Fname), 0, 1)
# Name->Title
combine_df = combine_df.drop(['Name', 'Fname'], axis=1)
# impute missing Age with the Title/Pclass group median, then add a child flag
group = combine_df.groupby(['Title', 'Pclass'])['Age']
combine_df['Age'] = group.transform(lambda x: x.fillna(x.median()))
combine_df = combine_df.drop('Title', axis=1)
combine_df['IsChild'] = np.where(combine_df['Age'] <= 12, 1, 0)
combine_df['Age'] = pd.cut(combine_df['Age'], 5)
combine_df = combine_df.drop('Age', axis=1)
# discretize the Familysize feature extracted above
combine_df['Familysize'] = np.where(combine_df['Familysize'] == 0, 'Alone',
np.where(combine_df['Familysize'] <= 3, 'Normal', 'Big'))
df = pd.get_dummies(combine_df['Familysize'], prefix='Familysize')
combine_df = pd.concat([combine_df, df], axis=1).drop(['SibSp', 'Parch', 'Familysize'], axis=1)
# ticket
combine_df['Ticket_Lett'] = combine_df['Ticket'].apply(lambda x: str(x)[0])
combine_df['Ticket_Lett'] = combine_df['Ticket_Lett'].apply(lambda x: str(x))
combine_df['High_Survival_Ticket'] = np.where(combine_df['Ticket_Lett'].isin(['1', '2', 'P']), 1, 0)
combine_df['Low_Survival_Ticket'] = np.where(combine_df['Ticket_Lett'].isin(['A', 'W', '3', '7']), 1, 0)
combine_df = combine_df.drop(['Ticket', 'Ticket_Lett'], axis=1)
# fill missing Embarked values with 'S'
combine_df.Embarked = combine_df.Embarked.fillna('S')
df = pd.get_dummies(combine_df['Embarked'], prefix='Embarked')
combine_df = pd.concat([combine_df, df], axis=1).drop('Embarked', axis=1)
# Cabin
combine_df['Cabin_isNull'] = np.where(combine_df['Cabin'].isnull(), 0, 1)
combine_df = combine_df.drop('Cabin', axis=1)
# Pclass
df = pd.get_dummies(combine_df['Pclass'], prefix='Pclass')
combine_df = pd.concat([combine_df, df], axis=1).drop('Pclass', axis=1)
# Sex
df = pd.get_dummies(combine_df['Sex'], prefix='Sex')  # api: pandas.get_dummies
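# Hedged completion sketch: the original script is truncated here. Following the pattern used
# for Embarked and Pclass above, a plausible next step merges the dummies and drops the raw
# Sex column.
combine_df = pd.concat([combine_df, df], axis=1).drop('Sex', axis=1)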
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#This is part of a time series analysis and strategy testing tool
#Kth fold optimization using RSI indicator as a signal
#Define function
def KthFoldRSIDecisionOptimizer(Asset, NumIterations):
#Import modules
import numpy as np
import pandas as pd
import time as t
import random as rand
#Number of iterations for brute force optimization
iterations = range(0,NumIterations)
#Variable assignments
Empty = []
Counter = 0
DataSet = pd.DataFrame()
start = t.time()
for i in iterations:
Counter = Counter + 1
print(Counter)
a = 1 - (rand.random() * 3)
b = 1 - (rand.random() * 3)
Asset['Regime'] = np.where(Asset['AggregateDecision'] > a, 1 , 0)
Asset['Regime'] = np.where(Asset['AggregateDecision'] < b, -1, Asset['Regime'])
Asset['Strategy'] = Asset['Regime'].shift(1)*Asset['LogRet']
Asset['Strategy'] = Asset['Strategy'].fillna(0)
if Asset['Strategy'].std() == 0:
continue
Asset['Sharpe'] = Asset['Strategy'].mean()/Asset['Strategy'].std()
if Asset['Sharpe'][-1] < -.01:
continue
Asset['Multiplier'] = Asset['Strategy'].cumsum().apply(np.exp)
Empty.append(a)
Empty.append(b)
#May want to optimize for max return instead of Sharpe
Empty.append(Asset['Sharpe'][-1])
Empty.append(Asset['Multiplier'][-1])
emptyseries = pd.Series(Empty)  # api: pandas.Series
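# Hedged completion sketch: the original function is truncated above. Given the buffers set
# up earlier (Empty, Counter, DataSet), a plausible continuation stores each candidate's
# (a, b, Sharpe, Multiplier) row and clears the buffer before the next iteration, e.g.:
#     DataSet[i] = emptyseries.values
#     Empty[:] = []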
from typing import Optional
from dataclasses import dataclass
import pandas as pd
from poker.base import unique_values, native_mean, running_mean, running_std, running_median, running_percentile
from poker.document_filter_class import DocumentFilter
pd.set_option('use_inf_as_na', True)
def _ts_concat(dic: dict, index_lst: list) -> pd.DataFrame:
"""Concat a dict of dicts or pd.DataFrames"""
lst_df = []
for key, val in dic.items():
if type(val) != pd.DataFrame:
val = pd.DataFrame(val, index=index_lst)
val.columns = [key + ' ' + col if col != '' else key for col in val.columns]
else:
val.columns = [key]
lst_df.append(val)
final_df = pd.concat(lst_df, axis=1).reset_index()
return final_df
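# Hedged usage sketch (illustrative values only, not part of the original module): a dict of
# plain dicts and DataFrames keyed by block name is flattened into one wide frame sharing the
# given index.
# _ts_concat({'Position Bet': {'Pre Flop': [1.0, 2.0]}, 'Game Id': {'': ['a', 'b']}}, index_lst=[0, 1])
# -> columns: ['index', 'Position Bet Pre Flop', 'Game Id']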
def _ts_hand(data: pd.DataFrame) -> pd.DataFrame:
"""Build Hand related data"""
pos_dic = {'Pre Flop': 0.25, 'Post Flop': 0.50, 'Post Turn': 0.75, 'Post River': 1.0}
# Game Id
g_i_df = pd.DataFrame(data.groupby('Start Time')['Game Id'].last())
g_i_df.columns = ['']
# Time in Hand
t_h_df = pd.DataFrame(data.groupby('Start Time')['Seconds into Hand'].last())
t_h_df.columns = ['']
# Last Position
last_position = data.groupby('Start Time')['Position'].last().tolist()
l_p_df = pd.DataFrame([pos_dic[item] for item in last_position], index=t_h_df.index, columns=[''])
# Win
r_w_p = data.groupby('Start Time')['Win'].last().tolist()
r_w_p = [1 if item is True else 0 for item in r_w_p]
r_w_p_df = pd.DataFrame(running_mean(data=r_w_p, num=5), index=t_h_df.index, columns=[''])
ind_lst = data.groupby('Start Time').last().index.tolist()
lst_dic = {'Seconds per Hand': t_h_df, 'Last Position in Hand': l_p_df, 'Rolling Win Percent': r_w_p_df,
'Game Id': g_i_df}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_position(data: pd.DataFrame) -> pd.DataFrame:
"""Build position related data"""
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
p_bet = {'Pre Flop': [], 'Post Flop': [], 'Post Turn': [], 'Post River': []}
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
t_p_bet[row['Position']] += row['Bet Amount']
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Position Bet': p_bet, 'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_class_counts_seconds(data: pd.DataFrame) -> pd.DataFrame:
"""Build class, counts, and seconds data"""
# Bet, Count, and Time Per Position
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
pos_lst = ['Pre Flop', 'Post Flop', 'Post Turn', 'Post River']
class_lst, short_class_lst = ['Checks', 'Calls', 'Raises'], ['Calls', 'Raises']
c_count = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_seconds = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_bet = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_pot = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_chips = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_pos, t_bet, t_class, t_second = row['Position'], row['Bet Amount'], row['Class'], row['Seconds']
t_key = t_pos + ' ' + t_class
t_c_count[t_key] += 1
if t_c_seconds[t_key] is not None:
t_c_seconds[t_key] = native_mean(data=[t_c_seconds[t_key]] + [t_second])
else:
t_c_seconds[t_key] = t_second
if t_class != 'Checks':
if t_c_bet[t_key] is not None:
t_c_bet[t_key] = native_mean(data=[t_c_bet[t_key]] + [t_bet])
else:
t_c_bet[t_key] = t_bet
bet_pot_per = t_bet / (row['Pot Size'] - t_bet)
if t_c_bet_per_pot[t_key] is not None:
t_c_bet_per_pot[t_key] = native_mean(data=[t_c_bet_per_pot[t_key]] + [bet_pot_per])
else:
t_c_bet_per_pot[t_key] = bet_pot_per
bet_chip_per = t_bet / (row['Player Current Chips'] + t_bet)
if t_c_bet_per_chips[t_key] is not None:
t_c_bet_per_chips[t_key] = native_mean(data=[t_c_bet_per_chips[t_key]] + [bet_chip_per])
else:
t_c_bet_per_chips[t_key] = bet_chip_per
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Class Count': c_count, 'Class Seconds': c_seconds, 'Class Bet': c_bet,
'Class Bet Percent of Pot': c_bet_per_pot, 'Class Bet Percent of Chips': c_bet_per_chips,
'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
@dataclass
class TSanalysis:
"""
Calculate Time Series stats for a player.
:param data: Input DocumentFilter.
:type data: DocumentFilter
:param upper_q: Upper Quantile percent, default is 0.841. *Optional*
:type upper_q: float
:param lower_q: Lower Quantile percent, default is 0.159. *Optional*
:type lower_q: float
:param window: Rolling window, default is 5. *Optional*
:type window: int
:example:
>>> from poker.time_series_class import TSanalysis
>>> docu_filter = DocumentFilter(data=poker, player_index_lst=['DZy-22KNBS'])
>>> TSanalysis(data=docu_filter)
:note: This class expects a DocumentFilter with only one player_index used.
"""
def __init__(self, data: DocumentFilter, upper_q: Optional[float] = 0.841, lower_q: Optional[float] = 0.159,
window: Optional[int] = 5):
self._docu_filter = data
self._window = window
self._upper_q = upper_q
self._lower_q = lower_q
self._df = data.df
hand_df = _ts_hand(data=self._df)
self._hand = hand_df.copy()
position_df = _ts_position(data=self._df)
self._position = position_df.copy()
class_df = _ts_class_counts_seconds(data=self._df)
self._class = class_df.copy()
hand_cols, hand_ind = hand_df.columns, hand_df.index
self._hand_mean = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_std = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_median = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_upper_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_lower_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
for col in hand_cols:
if col not in ['Game Id', 'index', 'Start Time']:
self._hand_mean[col] = running_mean(data=hand_df[col], num=self._window)
self._hand_std[col] = running_std(data=hand_df[col], num=self._window)
self._hand_median[col] = running_median(data=hand_df[col], num=self._window)
self._hand_upper_q[col] = running_percentile(data=hand_df[col], num=self._window, q=upper_q)
self._hand_lower_q[col] = running_percentile(data=hand_df[col], num=self._window, q=lower_q)
pos_cols, pos_ind = position_df.columns, position_df.index
    self._position_mean = pd.DataFrame(columns=pos_cols, index=pos_ind)  # api: pandas.DataFrame
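    # Hedged completion sketch: the original __init__ is truncated above. Mirroring the
    # hand-level statistics block, a plausible continuation builds the same rolling
    # statistics for the position DataFrame.
    self._position_std = pd.DataFrame(columns=pos_cols, index=pos_ind)
    self._position_median = pd.DataFrame(columns=pos_cols, index=pos_ind)
    self._position_upper_q = pd.DataFrame(columns=pos_cols, index=pos_ind)
    self._position_lower_q = pd.DataFrame(columns=pos_cols, index=pos_ind)
    for col in pos_cols:
        if col not in ['Game Id', 'index', 'Start Time']:
            self._position_mean[col] = running_mean(data=position_df[col], num=self._window)
            self._position_std[col] = running_std(data=position_df[col], num=self._window)
            self._position_median[col] = running_median(data=position_df[col], num=self._window)
            self._position_upper_q[col] = running_percentile(data=position_df[col], num=self._window, q=upper_q)
            self._position_lower_q[col] = running_percentile(data=position_df[col], num=self._window, q=lower_q)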
"""
Author: <NAME>
File: model_genderid.py
Description: calls functions in data_processing to init data. runs training and testing on data.
"""
from sklearn.model_selection import cross_val_score, cross_val_predict, StratifiedKFold
from sklearn.metrics import accuracy_score, classification_report,confusion_matrix
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential, load_model
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from keras.layers import Dense
import matplotlib.pyplot as plt
import data_processing as data
import pandas as pd
import numpy as np
import os
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# load data
dataset = data.getData() #instance
X, Y, p = dataset.get()
X = X.reshape(6733, 430)
# baseline model
def create_baseline():
# create model
model = Sequential()
model.add(Dense(430, input_dim=430, kernel_initializer='normal', activation='relu'))
model.add(Dense(215, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model= create_baseline()
#model = model.load_weights(os.path.join('saved_models', 'gen_kcv_.h5'))
# evaluate baseline model with standardized dataset
#np.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=1)))
pipeline = Pipeline(estimators)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
predicted = cross_val_predict(pipeline, X, Y, cv=kfold)
df_predicted = pd.DataFrame(predicted)  # api: pandas.DataFrame
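# Hedged completion sketch: the original script is truncated above. A typical next step scores
# the cross-validated predictions against the ground-truth labels using the metrics already
# imported at the top of the file.
print('Accuracy: %.3f' % accuracy_score(Y, predicted))
print(classification_report(Y, predicted))
print(confusion_matrix(Y, predicted))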