"""
Test various functions regarding chapter 4: Sampling (Bootstrapping, Concurrency).
"""
import unittest
import numpy as np
import pandas as pd
from mlfinlab.sampling.bootstrapping import seq_bootstrap, get_ind_matrix, get_ind_mat_average_uniqueness, \
_bootstrap_loop_run, get_ind_mat_label_uniqueness # pylint: disable=protected-access
from mlfinlab.sampling.concurrent import get_av_uniqueness_from_triple_barrier, num_concurrent_events
def book_ind_mat_implementation(bar_index, label_endtime):
"""
Book implementation of get_ind_matrix function
"""
ind_mat = pd.DataFrame(0, index=bar_index, columns=range(label_endtime.shape[0]))
    for i, (start, end) in enumerate(label_endtime.items()):
ind_mat.loc[start:end, i] = 1.
return ind_mat
class TestSampling(unittest.TestCase):
"""
    Test sampling functions: sequential bootstrapping and concurrency.
"""
def setUp(self):
"""
Set samples_info_sets (t1), price bars
"""
        self.price_bars = pd.Series(index=pd.date_range(start="1/1/2018", end='1/8/2018', freq='H'))
"""
Analyze results and plot figures
"""
# Imports
#==============#
import pandas as pd
import numpy as np
import scipy
import random
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import bioinf
# Plots for HMM method 5-fold cross validation
#===============================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
legend_font = {'family':fnt, 'size':'12'}
label_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
ec = 'black'
legend_label = ['CBH', 'EG']
# SwissProt Dataset
ex = pd.read_csv('results_final/swiss_kfold.csv')
lw = 0.8
out1 = plt.bar(range(30), ex.diff_score[:30], color='blue',
linewidth=lw, edgecolor=ec)
out2 = plt.bar(range(30,44), ex.diff_score[30:], color='red',
linewidth=lw, edgecolor=ec)
pltout = [x[0] for x in [out1, out2]]
plt.xlabel('Sequence', **label_font)
plt.ylabel('Score difference', **label_font)
plt.xticks(**ticks_font)
plt.yticks([-300,-150,0,150,300,450], **ticks_font)
plt.xlim([-0.6,43.6])
plt.axhline(color='black', linewidth=1)
plt.legend(pltout, legend_label, prop=legend_font,
loc='upper right')
plt.tight_layout()
plt.savefig('plots/swiss_kfold.pdf')
plt.close()
# NCBI dataset
ex = pd.read_csv('results_final/ncbi_kfold.csv')
lw = 0.15
cbhs = list(ex.diff_score[:291])
egs = list(ex.diff_score[291:])
random.shuffle(cbhs)
random.shuffle(egs)
out1 = plt.bar(range(291), cbhs, color='blue', linewidth=lw,
edgecolor='blue')
out2 = plt.bar(range(291,427), egs, color='red', linewidth=lw,
edgecolor='red')
pltout = [x[0] for x in [out1, out2]]
plt.xlabel('Sequence', **label_font)
plt.ylabel('Score difference', **label_font)
plt.xticks(**ticks_font)
plt.yticks([-300,-150,0,150,300,450], **ticks_font)
plt.xlim([-1,428])
plt.axhline(color='black', linewidth=1)
plt.legend(pltout, legend_label, prop=legend_font,
loc='upper right')
plt.tight_layout()
plt.savefig('plots/ncbi_kfold.pdf')
plt.close()
# Pymol commands for loop positions in TreCel7A and TreCel7B
#==============================================================#
# Cel7A
loopstart = [98, 399, 369, 383, 51, 194, 244, 339]
length = [5,13,5,10,6,8,10,4]
cel7a_start = list(loopstart)
cel7a_stop = [loopstart[i] + length[i] - 1 for i in range(8)]
cel7a_pymol = 'select cel7a_loops, '
for i in range(8):
cel7a_pymol += f'resi {cel7a_start[i]}-{cel7a_stop[i]} or '
# Cel7B
fasta = 'fasta/structure_based_alignment/structure6_mafft.fasta'
heads, seqs = bioinf.split_fasta(fasta)
seq7a_msa, seq7b_msa = seqs[0], seqs[3]
seq7a, seq7b = seq7a_msa.replace('-', ''), seq7b_msa.replace('-','')
msastart = [bioinf.resid_to_msa(seq7a_msa, x-1) for x in cel7a_start]
msastop = [bioinf.resid_to_msa(seq7a_msa, x-1) for x in cel7a_stop]
cel7b_start = [bioinf.msa_to_resid(seq7b_msa, x) for x in msastart]
cel7b_stop = [bioinf.msa_to_resid(seq7b_msa, x+1) for x in msastop]
cel7b_pymol = 'select cel7b_loops, '
for i in range(8):
cel7b_pymol += f'resi {cel7b_start[i] + 1}-{cel7b_stop[i]} or '
# Write
with open('plots/loops_pymol.txt', 'w') as pymol:
pymol.write(cel7a_pymol[:-4] + '\n\n')
pymol.write(cel7b_pymol[:-4])
# Pymol selection command to visualize rules on structure
#=========================================================#
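# Note: `positions` used below is defined further down (from `rules_mcc.tre_pos` in the
# rules section), so that section must be run before this one.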
pymol_positions = 'select rules, ('
for pos in positions:
pymol_positions += f'resi {pos} or '
pymol_positions = pymol_positions[:-4]
pymol_positions += ') and name ca'
with open('plots/rules_pymol.txt', 'w') as txt:
txt.write(pymol_positions)
# Table for ML subtype performance
#=====================================#
mlkeys = ['dec', 'svm', 'knn', 'log']
features = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4', 'all8']
store2 = []
columns = []
for key in mlkeys:
excel = pd.read_csv(f'results_final/ml_subtype_pred/{key}.csv', index_col=0)
sens_store, spec_store, acc_store = [], [], []
columns.extend([key + '_sens', key + '_spec', key + '_acc'])
for i in range(len(features)):
sens_store.append(str(round(excel.sens_mean[i], 1)) + ' ± ' + \
str(round(excel.sens_std[i], 1)))
spec_store.append(str(round(excel.spec_mean[i], 1)) + ' ± ' + \
str(round(excel.spec_std[i], 1)))
acc_store.append(str(round(excel.acc_mean[i], 1)) + ' ± ' + \
str(round(excel.acc_std[i], 1)))
store2.extend([sens_store, spec_store, acc_store])
store2 = pd.DataFrame(store2).transpose()
store2.index = features
store2.columns = columns
store2.to_csv('plots/ml_subtype_table.csv')
# Plot MCC values for subtype prediction with ML
#===================================================#
# Variables
mlkeys = ['dec', 'log', 'knn', 'svm']
labels = ['Decision tree', 'Logistic regression', 'KNN', 'SVM']
features = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4', 'All-8']
colors = ['goldenrod', 'magenta', 'cadetblue', 'red']
# Plot specifications
fnt = 'Arial'
ticks_font = {'fontname':fnt, 'size':'14'}
legend_font = {'family':fnt, 'size':'11'}
label_font = {'family':fnt, 'size':'18'}
plt.rcParams["figure.figsize"] = [11,3]
plt.rcParams['grid.alpha'] = 0.5
for i, key in enumerate(mlkeys):
# Get data
data = pd.read_csv(f'results_final/mcc_data/{key}.csv', index_col=0)
# Boxplot specifications
positions = np.arange(9) * (len(mlkeys) + 3) + i
color = colors[i]
meanprops = {'marker':'o',
'markerfacecolor':color,
'markeredgecolor':'black',
'markersize':2.0,
'linewidth':1.0}
medianprops = {'linestyle':'-',
'linewidth':1.0,
'color':'black'}
boxprops = {'facecolor':color,
'color':'black',
'linewidth':1.0}
flierprops = {'marker':'o',
'markerfacecolor':'black',
'markersize':1,
'markeredgecolor':'black'}
whiskerprops = {'linewidth':1.0}
capprops = {'linewidth':1.0}
# Plot the boxplot
_ = plt.boxplot(
data,
positions=positions,
widths=0.85,#(1, 1, 1),
whis=(0,100), # Percentiles for whiskers
showmeans=False, # Show means in addition to median
patch_artist=True, # Fill with color
meanprops=meanprops, # Customize mean points
medianprops=medianprops, # Customize median points
boxprops=boxprops,
showfliers=False, # Show/hide points beyond whiskers
flierprops=flierprops,
whiskerprops=whiskerprops,
capprops=capprops
)
# Plot dummy scatter points for legend
for i in range(len(mlkeys)):
plt.bar([100], [100], color=colors[i], label=labels[i], edgecolor='black',
linewidth=0.5)
# Specifications
plt.legend(frameon=1, numpoints=1, shadow=0, loc='best',
prop=legend_font)
plt.xticks(np.arange(9) * 7 + 1.5, features, **ticks_font)
plt.yticks(**ticks_font)
plt.ylabel('MCC', **label_font)
plt.ylim((-1.1, 1.1))
plt.xlim((-1,61))
plt.tight_layout()
# Save plot
plt.savefig('plots/mcc_boxwhiskerplot.pdf')
plt.show(); plt.close()
# Plots for outlier detection
#===============================#
looplength = pd.read_csv('results_final/looplength.csv', index_col=0)
subtype = pd.read_csv('results_final/cel7_subtypes.csv', index_col=0)['ncbi_pred_class']
looplength.index = range(len(looplength))
subtype.index = range(len(subtype))
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
label_font = {'family':fnt, 'size':'22'}
legend_font = {'family':'Arial', 'size':'14'}
title_font = {'family':fnt, 'size':'30'}
plt.rcParams['figure.figsize'] = [6,4]
# View the distribution to intuitively determine outliers
maxlength = [14, 20, 25, 16, 52, 141, 50, 14] # Values equal or greater than are outliers
topcode_vals = [] # Change the outlier values to top-coded values
for i in range(8):
sortedvals = sorted(looplength.iloc[:,i])
maxval = maxlength[i]
topcode_vals.append(sortedvals[sortedvals.index(maxval) - 1])
color = ['blue' if x<maxval else 'red' for x in sortedvals]
loop = looplength.columns[i]
plt.scatter(range(len(looplength)), sortedvals, color=color,
marker='o')
plt.xticks(**ticks_font)
plt.yticks(**ticks_font)
plt.xlabel('Index', **label_font)
plt.ylabel('Length', **label_font)
plt.title(loop, **title_font)
plt.tight_layout()
#plt.savefig(f'plots/outlier_detection/{loop}.pdf')
plt.show()
plt.close()
# Cap outliers
looplength = looplength.iloc[:,:-1]
for i in range(len(looplength.columns)):
vals = list(looplength.iloc[:,i])
vals = [x if x<maxlength[i] else topcode_vals[i] for x in vals]
looplength.iloc[:,i] = pd.Series(vals)
# Plot loop lengths (box/whisker plot)
#=======================================#
# Get data
cbh_looplength = looplength.iloc[subtype[subtype==1].index]
eg_looplength = looplength.iloc[subtype[subtype==0].index]
data = [cbh_looplength, eg_looplength]
labels = ['CBH', 'EG']
colors = ['lightblue', 'pink']
# Plot specifications
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'16'}
label_font = {'family':fnt, 'size':'18'}
legend_font = {'family':'Arial', 'size':'12'}
title_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
plt.rcParams['grid.alpha'] = 0.3
plt.rcParams['axes.axisbelow'] = True
legend_label = ['CBH', 'EG']
for i in range(2):
positions = np.arange(8) * (len(data) + 1) + i
color = colors[i]
medianprops = {'linestyle':'-',
'linewidth':1.0,
'color':'black'}
boxprops = {'facecolor':color,
'color':'black',
'linewidth':1.0}
flierprops = {'marker':'o',
'markerfacecolor':'black',
'markersize':1,
'markeredgecolor':'black'}
whiskerprops = {'linewidth':1.0}
capprops = {'linewidth':1.0}
# Plot the boxplot
_ = plt.boxplot(
data[i],
positions=positions,
widths=0.75,#(1, 1, 1),
whis=(0,100), # Percentiles for whiskers
showmeans=False, # Show means in addition to median
patch_artist=True, # Fill with color
meanprops=meanprops, # Customize mean points
medianprops=medianprops, # Customize median points
boxprops=boxprops,
showfliers=False, # Show/hide points beyond whiskers
flierprops=flierprops,
whiskerprops=whiskerprops,
capprops=capprops
)
# Plot dummy scatter points for legend
for i in range(2):
plt.bar([100], [100], color=colors[i], label=labels[i], edgecolor='black',
linewidth=1.0)
# Plot specifications
plt.legend(frameon=1, numpoints=1, shadow=0, loc='upper center',
prop=legend_font)
plt.xticks(np.arange(8) * 3 + 0.5, cbh_looplength.columns, **ticks_font)
plt.yticks(np.arange(-4, 24, step=4), **ticks_font)
plt.ylabel('Number of residues', **label_font)
plt.ylim((-0.5, 22))
plt.xlim((-1,23))
plt.tight_layout()
plt.savefig('plots/looplength_boxwhiskerplot.pdf')
plt.show(); plt.close()
# Plot relative standard deviation
#===================================#
mean = np.mean(looplength, axis=0)
std = np.std(looplength, axis=0)
cov = std/mean*100
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'14'}
label_font = {'family':fnt, 'size':'15'}
plt.rcParams['figure.figsize'] = [6,3]
lw=1.3
plt.bar(range(len(cov)), cov, color='brown', linewidth=lw,
edgecolor='black')
plt.xticks(range(len(cov)), cov.index, **ticks_font)
plt.yticks([20,40,60,80,100], **ticks_font)
plt.xlim([-0.45,7.45])
plt.ylim([0,80])
plt.ylabel('Relative standard deviation (%)', **label_font)
plt.tight_layout()
plt.savefig('plots/rsd.pdf')
# Density plots of loop lengths
#=============================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
label_font = {'family':fnt, 'size':'22'}
legend_font = {'family':'Arial', 'size':'14'}
title_font = {'family':fnt, 'size':'30'}
plt.rcParams['figure.figsize'] = [6.5, 5]
bw = 0.5 # Change this to control the steepness of the density kernel function
xmin = [-0.5, -0.5, -0.5, -0.5, -0.5, -1.0, -0.5, -0.6]
xmax = [10, 16, 8, 13, 10, 11, 14, 8]
ymax = [0.5, 0.8, 0.8, 0.7, 0.8, 0.9, 0.5, 0.8]
legend_label = ['CBH', 'EG']
for i in range(len(looplength.columns)):
col = looplength.columns[i]
ax1 = sns.kdeplot(cbh_looplength[col], bw=bw, legend=True,
shade=False, color='blue')
ax2 = sns.kdeplot(eg_looplength[col], bw=bw, legend=True,
shade=False, color='red')
ax1.legend(legend_label, loc='best', prop=legend_font)
plt.xticks(**ticks_font)
plt.yticks(np.arange(0,11,2)*0.1, **ticks_font)
plt.xlim((0, xmax[i]))
plt.ylim((0,ymax[i]))
plt.title(col, **title_font)
plt.xlabel('Number of residues', **label_font)
plt.ylabel('Density', **label_font)
plt.tight_layout()
plt.savefig(f'plots/density_plots/{col}.pdf')
plt.show()
plt.close()
# Heatmap of loop length correlation
#====================================#
p_corr, s_corr = [], [] # Pearson's and Spearman's correlation coefficients
for i in range(len(looplength.columns)):
corr_p, corr_s = [], []
for k in range(len(looplength.columns)):
corr_p.append(np.corrcoef(looplength.iloc[:,i],
looplength.iloc[:,k])[0][1])
corr_s.append(scipy.stats.spearmanr(looplength.iloc[:,i],
looplength.iloc[:,k])[0])
p_corr.append(corr_p)
s_corr.append(corr_s)
p_corr = pd.DataFrame(p_corr)
s_corr = pd.DataFrame(s_corr)
p_corr.index = looplength.columns
p_corr.columns = looplength.columns
s_corr.index = looplength.columns
s_corr.columns = looplength.columns
sns.set(font='Arial', font_scale=0.6)
cluster = sns.clustermap(p_corr, cmap='Reds', metric='euclidean',
method='average', figsize=(3.15,3.15),
annot=True, fmt='.2f', annot_kws={'size':6})
cluster.savefig('plots/looplength_corr.pdf')
# Table of classification/association rules
#===========================================#
from subtype_rules import Cel7MSA
cbhmsa = 'fasta/trecel7a_positions_only/cbh_cat.fasta'
eglmsa = 'fasta/trecel7a_positions_only/egl_cat.fasta'
cel7msa = Cel7MSA(cbhmsa, eglmsa)
cel7msa.get_freq(include_gaps=True)
rules = pd.read_csv('results_final/rules/rules_all.csv', index_col=0)
rules_amino = pd.read_csv('results_final/rules/rules_amino.csv', index_col=0)
rules_type = pd.read_csv('results_final/rules/rules_type.csv', index_col=0)
mcc = list(rules.mcc)
min_mcc = np.percentile(mcc, 95) # mcc > 0.73
rules_mcc = rules[rules.mcc >= min_mcc]
rules_amino_mcc = rules_amino[rules_amino.mcc >= min_mcc] # 45 rules
rules_type_mcc = rules_type[rules_type.mcc >= min_mcc] # 45 rules
positions = sorted(set(rules_mcc.tre_pos)) # 42 positions
rules_mcc.to_csv('results_final/rules/rules_mcc.csv')
rules_amino_mcc.to_csv('results_final/rules/rules_amino_mcc.csv')
rules_type_mcc.to_csv('results_final/rules/rules_type_mcc.csv')
rules_amino_table = rules_amino_mcc.loc[:,['tre_pos','rule', 'closest_subsite',
'dist_subsite','sens', 'spec', 'acc', 'mcc']]
rules_amino_table.columns = ['Position', 'Rule', 'Closest subsite',
'Distance to closest subsite (Å)', 'Sensitivity',
'Specificity', 'Accuracy', 'MCC']
rules_amino_table.to_csv('plots/rules_amino_table.csv')
rules_type_table = rules_type_mcc.loc[:,['tre_pos','rule', 'closest_subsite',
'dist_subsite', 'sens', 'spec', 'acc', 'mcc']]
rules_type_table.columns = ['Position', 'Rule', 'Closest subsite',
'Distance to closest subsite (Å)', 'Sensitivity',
'Specificity', 'Accuracy', 'MCC']
rules_type_table.to_csv('plots/rules_type_table.csv')
# Plot Histogram for MCC of rules
#=================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'20'}
label_font = {'family':fnt, 'size':'22'}
title_font = {'family':fnt, 'size':'24'}
plt.rcParams['figure.figsize'] = [6,3.5]
plt.rcParams['grid.alpha'] = 0.5
plt.rcParams['axes.axisbelow'] = True
weights = np.zeros_like(mcc) + 1/len(mcc)
plt.hist(mcc, bins=12, rwidth=1, color='darkgreen', weights=weights)
plt.xticks(np.arange(-80,101,40)*0.01, **ticks_font)
plt.yticks(np.arange(0,28,5)*0.01, **ticks_font)
plt.xlabel('MCC', **label_font)
plt.ylabel('Relative frequency', **label_font)
plt.tight_layout()
plt.savefig('plots/rules_mcc_dist.pdf')
# Minimum distance between rules' positions and substrate
#============================================================#
dist50 = np.percentile(rules_mcc.dist_subsite, 50) #4.79Å
rule_dist = list(rules_mcc.dist_subsite)
weights = np.zeros_like(rule_dist) + 1/len(rule_dist)
plt.hist(rule_dist, bins=7, weights=weights, color='indigo')
plt.xticks(np.arange(0,30,5), **ticks_font)
plt.xlim((0,25))
plt.yticks(**ticks_font)
plt.xlabel('Distance to substrate (Å)', **label_font)
plt.ylabel('Relative frequency', **label_font)
plt.tight_layout()
plt.savefig('plots/rules_distance_dist.pdf')
# Distribution at position 40
#===========================#
cel7msa.site_plot(site=40, savefig=True, savepath='plots/position_distribution')
# Distribution at 42 important positions
#==========================================#
plt.rcParams['figure.figsize'] = [7,4]
for i in range(len(positions)):
cel7msa.site_plot(site=positions[i], savefig=True,
savepath='plots/position_distribution')
# Aromatic residues within 6Å of substrate (and consensus CBH and EGL)
#==============================================================================#
cel7msa.get_consensus_sequences()
cbh_consensus = list(cel7msa.consensus_cbh)
egl_consensus = list(cel7msa.consensus_egl)
tre = bioinf.split_fasta('fasta/trecel7a_positions_only/consensus.fasta')[1][1]
excel = pd.read_csv('results_final/residue_distances.csv', index_col=0)
closest_subsite = list(excel.iloc[:,0])
distances = list(excel.iloc[:,1])
resid_aro, tre_aro, cbh_aro, egl_aro, closest_subsite_aro, dist_aro = [],[],[],[],[],[]
cbh_aro_freq, egl_aro_freq, conserved = [], [], []
aro_res = ['F', 'W', 'Y', 'H']
for i in range(len(tre)):
if (tre[i] in aro_res or cbh_consensus[i] in aro_res or egl_consensus[i] in aro_res)\
and distances[i]<=6.0:
resid_aro.append(i+1)
tre_aro.append(tre[i])
cbh_aro.append(cbh_consensus[i])
egl_aro.append(egl_consensus[i])
closest_subsite_aro.append(closest_subsite[i])
dist_aro.append(distances[i])
cbh_freq = cel7msa.cbh_freq.iloc[[4,6,18,19],i].sum()*100
egl_freq = cel7msa.egl_freq.iloc[[4,6,18,19],i].sum()*100
cbh_aro_freq.append(cbh_freq)
egl_aro_freq.append(egl_freq)
if cbh_freq > 66 and egl_freq < 66:
conserved.append('CBH')
elif cbh_freq < 66 and egl_freq > 66:
conserved.append('EG')
elif cbh_freq > 66 and egl_freq > 66:
conserved.append('CBH and EG')
else:
conserved.append('None')
store = pd.DataFrame([resid_aro, tre_aro, cbh_aro, egl_aro, cbh_aro_freq, egl_aro_freq,
closest_subsite_aro, dist_aro, conserved]).transpose()
store.columns = ['Position', 'Trecel7A residue', 'CBH consensus residue',
'EG consensus residue', 'Frequency of aromatic residues in CBHs (%)',
'Frequency of aromatic residues in EGs (%)', 'Closest subsite',
'Distance to closest subsite (Å)', 'Aromatic residues conserved (>66%) in']
store = store.sort_values('Closest subsite')
store.to_csv('results_final/aromatic_residues.csv')
# Pymol commands for viewing aromatic residues on structure
#=============================================================#
pymol_cbh = 'select arocbh, '
pymol_both = 'select aroboth, '
for i in range(len(store)):
pos = store.iloc[i,0]
if store.iloc[i,-1]=='CBH':
pymol_cbh += f'resi {pos} or '
elif store.iloc[i,-1]=='CBH and EG':
pymol_both += f'resi {pos} or '
with open('plots/aromatic_pymol.txt', 'w') as pym:
pym.write(pymol_cbh[:-4] + '\n\n')
pym.write(pymol_both[:-4] + '\n\n')
# Plot feature importances for CBM prediction (All 5933 features)
#===============================================================================#
ex = pd.read_csv('results_final/ml_cbm_pred/featimp_all.csv', index_col=0)
ex = ex.sort_values('mean', ascending=False)
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'16'}
label_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
color = 'red'
plt.bar(range(len(ex)), list(ex.iloc[:,1]), color=color, linewidth=1, edgecolor=color)
plt.xticks(**ticks_font)
plt.yticks(**ticks_font)
plt.xlabel('Feature No.', **label_font)
plt.ylabel('Gini importance', **label_font)
plt.tight_layout()
plt.savefig('plots/cbm_all_featimp.pdf')
plt.show();plt.close()
# Plot of feature importances of top 20 features
#================================================#
# Get data and sort in descending order of feature importance
ex = pd.read_csv('results_final/ml_cbm_pred/featimp_top20_fulldata.csv', index_col=0)
ex = ex.loc[:,ex.mean(axis=0).sort_values(ascending=False).index]
# Plot specifications
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'16'}
label_font = {'family':fnt, 'size':'20'}
legend_font = {'family':'Arial', 'size':'12'}
title_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
plt.rcParams['axes.axisbelow'] = True
positions = np.arange(ex.shape[1])
color = 'red'
medianprops = {'linestyle':'-',
'linewidth':1.0,
'color':'black'}
boxprops = {'facecolor':color,
'color':'black',
'linewidth':1.0}
flierprops = {'marker':'o',
'markerfacecolor':'black',
'markersize':1,
'markeredgecolor':'black'}
whiskerprops = {'linewidth':1.0}
capprops = {'linewidth':1.0}
# Box and whisker plot
_ = plt.boxplot(
ex,
positions=positions,
widths=0.75,#(1, 1, 1),
whis=(0,100), # Percentiles for whiskers
showmeans=False, # Show means in addition to median
patch_artist=True, # Fill with color
medianprops=medianprops, # Customize median points
boxprops=boxprops,
showfliers=False, # Show/hide points beyond whiskers
flierprops=flierprops,
whiskerprops=whiskerprops,
capprops=capprops
)
# Plot specifications
plt.xticks(np.arange(ex.shape[1]), ex.columns, rotation=90, **ticks_font)
plt.yticks(np.arange(0.0, 0.15, step=0.02), **ticks_font)
plt.ylabel('Gini importance', **label_font)
plt.ylim((-0.005, 0.145))
plt.xlim((-1,20))
plt.tight_layout()
plt.savefig('plots/cbm_top20_featimp_boxwhisker.pdf')
plt.show(); plt.close()
'''
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'16'}
label_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
plt.bar(range(len(ex)), ex.iloc[:,1], color='maroon', linewidth=0.6, edgecolor='black',
yerr=ex.iloc[:,2], ecolor='black', capsize=3)
plt.xticks(range(len(ex)), ex.iloc[:,0], **ticks_font, rotation=90)
plt.yticks(**ticks_font)
plt.xlabel('Features', **label_font)
plt.ylabel('Gini importance', **label_font)
plt.tight_layout()
plt.savefig('plots/cbm_top20_featimp_boxwhisker.pdf')
'''
# Pymol commands for viewing top 20 positions on structure
#===========================================================#
positions = list(ex.columns)   # top-20 feature names (columns were sorted by importance above)
positions = [x[1:] for x in positions]   # drop the leading character so only the residue number remains
pymol_positions = 'select top20, ('
for pos in positions:
pymol_positions += f'resi {pos} or '
pymol_positions = pymol_positions[:-4]
pymol_positions += ') and protein'
with open('plots/cbm_pymol.txt', 'w') as txt:
txt.write(pymol_positions)
# Table of position-specific rules for predicting CBM
#======================================================#
ex = pd.read_csv('results_final/ml_cbm_pred/position_rules.csv', index_col=0)
table = pd.DataFrame()
table['position'] = [int(x.split()[-1].split('=>')[0][1:]) for x in ex.rule]
table['rule'] = ex.rule
table['sensitivity'] = [round(x, 1) for x in ex.sensitivity]
table['specificity'] = [round(x,1) for x in ex.specificity]
table['accuracy'] = [round(x,1) for x in ex.accuracy]
table['mcc'] = [round(x,2) for x in ex.mcc]
table = table.sort_values('position', ascending=True)
table.to_csv('plots/cbm_rules.csv')
# Pairwise distribution of GH7 loop lengths
#=============================================#
looplength = pd.read_csv('results_final/looplength.csv', index_col=0)
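# The rest of this section is not shown; the sketch below is one illustrative way to draw
# the pairwise length distributions. It assumes seaborn's pairplot and the `subtype` series
# loaded earlier; the output filename is an assumption, not from the original.
pairwise_data = looplength.copy()
pairwise_data['subtype'] = subtype.values
g = sns.pairplot(pairwise_data, hue='subtype', plot_kws={'s': 10}, corner=True)
g.savefig('plots/looplength_pairwise.pdf')
plt.close()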
import numpy as np
import pandas as pd
import sklearn.mixture
import sklearn.metrics
import math
#classifier function
def classifier(q):
    file1 = pd.read_csv("fileto-train.csv")
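# Hedged sketch (not the original body of `classifier`): one plausible use of the imports
# above is a two-component Gaussian mixture model; all names below are illustrative.
def _gmm_classify_sketch(train_features, query_features, n_components=2):
    """Fit a GaussianMixture on training features and assign component labels to a query."""
    gmm = sklearn.mixture.GaussianMixture(n_components=n_components, random_state=0)
    gmm.fit(train_features)
    return gmm.predict(query_features)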
import logging
import pandas as pd
from pandas import DataFrame
from autogluon.core.features.types import R_DATETIME, S_DATETIME_AS_OBJECT
from .abstract import AbstractFeatureGenerator
logger = logging.getLogger(__name__)
class DatetimeFeatureGenerator(AbstractFeatureGenerator):
"""Transforms datetime features into numeric features."""
def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
X_out = self._transform(X)
type_family_groups_special = dict(
datetime_as_int=list(X_out.columns)
)
return X_out, type_family_groups_special
def _transform(self, X: DataFrame) -> DataFrame:
return self._generate_features_datetime(X)
@staticmethod
def get_default_infer_features_in_args() -> dict:
return dict(required_raw_special_pairs=[
(R_DATETIME, None),
(None, [S_DATETIME_AS_OBJECT])
])
# TODO: Improve handling of missing datetimes
def _generate_features_datetime(self, X: DataFrame) -> DataFrame:
X_datetime = DataFrame(index=X.index)
for datetime_feature in self.features_in:
# TODO: Be aware: When converted to float32 by downstream models, the seconds value will be up to 3 seconds off the true time due to rounding error. If seconds matter, find a separate way to generate (Possibly subtract smallest datetime from all values).
X_datetime[datetime_feature] = pd.to_datetime(X[datetime_feature])
            X_datetime[datetime_feature] = pd.to_numeric(X_datetime[datetime_feature])
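        # Hedged assumption (not shown in the original snippet): return the numeric datetime frame.
        return X_datetime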
# Particle release class
# -------------------
# release.py
# part of LADiM
# --------------------
# ----------------------------------
# <NAME> <<EMAIL>>
# Institute of Marine Research
# Bergen, Norway
# ----------------------------------
import logging
import numpy as np
import pandas as pd
from typing import Iterator, List
from netCDF4 import Dataset
from .utilities import ingrid
from .configuration import Config
# from .gridforce import Grid
def mylen(df: pd.DataFrame) -> int:
"""Number of rows in a DataFrame,
A workaround for len() which does not
have the expected behaviour with itemizing,
"""
return df.shape[0] if df.ndim > 1 else 1
# if df.ndim == 1:
# return 1
# else:
# return df.shape[0]
class ParticleReleaser(Iterator):
"""Particle Release Class"""
def __init__(self, config: Config, grid) -> None:
start_time = pd.to_datetime(config["start_time"])
stop_time = pd.to_datetime(config["stop_time"])
logging.info("Initializing the particle releaser")
# Read the particle release file
A = pd.read_csv(
config["particle_release_file"],
names=config["release_format"],
converters=config["release_dtype"],
delim_whitespace=True,
)
# If no mult column, add a column of ones
if "mult" not in config["release_format"]:
A["mult"] = 1
# Use release_time as index
A.index = A["release_time"]
# Conversion from longitude, latitude to grid coordinates
if "X" not in A.columns or "Y" not in A.columns:
if "lon" not in A.columns or "lat" not in A.columns:
logging.critical("Particle release mush have position")
raise SystemExit(3)
# else
X, Y = grid.ll2xy(A["lon"], A["lat"])
A["lon"] = X
A["lat"] = Y
A.rename(columns={"lon": "X", "lat": "Y"}, inplace=True)
# Remove everything after simulation stop time
# A = A[A['release_time'] <= stop_time] # Use < ?
A = A[A.index <= stop_time] # Use < ?
if len(A) == 0: # All release after simulation time
logging.critical("All particles released after similation stop")
raise SystemExit(3)
# Optionally, remove everything outside a subgrid
try:
subgrid: List[int] = config["grid_args"]["subgrid"]
except KeyError:
subgrid = []
if subgrid:
lenA = len(A)
A = A[ingrid(A["X"], A["Y"], subgrid)]
if len(A) < lenA:
logging.warning("Ignoring particle release outside subgrid")
# file_times = A['release_time'].unique()
# TODO: Make a function of continuous -> discrete
# Fill out if continuous release
if config["release_type"] == "continuous":
# Find first effective release time
# i.e. the last time <= start_time
# and remove too early releases
# Can be moved out of if-block?
n = np.sum(A.index <= start_time)
if n == 0:
logging.warning("No particles released at simulation start")
n = 1
# First effective release:
# release_time0 = A.iloc[n-1].release_time
# TODO: Check pandas, better way to delete rows?
times = A["release_time"]
try:
release_time0 = times[times <= pd.to_datetime(start_time)][-1]
except IndexError:
release_time0 = times[0]
A = A[A.index >= release_time0]
# time0 = file_times[0]
# time1 = max(file_times[-1], stop_time)
time0 = A.index[0]
time1 = max(A.index[-1], pd.Timestamp(stop_time))
# time1 = max(A['release_time'][-1], stop_time)
times = np.arange(time0, time1, config["release_frequency"])
# A = A.reindex(times, method='pad')
# A['release_time'] = A.index
# Reindex does not work with non-unique index
I = A.index.unique()
J = pd.Series(I, index=I).reindex(times, method="pad")
M = {i: mylen(A.loc[i]) for i in I}
A = A.loc[J]
# Correct time index
S: List[int] = []
for t in times:
S.extend(M[J[t]] * [t])
A["release_time"] = S
A.index = S
# Remove any new instances before start time
# n = np.sum(A['release_time'] <= start_time)
# if n == 0:
# n = 1
# A = A.iloc[n-1:]
# If first release is early set it to start time
# A['release_time'] = np.maximum(A['release_time'], # tart_time)
# If discrete, there is an error to have only early releases
# OK if warm start
else: # Discrete
if A.index[-1] < start_time and config["start"] == "cold":
logging.error("All particles released before similation start")
raise SystemExit
# We are now discrete,
# remove everything before start time
A = A[A.index >= start_time]
# If warm start, no new release at start time (already accounted for)
if config["start"] == "warm":
A = A[A.index > start_time]
# Compute which timestep the release should happen
timediff = A["release_time"] - config['start_time']
dt = np.timedelta64(config["dt"], 's')
rel_tstep = np.int32(timediff / dt)
# Release times, rounded to nearest time step
self.times = np.unique(config['start_time'] + rel_tstep * dt)
logging.info("Number of release times = {}".format(len(self.times)))
# Compute the release time steps
rel_time = self.times - config["start_time"]
rel_time = rel_time.astype("m8[s]").astype("int")
self.steps = rel_time // config["dt"]
# Make dataframes for each timeframe
# self._B = [x[1] for x in A.groupby('release_time')]
self._B = [x[1] for x in A.groupby(rel_tstep)]
# Read the particle variables
self._index = 0 # Index of next release
self._particle_count = 0 # Particle counter
# Handle the particle variables initially
# TODO: Need a test to check that this iw working properly
pvars = dict()
for name in config["particle_variables"]:
dtype = config["release_dtype"][name]
if dtype == np.datetime64:
dtype = np.float64
pvars[name] = np.array([], dtype=dtype)
# Get particle data from warm start
if config["start"] == "warm":
with Dataset(config["warm_start_file"]) as f:
# warm_particle_count = len(f.dimensions['particle'])
warm_particle_count = np.max(f.variables["pid"][:]) + 1
for name in config["particle_variables"]:
pvars[name] = f.variables[name][:warm_particle_count]
else:
warm_particle_count = 0
# initital number of particles
if config["start"] == "warm":
init_released = warm_particle_count
else:
init_released = 0
particles_released = [init_released] + [df['mult'].sum() for df in self._B]
# Loop through the releases, collect particle variable data
mult = A['mult'].values
for name in config['particle_variables']:
val = np.repeat(A[name].values, mult)
if config['release_dtype'][name] == np.datetime64:
val = (val - config["reference_time"]) / np.timedelta64(1, 's')
pvars[name] = np.concatenate((pvars[name], val))
self.total_particle_count = warm_particle_count + np.sum(mult)
self.particle_variables = pvars
logging.info("Total particle count = {}".format(self.total_particle_count))
self.particles_released = particles_released
# Export total particle count
# Ugly to write back to config
# Easier way to give this piece of information to the
# output initialization?
config["total_particle_count"] = self.total_particle_count
# Reset the counter after the particle counting
self._index = 0 # Index of next release
self._particle_count = warm_particle_count
def __next__(self) -> pd.DataFrame:
"""Perform the next particle release
Return a DataFrame with the release info,
repeating mult times
"""
# This should not happen
if self._index >= len(self.times):
raise StopIteration
# Skip first release if warm start (should be present in start file)
# Not always, make better test
# Moving test to state.py
# if self._index == 0 and self._particle_count > 0: # Warm start
# return
# rel_time = self.times[self._index]
# file_time = self._file_times[self._file_index]
V = self._B[self._index]
nnew = V.mult.sum()
# Workaround, missing repeat method for pandas DataFrame
V0 = V.to_records(index=False)
V0 = V0.repeat(V.mult)
        V = pd.DataFrame(V0)
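        # Hedged sketch (not shown in the original snippet): advance the internal release
        # index and particle counter, then hand back the release records.
        self._index += 1
        self._particle_count += nnew
        return V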
"""
utility for working with DataFrames
"""
import pandas as pd
import numpy as np
class Edit:
"""
this class lets you edit a dataframe
"""
def __init__(self,df = pd.DataFrame(np.ones(5))):
self.df = df
def add_col(self,df,lst,name = "New_column"):
"""
this function will add a new column to the end of a dataframe
Parameters:
df: a dataframe
lst: a list of values to use as column cells
name: the name of the column
"""
series = pd.Series(lst)
df[name] = series
return df
def get_data():
"""
gives you options for loading a data frame
"""
print("Options")
print("1) URL")
print("2) file upload")
print("3) use dummy data")
    ans = int(input("Choose an option by selecting the corresponding number: "))
print()
if ans == 1: #allows you to enter a url and use it as the dataframe
url = input("enter the URL: ")
        df = pd.read_csv(url)
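    # Hedged sketch of options 2 and 3 (not shown in the original snippet);
    # the prompts and the dummy-data shape are illustrative assumptions.
    elif ans == 2:  # load from a local file path
        path = input("enter the file path: ")
        df = pd.read_csv(path)
    else:  # fall back to dummy data
        df = pd.DataFrame(np.ones((5, 3)), columns=["col1", "col2", "col3"])
    return df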
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# Built-ins
import os, sys, time, datetime, uuid, pickle, gzip, bz2, zipfile, requests, operator, warnings, functools
from collections import OrderedDict, defaultdict
try:
    from collections.abc import Mapping  # Python 3.3+ (collections.Mapping was removed in 3.10)
except ImportError:
    from collections import Mapping  # Python 2 fallback
from io import TextIOWrapper
import xml.etree.ElementTree as ET
import importlib
# Version-specific modules
if sys.version_info.major == 2:
import pathlib2 as pathlib
import string
import bz2file
setattr(bz2, "open", bz2file.open) # Hack to use open with bz2
else:
import pathlib
# External
from tqdm import tqdm, tqdm_notebook, tqdm_gui
import pandas as pd
import numpy as np
# =====
# Formatting
# =====
# Remove pairwise nan
def remove_pairwise_nan(a, b, checks=True):
"""
Remove nan values for a pairwise function
Benchmark:
data:150 dimensionality pd.Series with 1 nan in a
checks=True: 177 µs ± 14.3 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
checks=False: 111 µs ± 1.91 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
"""
if checks:
assert isinstance(a, (np.ndarray, pd.Series))
assert type(a) is type(b), "`a` and `b` must be the same type"
assert a.size == b.size, "`a` and `b` must be the same size. The sizes are {} and {}, respectively".format(a.size, b.size)
if isinstance(a, pd.Series):
assert np.all(a.index == b.index), "`a` and `b` must be the same index"
index = None
name_a = None
name_b = None
if isinstance(a, pd.Series):
index = a.index
name_a = a.name
name_b = b.name
a = a.values
b = b.values
mask_nan = ~np.logical_or(np.isnan(a), np.isnan(b))
a = a[mask_nan]
b = b[mask_nan]
if index is not None:
a = pd.Series(a, index=index[mask_nan], name=name_a)
b = pd.Series(b, index=index[mask_nan], name=name_b)
return (a,b)
# Format pairwise
def format_pairwise(a, b, nans_ok=True, assert_not_empty=True):
"""
Prepare two pd.Series for a pairwise operation by getting overlapping indices and droping nan
"""
# Assert a and b are series
assert isinstance(a, pd.Series)
assert isinstance(b, pd.Series)
# Get overlap of index
index = a.index & b.index
if assert_not_empty:
assert index.size > 0, "There are no overlapping elements between a.index and b.index"
a = a[index]
b = b[index]
number_of_nans = pd.concat([a.isnull(), b.isnull()]).sum()
if number_of_nans > 0:
if nans_ok:
a, b = remove_pairwise_nan(a,b,checks=False)
else:
raise ValueError("`nans_ok=False` and there are {} total `nan` between `a` and `b`".format(number_of_nans))
return a, b
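# Usage sketch (illustrative, not from the original source):
#   a = pd.Series([1.0, 2.0, np.nan], index=["x", "y", "z"])
#   b = pd.Series([4.0, 5.0, 6.0], index=["x", "y", "z"])
#   format_pairwise(a, b)  # -> both series restricted to ("x", "y"); the NaN pair is dropped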
# Format memory
def format_memory(B, unit="infer", return_units=True):
"""
Return the given bytes as a human readable KB, MB, GB, or TB string
1 KB = 1024 Bytes
Adapted from the following source (@whereisalext):
https://stackoverflow.com/questions/12523586/python-format-size-application-converting-b-to-kb-mb-gb-tb/52379087
"""
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
# Human readable
size_in_b = int(B)
size_in_kb = B/KB
size_in_mb = B/MB
size_in_gb = B/GB
size_in_tb = B/TB
if return_units:
size_in_b = '{0} B'.format(size_in_b)
size_in_kb = '{0:.3f} KB'.format(size_in_kb)
size_in_mb = '{0:.3f} MB'.format(size_in_mb)
size_in_gb = '{0:.3f} GB'.format(size_in_gb)
size_in_tb = '{0:.3f} TB'.format(size_in_tb)
unit = unit.lower()
assert_acceptable_arguments(unit.lower(), {"infer", "b", "kb", "mb", "gb", "tb"})
if unit != "infer":
return {"b":size_in_b, "kb":size_in_kb, "mb":size_in_mb, "gb":size_in_gb, "tb":size_in_tb}[unit]
else:
if B < KB:
return size_in_b
elif KB <= B < MB:
return size_in_kb
elif MB <= B < GB:
return size_in_mb
elif GB <= B < TB:
return size_in_gb
elif TB <= B:
return size_in_tb
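# Usage sketch (illustrative): format_memory(2048) -> '2.000 KB'; format_memory(2048, unit="b") -> '2048 B'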
# Get duration
def format_duration(t0):
"""
Adapted from @john-fouhy:
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string
"""
duration = time.time() - t0
hours, remainder = divmod(duration, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02}:{:02}:{:02}".format(int(hours), int(minutes), int(seconds))
# Format file path
def format_path(path, into=str, absolute=False):
assert not is_file_like(path), "`path` cannot be file-like"
if hasattr(path, "absolute"):
path = str(path.absolute())
if hasattr(path, "path"):
path = str(path.path)
if absolute:
path = os.path.abspath(path)
return into(path)
# Format header for printing
def format_header(text, line_character="=", n=None):
if n is None:
n = len(text)
line = n*line_character
return "{}\n{}\n{}".format(line, text, line)
# ============
# Dictionaries
# ============
# Dictionary as a tree
def dict_tree():
"""
Source: https://gist.github.com/hrldcpr/2012250
"""
return defaultdict(dict_tree)
# Reverse a dictionary
def dict_reverse(d):
into = type(d)
data = [(v,k) for k,v in d.items()]
return into(data)
# Expand dictionary
def dict_expand(d, into=pd.Series, **kwargs):
"""
Convert {group:[elements]} ==> {element[i]:group[j]}
"""
return into(OrderedDict((r,p) for p,q in d.items() for r in q), **kwargs)
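# Usage sketch (illustrative): dict_expand({"group_1": ["a", "b"]}) -> pd.Series({"a": "group_1", "b": "group_1"})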
# Fill dictionary
def dict_fill(d, index, filler_value=np.nan, into=dict):
data = [(k,filler_value) for k in index if k not in d] + list(d.items())
return into(data)
# Build a dictionary from repeated elements
def dict_build(input_data, into=dict):
"""
input_data: [(value, iterable)]
d_output: {key_in_iterable:value}
"""
d_output = OrderedDict()
for value, iterable in input_data:
for key in iterable:
d_output[key] = value
return into(d_output)
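# Usage sketch (illustrative): dict_build([("group_1", ["a", "b"]), ("group_2", ["c"])]) -> {"a": "group_1", "b": "group_1", "c": "group_2"}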
# Fold dictionary
def dict_collapse(d, into=dict):
"""
Folds dictionary into dict of lists
"""
d_collapsed = defaultdict(list)
for k,v in d.items():
d_collapsed[v].append(k)
return into(d_collapsed)
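# Usage sketch (illustrative): dict_collapse({"a": "x", "b": "x", "c": "y"}) -> {"x": ["a", "b"], "y": ["c"]}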
# Subset a dictionary
def dict_filter(d, keys, into=dict):
"""
keys can be an iterable or function
"""
if hasattr(keys, "__call__"):
f = keys
keys = filter(f, d.keys())
return into(map(lambda k:(k,d[k]), keys))
# Convert python dictionary to bash
def dict_py_to_bash(d, bash_obj_name="DICT"):
"""
Adapted from source:
* https://stackoverflow.com/questions/1494178/how-to-define-hash-tables-in-bash
Converts a Python dictionary or pd.Series to a bash dictionary.
"""
bash_placeholder = "declare -A {}=(".format(bash_obj_name)
for k,v in d.items():
bash_placeholder += ' ["{}"]="{}"'.format(k,v)
bash_placeholder += ")"
return bash_placeholder
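# Usage sketch (illustrative): dict_py_to_bash({"a": 1}) -> 'declare -A DICT=( ["a"]="1")'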
# ===========
# Assertions
# ===========
def assert_acceptable_arguments(query, target, operation="le", message="Invalid option provided. Please refer to the following for acceptable arguments:"):
"""
le: operator.le(a, b) : <=
eq: operator.eq(a, b) : ==
ge: operator.ge(a, b) : >=
"""
# If query is not a nonstring iterable or a tuple
if any([
not is_nonstring_iterable(query),
isinstance(query,tuple),
]):
query = [query]
query = set(query)
target = set(target)
func_operation = getattr(operator, operation)
assert func_operation(query,target), "{}\n{}".format(message, target)
# Check packages
def check_packages(packages, namespace=None, language="python", import_into_backend=False, verbose=False):
"""
Check if packages are available (and import into global namespace)
Handles python and R packages via rpy2
If package is a tuple then imports as follows: ("numpy", "np") where "numpy" is full package name and "np" is abbreviation
If R packages are being checked, please install rpy2
To import packages into current namespace: namespace = globals()
To import packages in backend, e.g. if this is used in a module/script, use `import_into_backend`
packages: str, non-tuple iterable
usage:
@check_packages(["sklearn", "scipy"], language="python")
def f():
pass
@check_packages(["ape"], language="r")
def f():
pass
"""
# Force packages into sorted non-redundant list
if isinstance(packages,(str, tuple)):
packages = [packages]
packages = set(packages)
# Set up decorator for Python imports
if language.lower() == "python":
import_package = importlib.import_module
importing_error = ImportError
# Set up decorator for R imports
if language.lower() == "r":
try:
import rpy2
except ImportError:
raise Exception("Please install 'rpy2' to import R packages")
from rpy2.robjects.packages import importr
from rpy2 import __version__ as rpy2_version
rpy2_version_major = int(rpy2_version.split(".")[0])
assert rpy2_version_major > 1, "Please update your rpy2 version"
if rpy2_version_major == 2:
from rpy2.rinterface import RRuntimeError
importing_error = RRuntimeError
if rpy2_version_major == 3:
# from rpy2.rinterface_lib.embedded import RRuntimeError
from rpy2.robjects.packages import PackageNotInstalledError
importing_error = PackageNotInstalledError
import_package = importr
# Wrapper
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
missing_packages = []
for pkg in packages:
if isinstance(pkg, tuple):
assert len(pkg) == 2, "If a package is tuple type then it must have 2 elements e.g. ('numpy', 'np')"
pkg_name, pkg_variable = pkg
else:
pkg_name = pkg_variable = pkg
try:
package = import_package(pkg_name)
if import_into_backend:
globals()[pkg_variable] = package
if namespace is not None:
namespace[pkg_variable] = package
if verbose:
print("Importing {} as {}".format(pkg_name, pkg_variable), True, file=sys.stderr)
except importing_error:
missing_packages.append(pkg_name)
if verbose:
print("Cannot import {}:".format(pkg_name), False, file=sys.stderr)
assert not missing_packages, "Please install the following {} packages to use this function:\n{}".format(language.capitalize(), ", ".join(missing_packages))
return func(*args, **kwargs)
return wrapper
return decorator
# ===========
# Types
# ===========
def is_function(obj):
return hasattr(obj, "__call__")
def is_file_like(obj):
return hasattr(obj, "read")
def is_dict(obj):
return isinstance(obj, Mapping)
def is_nonstring_iterable(obj):
condition_1 = hasattr(obj, "__iter__")
condition_2 = not type(obj) == str
return all([condition_1,condition_2])
def is_dict_like(obj):
condition_1 = is_dict(obj)
condition_2 = isinstance(obj, pd.Series)
return any([condition_1, condition_2])
def is_all_same_type(iterable):
iterable_types = set(map(lambda obj:type(obj), iterable))
return len(iterable_types) == 1
def is_number(x, num_type = np.number):
return np.issubdtype(type(x), num_type)
def is_query_class(x,query, case_sensitive=False):
# Format single search queries
if type(query) == str:
query = [query]
# Remove case if necessary
x_classname = str(x.__class__)
if not case_sensitive:
x_classname = x_classname.lower()
query = map(lambda q:q.lower(),query)
# Check if any of the tags in query are in the input class
verdict = any(q in x_classname for q in query)
return verdict
def is_path_like(obj, path_must_exist=True):
condition_1 = type(obj) == str
condition_2 = hasattr(obj, "absolute")
condition_3 = hasattr(obj, "path")
obj_is_path_like = any([condition_1, condition_2, condition_3])
if path_must_exist:
if obj_is_path_like:
return os.path.exists(obj)
else:
return False
else:
return obj_is_path_like
def is_in_namespace(variable_names, namespace, func_logic=all):
"""
Check if variable names are in the namespace (i.e. globals())
"""
assert hasattr(variable_names, "__iter__"), "`variable_names` should be either a single string on an object or an iterable of strings of variable names"
if isinstance(variable_names, str):
variable_names = [variable_names]
namespace = set(namespace)
return func_logic(map(lambda x: x in namespace, variable_names))
def is_symmetrical(X, tol=None):
assert len(X.shape) == 2 , "`X` must be 2-dimensional"
assert X.shape[0] == X.shape[1], "`X` must be square"
X = X.copy()
if isinstance(X, pd.DataFrame):
X = X.values
np.fill_diagonal(X, 0)
if tol is None:
return np.all(np.tril(X) == np.triu(X).T)
if tol:
return (np.tril(X) - np.triu(X).T).ravel().min() < tol
def is_graph(obj, attr="has_edge"):
return hasattr(obj, attr)
@check_packages(["matplotlib"])
def is_color(obj):
from matplotlib.colors import to_rgba
# Note: This can't handle values that are RGB in (0-255) only (0,1)
try:
to_rgba(obj)
return True
except ValueError:
verdict = False
if is_nonstring_iterable(obj):
if all(isinstance(c, (float, int)) for c in obj):
# Check [0,1]
if all(0 <= c <= 1 for c in obj):
verdict = True
# # Check [0,255]
# if not verdict:
# if all(0 <= c <= 255 for c in obj):
# verdict = True
return verdict
@check_packages(["matplotlib"])
def get_color_midpoint(a, b, alpha=1.0, return_type="hex"):
from matplotlib.colors import to_rgba, to_hex
assert_acceptable_arguments(return_type, {"rgba", "rgb", "hex"})
a = to_rgba(a, alpha=alpha)
b = to_rgba(b, alpha=alpha)
c = tuple(np.stack([a,b]).mean(axis=0))
if return_type == "rgba":
return c
if return_type == "rgb":
return c[:-1]
if return_type == "hex":
return to_hex(c)
# =======
# Utility
# =======
# Infer compression
def infer_compression(path):
path = format_path(path)
compression = None
ext_zip = (".zip")
ext_gzip = (".gz", ".gzip", ".pgz")
ext_bz2 = (".bz2", ".bzip2", ".pbz2")
if path.endswith(ext_gzip):
compression= "gzip"
if path.endswith(ext_bz2):
compression = "bz2"
if path.endswith(ext_zip):
compression = "zip"
return compression
# Get current timestamp
def get_timestamp(fmt="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.utcnow().strftime(fmt)
# Wrapper for tqdm
def pv(iterable, description=None, version=None, total=None, unit='it'):
"""
Progress viewer
Wrapper for `tqdm`:
https://github.com/tqdm/tqdm
"""
assert_acceptable_arguments([version], {None, "gui", "notebook"})
func = tqdm
if version == "notebook":
func = tqdm_notebook
if version == "gui":
func = tqdm_gui
    return func(
iterable,
desc=description,
total=total,
unit=unit,
)
# Creates a unique identifier
def get_unique_identifier():
return uuid.uuid4().hex
def contains(query, include, exclude=None):
"""
Is anything from `include` in `query` that doesn't include anything from `exclude`
`query` can be any iterator that is not a generator
"""
if isinstance(include, str):
include = [include]
condition_A = any(x in query for x in include)
if exclude is not None:
if type(exclude) == str:
exclude = [exclude]
condition_B = all(x not in query for x in exclude)
return all([condition_A, condition_B])
else:
return condition_A
# Consecutive replace on a string
def consecutive_replace(x, *patterns):
if len(patterns) == 1:
patterns = patterns[0]
for (a,b) in patterns:
x = x.replace(a,b)
return x
# Boolean
def boolean(x, true_values={"true", "t", "yes", "1"}, false_values={"false", "f", "no", "0"}, assertion_message="Please choose either: 'True' or 'False'"):
"""
Not case sensitive
"""
x = str(x).lower()
option = None
if x in list(map(str,true_values)):
option = True
if x in list(map(str,false_values)):
option = False
assert option is not None, assertion_message
return option
# Truncate a float by a certain precision
def to_precision(x, precision=5, into=float):
return into(("{0:.%ie}" % (precision-1)).format(x))
# Left padding
def pad_left(x, block_size=3, fill=0):
"""
Pad a string representation of digits
"""
if len(x) > block_size:
return x
else:
right = np.array(list(str(x)))
left = np.repeat(str(fill), block_size - right.size )
return "".join(np.concatenate([left, right]))
# Join as strings
def join_as_strings(delimiter="_", *args):
return delimiter.join(list(map(str, args)))
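# Usage sketch (illustrative): join_as_strings("_", "sample", 1, 2.5) -> 'sample_1_2.5'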
# =============
# Iterables
# =============
# Fragment a sequence string
def fragment(seq, K=5, step=1, overlap=False):
K = int(K)
step = int(step)
if not overlap:
step = K
iterable = range(0, len(seq) - K + 1, step)
for i in iterable:
frag = seq[i:i+K]
yield frag
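# Usage sketch (illustrative): list(fragment("ATCGT", K=2, overlap=True)) -> ['AT', 'TC', 'CG', 'GT']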
# Get depth of an iterable
def iterable_depth(arg, exclude=None):
# Adapted from the following SO post:
# https://stackoverflow.com/questions/6039103/counting-depth-or-the-deepest-level-a-nested-list-goes-to
# @marco-sulla
    types_to_exclude = set([str])
    if exclude is not None:
        if not hasattr(exclude, "__iter__"):
            exclude = [exclude]
        types_to_exclude.update(exclude)
    if isinstance(arg, tuple(types_to_exclude)):
return 0
try:
if next(iter(arg)) is arg: # avoid infinite loops
return 1
except TypeError:
return 0
try:
depths_in = map(lambda x: iterable_depth(x, exclude), arg.values())
except AttributeError:
try:
depths_in = map(lambda x: iterable_depth(x, exclude), arg)
except TypeError:
return 0
try:
depth_in = max(depths_in)
except ValueError:
depth_in = 0
return 1 + depth_in
# Flatten nested iterables
def flatten(nested_iterable, into=list, unique=False, **kwargs_iterable):
# Adapted from @wim:
# https://stackoverflow.com/questions/16312257/flatten-an-iterable-of-iterables
def _func_recursive(nested_iterable):
for x in nested_iterable:
if is_nonstring_iterable(x):
for element in flatten(x):
yield element
else:
yield x
# Unpack data
data_flattened = list(_func_recursive(nested_iterable))
if unique:
data_flattened = sorted(set(data_flattened))
# Convert type
return into(data_flattened, **kwargs_iterable)
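# Usage sketch (illustrative): flatten([[1, [2]], 3]) -> [1, 2, 3]; flatten([[1, 1], [2]], unique=True) -> [1, 2]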
# Range like input data
def range_like(data, start=0):
return np.arange(len(data)) + start
# Set Intersection
def intersection(*iterables, **kwargs):
sets = map(set, iterables)
if "into" in kwargs: # Py2/3 Compatability
into = kwargs.pop("into")
else:
into = set
return into(set.intersection(*sets), **kwargs)
# Set Union
def union(*iterables, **kwargs):
sets = map(set, iterables)
if "into" in kwargs: # Py2/3 Compatability
into = kwargs.pop("into")
else:
into = set
return into(set.union(*sets), **kwargs)
# =========
# I/O
# =========
def read_from_clipboard(sep="\n", into=list):
data = pd.io.clipboard.clipboard_get()
if sep is not None:
return into(filter(bool, map(lambda x:x.strip(), data.split(sep))))
else:
return data
# Read dataframe
def read_dataframe(path, sep="infer", index_col=0, header=0, compression="infer", pickled="infer", func_index=None, func_columns=None, evaluate_columns=None, engine="c", verbose=False, excel="infer", infer_series=False, sheet_name=None, **kwargs):
start_time= time.time()
path = format_path(path, str)
dir , ext = os.path.splitext(path)
ext = ext.lower()
if excel == "infer":
if ext in {".xlsx", ".xls"}:
excel = True
else:
excel = False
if excel:
if "sheetname" in kwargs:
sheet_name = kwargs.pop("sheetname")
            warnings.warn("Use `sheet_name` instead of `sheetname`", DeprecationWarning)
df = pd.read_excel(path, sheet_name=sheet_name, index_col=index_col, header=header, **kwargs)
else:
# Seperator
if any(list(map(lambda x:ext.endswith(x),[".csv", "csv.gz", "csv.zip"]))):
sep = ","
else:
sep = "\t"
# Serialization
if pickled == "infer":
if ext in {".pkl", ".pgz", ".pbz2"}:
pickled = True
else:
pickled = False
# Compression
if compression == "infer":
if pickled:
if ext == ".pkl":
compression = None
if ext == ".pgz":
compression = "gzip"
if ext == ".pbz2":
compression = "bz2"
else:
if ext == ".gz":
compression = "gzip"
if ext == ".bz2":
compression = "bz2"
if ext == ".zip":
compression = "zip"
if pickled:
df = pd.read_pickle(path, compression=compression, **kwargs)
else:
df = pd.read_csv(path, sep=sep, index_col=index_col, header=header, compression=compression, engine=engine, **kwargs)
condition_A = any([(excel == False), (sheet_name is not None)])
condition_B = all([(excel == True), (sheet_name is None)])
if condition_A:
# Map indices
if func_index is not None:
df.index = df.index.map(func_index)
if func_columns is not None:
df.columns = df.columns.map(func_columns)
if evaluate_columns is not None:
for field_column in evaluate_columns:
try:
df[field_column] = df[field_column].map(eval)
except ValueError:
if verbose:
print("Could not use `eval` on column=`{}`".format(field_column), file=sys.stderr)
if verbose:
print("{} | Dimensions: {} | Time: {}".format(
path.split('/')[-1],
df.shape,
format_duration(start_time),
), file=sys.stderr)
if condition_B:
if verbose:
print("{} | Sheets: {} | Time: {}".format(
path.split('/')[-1],
len(df),
format_duration(start_time),
), file=sys.stderr)
if infer_series:
if df.shape[1] == 1:
df = df.iloc[:,0]
return df
# Write dataframe
def write_dataframe(data, path, sep="\t", compression="infer", pickled="infer", excel="infer", **kwargs):
start_time = time.time()
path = format_path(path, str)
_ , ext = os.path.splitext(path)
dir, filename = os.path.split(path)
if not os.path.exists(dir):
dir = str(pathlib.Path(dir).absolute())
os.makedirs(dir, exist_ok=True)
# Excel
if excel == "infer":
if ext in {".xlsx", ".xls"}:
excel = True
else:
excel = False
if excel:
if not is_dict(data):
data = {"Sheet1":data}
writer = pd.ExcelWriter(path)
for sheet_name, df in data.items():
df.to_excel(writer, sheet_name=str(sheet_name), **kwargs)
writer.save()
else:
# Serialization
if pickled == "infer":
if ext in {".pkl", ".pgz", ".pbz2"}:
pickled = True
else:
pickled = False
# Compression
if compression == "infer":
if pickled:
if ext == ".pkl":
compression = None
if ext == ".pgz":
compression = "gzip"
if ext == ".pbz2":
compression = "bz2"
else:
compression = None
if ext == ".gz":
compression = "gzip"
if ext == ".bz2":
compression = "bz2"
if pickled:
data.to_pickle(path, compression=compression, **kwargs)
else:
data.to_csv(path, sep=sep, compression=compression, **kwargs)
# Create file object
def get_file_object(path, mode="infer", compression="infer", safe_mode="infer", verbose=True):
"""
with get_file_object("./test.txt.zip", mode="infer", verbose=False) as f_read:
with get_file_object("./test_write.txt.bz2", mode="write", verbose=False) as f_write:
for line in f_read.readlines():
line = str(line.strip())
print(line, file=f_write)
"""
# Init
f = None
file_will_be_written = False
# Paths
path = format_path(path)
path_exists = os.path.exists(path)
if compression == "infer":
compression = infer_compression(path)
if verbose:
print("Inferring compression:", compression, file=sys.stderr)
# Inferring mode
if mode == "infer": # Create new function for this? infer_filemode?
if path_exists:
mode = "read"
else:
mode = "write"
assert mode != "infer", "The mode should be inferred by this point. Please specify mode manually."
assert compression != "infer", "The compression should be inferred by this point. Please specify compression manually."
# Generic read write
if mode in ["write", "read"]:
if mode == "write":
mode = "w"
if mode == "read":
mode = "r"
if compression in ["gzip", "bz2"]:
mode = mode + "b"
if verbose:
print("Inferring mode:", mode, file=sys.stderr)
if verbose:
if mode == "r":
print("Reading file:",path, file=sys.stderr)
if mode == "w":
print("Writing file:",path, file=sys.stderr)
# Will a file be written?
if "w" in mode:
file_will_be_written = True
# Ensure zip is not being written
if file_will_be_written:
assert compression != "zip", "Currently cannot handle writing zip files. Please use gzip, bz2, or None."
# Future do this:
# https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile.open
# Safe mode
if safe_mode == "infer":
if file_will_be_written:
safe_mode = True
else:
safe_mode = False
assert safe_mode in {True,False}, "Please choose either True or False for `safe_mode`"
if safe_mode:
if all([file_will_be_written, path_exists]):
raise Exception("Safe Mode: Please explicitly provide a writeable mode ['w', 'wb', or 'write'] because `{}` already exists and will be rewritten.".format(path))
# GZIP compression
if compression == "gzip":
f = gzip.open(path, mode)
f.read1 = f.read # Hack from https://github.com/kislyuk/eight/issues/1
# BZ2 compression
if compression == "bz2":
f = bz2.open(path, mode)
if compression == "zip":
filename, ext = os.path.splitext(os.path.split(path)[-1])
f = zipfile.ZipFile(path,mode).open(filename)
# No compression
if f is None:
return open(path, mode)
# Reading and writing compressed files
else:
return TextIOWrapper(f, encoding="utf-8")
# Text reading wrapper
def read_textfile(path, enum=False, generator=True, mode="read", compression="infer", into=pd.Series):
"""
2018-May-29
Edits: 2018-December-27: Added `get_file_object` dependency
"""
assert mode not in ["w", "wb", "a"], "`mode` should not be in {w, wb, a} because it will overwrite"
assert compression in ["infer", "gzip", "bz2", "zip", None], "Valid `compression` types are 'infer', 'gzip', 'bz2', 'zip'"
# Format file path
path = format_path(path, str)
# Get file object
handle = get_file_object(path=path, mode=mode, compression=compression, safe_mode=False, verbose=False)
# Nested functions
def run_return_list(handle):
data = handle.read().split("\n")
handle.close()
if into == pd.Series:
return pd.Series(data, name=path)
else:
if enum:
return into(list(enumerate(data)))
if not enum:
return into(data)
def run_return_iterable(handle):
if enum:
for i,line in enumerate(handle):
yield i, line.strip()
if not enum:
for line in handle:
yield line.strip()
handle.close()
# Controller
if generator:
return run_return_iterable(handle=handle)
if not generator:
return run_return_list(handle=handle)
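# Illustrative usage (sketch, not part of the original module): stream a
# hypothetical plain-text file lazily, or materialize it as a pandas Series.
# "demo_notes.txt" is a placeholder path.
def _example_read_textfile_usage():
    for i, line in read_textfile("demo_notes.txt", enum=True, generator=True):
        print(i, line)
    # Load everything at once instead of iterating
    return read_textfile("demo_notes.txt", generator=False, into=pd.Series)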
# Reading serial object
def read_object(path, compression="infer", serialization_module=pickle):
path = format_path(path, str)
if compression == "infer":
_ , ext = os.path.splitext(path)
if (ext == ".pkl") or (ext == ".dill"):
compression = None
if ext in {".pgz", ".gz"}:
compression = "gzip"
if ext in {".pbz2", ".bz2"}:
compression = "bz2"
if compression is not None:
if compression == "gzip":
f = gzip.open(path, "rb")
if compression == "bz2":
f = bz2.open(path, "rb")
else:
f = open(path, "rb")
obj = serialization_module.load(f)
f.close()
return obj
# Writing serial object
def write_object(obj, path, compression="infer", serialization_module=pickle, protocol=None, *args):
"""
Extensions:
pickle ==> .pkl
dill ==> .dill
gzipped-pickle ==> .pgz
bzip2-pickle ==> .pbz2
"""
assert obj is not None, "Warning: `obj` is NoneType"
path = format_path(path, str)
# Use infer_compression here
if compression == "infer":
_ , ext = os.path.splitext(path)
if ext in {".pkl", ".dill"}: # if ext in (ext == ".pkl") or (ext == ".dill"):
compression = None
if ext in {".pgz", ".gz"}:
compression = "gzip"
if ext in {".pbz2", ".bz2"}:
compression = "bz2"
if compression is not None:
if compression == "bz2":
f = bz2.BZ2File(path, "wb")
if compression == "gzip":
f = gzip.GzipFile(path, "wb")
else:
f = open(path, "wb")
serialization_module.dump(obj, f, protocol=protocol, *args)
f.close()
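# Illustrative round trip (sketch, not part of the original module): serialize
# an object and load it back. "demo_model.pgz" is a placeholder path; the .pgz
# extension selects gzip-compressed pickle in both functions.
def _example_object_round_trip():
    payload = {"weights": [0.1, 0.2, 0.3], "label": "demo"}
    write_object(payload, "demo_model.pgz")
    return read_object("demo_model.pgz")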
# Reading html from website
def read_url(url, params=None, **kwargs):
"""
Future:
Make wrapper for dynamic html and phantom js
"""
dynamic = False
if not dynamic:
r = requests.get(url, params=params, **kwargs)
return r.text
else:
print("Unavailable: Need to make wrapper for dynamic HTML using PhantomJS", file=sys.stderr)
# Importing a functions from a module
def read_script_as_module(name_module, path):
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
path = format_path(path, str)
if sys.version_info.major == 2:
module = imp.load_source(name_module, path)
else:
spec = importlib.util.spec_from_file_location(name_module, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
# Read Fasta File
@check_packages(["Bio"], import_into_backend=False)
def read_fasta(path, description=True, case=None, func_header=None, into=pd.Series, compression="infer", name=None, verbose=False):
"""
Reads in a single fasta file or a directory of fasta files into a dictionary.
"""
from Bio.SeqIO.FastaIO import SimpleFastaParser
# Get path
path = format_path(path)
# Assign pathname as name if there isn't one
if name is None:
name = path
# Open file object
f = get_file_object(path, mode="read", compression=compression, safe_mode=False, verbose=False)
# Read in fasta
d_id_seq = OrderedDict()
if verbose:
seq_records = pv(SimpleFastaParser(f), "Reading sequence file: {}".format(path))
else:
seq_records = SimpleFastaParser(f)
    # The unrolled branches below are verbose but avoid per-record checks
if description:
if case == "lower":
for header, seq in seq_records:
seq = seq.lower()
d_id_seq[header] = seq
if case == "upper":
for header, seq in seq_records:
seq = seq.upper()
d_id_seq[header] = seq
if case is None:
for header, seq in seq_records:
d_id_seq[header] = seq
if not description:
if case == "lower":
for header, seq in seq_records:
seq = seq.lower()
header = header.split(" ")[0]
d_id_seq[header] = seq
if case == "upper":
for header, seq in seq_records:
seq = seq.upper()
header = header.split(" ")[0]
d_id_seq[header] = seq
if case is None:
for header, seq in seq_records:
header = header.split(" ")[0]
d_id_seq[header] = seq
# Close File
f.close()
# Transform header
if func_header is not None:
d_id_seq = OrderedDict( [(func_header(id),seq) for id, seq in d_id_seq.items()])
sequences = into(d_id_seq)
if hasattr(sequences, "name"):
sequences.name = name
return sequences
# Writing sequence files
def write_fasta(sequences, path, compression="infer"):
"""
Sequence stats:
count 29999.000000
mean 310.621754
std 1339.422833
min 56.000000
25% 75.000000
50% 111.000000
75% 219.000000
max 54446.000000
Benchmarks:
No compression: 47.2 ms ± 616 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
Gzip: 9.85 s ± 261 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Bzip2: 864 ms ± 16.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
# path = format_path(path)
# if compression == "infer":
# compression = infer_compression(path)
if is_query_class(path, ["stdout", "stderr", "streamwrapper"]):
path.writelines(">{}\n{}\n".format(id, seq) for id, seq in sequences.items())
else:
with get_file_object(path, mode="write", compression=compression, safe_mode=False, verbose=False) as f:
f.writelines(">{}\n{}\n".format(id, seq) for id, seq in sequences.items())
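# Illustrative round trip (sketch, not part of the original module): requires
# Biopython for `read_fasta`. "demo_sequences.fasta" is a placeholder path.
def _example_fasta_round_trip():
    sequences = read_fasta("demo_sequences.fasta", description=False, case="upper")
    write_fasta(sequences, "demo_sequences_copy.fasta.gz")  # gzip inferred from extension
    return sequences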
# Read blast output
def read_blast(path, length_query=None, length_subject=None, sort_by="bitscore"):
"""
if 12: ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
if 15 assumes: -outfmt '6 std qlen slen stitle': ["std", "qlen", "slen", "stitle"]
####################################################
Column NCBI name Description
1 qseqid Query Seq-id (ID of your sequence)
2 sseqid Subject Seq-id (ID of the database hit)
3 pident Percentage of identical matches
4 length Alignment length
5 mismatch Number of mismatches
6 gapopen Number of gap openings
7 qstart Start of alignment in query
8 qend End of alignment in query
9 sstart Start of alignment in subject (database hit)
10 send End of alignment in subject (database hit)
11 evalue Expectation value (E-value)
12 bitscore Bit score
13 sallseqid All subject Seq-id(s), separated by a ';'
14 score Raw score
15 nident Number of identical matches
16 positive Number of positive-scoring matches
17 gaps Total number of gaps
18 ppos Percentage of positive-scoring matches
19 qframe Query frame
20 sframe Subject frame
21 qseq Aligned part of query sequence
22 sseq Aligned part of subject sequence
23 qlen Query sequence length
24 slen Subject sequence length
25 salltitles All subject title(s), separated by a '<>'
Example inputs:
* blat -prot Yeast/Saccharomyces_cerevisiae.R64-1-1.pep.all.processed.fa Phaeodactylum_tricornutum.ASM15095v2.pep.all.processed.fa -out=blast8 yeast-pt.blast8
* diamond blastp -d ../../../../reference_data/references/gracilibacteria/reference_proteins.nmnd -q ./prodigal_output/orfs.faa -f 6 -o ./diamond_output/output.blast6
"""
path = format_path(path)
idx_default_fields = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
# ["query_id", "subject_id", "identity", "alignment_length", "mismatches", "gap_openings", "query_start", "query_end", "subject_start", "subject_end", "e-value", "bit_score"]
df_blast = pd.read_csv(path, header=None, sep="\t")
if df_blast.shape[1] == 12:
df_blast.columns = idx_default_fields
if df_blast.shape[1] == 15:
df_blast.columns = idx_default_fields + ["qlen", "slen", "stitle"]
# Length of query
if length_query is not None:
if is_path_like(length_query):
length_query = read_fasta(length_query, description=False, verbose=False)
if isinstance(length_query[0], str):
length_query = length_query.map(len)
df_blast["qlength"] = df_blast["qseqid"].map(lambda id: length_query[id])
df_blast["qratio"] = (df_blast["qend"] - df_blast["qstart"])/df_blast["qlength"]
# Length of subject
if length_subject is not None:
if is_path_like(length_subject):
length_subject = read_fasta(length_subject, description=False, verbose=False)
if isinstance(length_subject[0], str):
length_subject = length_subject.map(len)
df_blast["slength"] = df_blast["sseqid"].map(lambda id: length_subject[id])
df_blast["sratio"] = (df_blast["send"] - df_blast["sstart"])/df_blast["slength"]
if sort_by is not None:
df_blast = df_blast.sort_values(by=sort_by, ascending=False).reset_index(drop=True)
return df_blast
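# Illustrative usage (sketch, not part of the original module): parse a
# hypothetical tabular BLAST/DIAMOND output (-outfmt 6) and keep the best hit
# per query; rows are already sorted by bitscore in descending order.
def _example_read_blast_usage():
    df_hits = read_blast("demo_output.blast6", sort_by="bitscore")
    best_hit_per_query = df_hits.groupby("qseqid").head(1)
    return best_hit_per_query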
# Helper function for reading gtf and gff3
def read_gtf_gff_base(path, compression, record_type, verbose):
# Read the gff3 file
with get_file_object(path, mode="read", compression=compression, safe_mode=False, verbose=False) as f:
if verbose:
iterable_lines = pv(f.readlines(), "Processing lines")
else:
iterable_lines = f.readlines()
data= list()
if record_type is None:
for line in iterable_lines:
if not line.startswith("#"):
line = line.strip("\n")
if bool(line):
base_fields = line.split("\t")
data.append(base_fields)
else:
for line in iterable_lines:
if not line.startswith("#"):
if "{}_id".format(record_type) in line:
line = line.strip("\n")
base_fields = line.split("\t")
data.append(base_fields)
# Generate table
df_base = pd.DataFrame(data)
df_base.columns = ["seq_record", "source", "seq_type", "pos_start", "pos_end", ".1", "sense", ".2", "data_fields"]
return df_base
def read_gff3(path, compression="infer", record_type=None, verbose = True, reset_index=False, name=True):
def f(x):
fields = x.split(";")
data = dict()
for item in fields:
k, v = item.split("=")
data[k] = v
return data
path = format_path(path)
if verbose:
print("Reading gff3 file:",path,sep="\t", file=sys.stderr)
accepted_recordtypes = {"exon", "gene", "transcript", "protein", None}
assert record_type in accepted_recordtypes, "Unrecognized record_type. Please choose from the following: {}".format(accepted_recordtypes)
# Read the gff3 file
df_base = read_gtf_gff_base(path, compression, record_type, verbose)
try:
df_fields = pd.DataFrame(df_base["data_fields"].map(f).to_dict()).T
        df_gff3 = pd.concat([df_base[["seq_record", "source", "seq_type", "pos_start", "pos_end", ".1", "sense", ".2"]], df_fields], axis=1)
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from plateau.utils.pandas import (
aggregate_to_lists,
concat_dataframes,
drop_sorted_duplicates_keep_last,
is_dataframe_sorted,
mask_sorted_duplicates_keep_last,
merge_dataframes_robust,
sort_dataframe,
)
class TestConcatDataframes:
@pytest.fixture(params=[True, False])
def dummy_default(self, request):
if request.param:
return pd.DataFrame(data={"a": [-2, -3], "b": 1.0}, columns=["a", "b"])
else:
return None
@pytest.fixture(params=[True, False])
def maybe_iter(self, request):
if request.param:
return iter
else:
return list
def test_many(self, dummy_default, maybe_iter):
dfs = [
pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(
data={"a": [2, 3], "b": 2.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(data={"a": [4, 5], "b": 3.0}, columns=["a", "b"]),
]
expected = pd.DataFrame(
{"a": [0, 1, 2, 3, 4, 5], "b": [1.0, 1.0, 2.0, 2.0, 3.0, 3.0]},
columns=["a", "b"],
)
actual = concat_dataframes(maybe_iter(dfs), dummy_default)
pdt.assert_frame_equal(actual, expected)
def test_single(self, dummy_default, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([df.copy()]), dummy_default)
pdt.assert_frame_equal(actual, df)
def test_default(self, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([]), df)
pdt.assert_frame_equal(actual, df)
def test_fail_no_default(self, maybe_iter):
with pytest.raises(ValueError) as exc:
concat_dataframes(maybe_iter([]), None)
assert str(exc.value) == "Cannot concatenate 0 dataframes."
@pytest.mark.parametrize(
"dfs",
[
[pd.DataFrame({"a": [0, 1]})],
[pd.DataFrame({"a": [0, 1]}), pd.DataFrame({"a": [2, 3]})],
],
)
    def test_wipe_list(self, dfs):
concat_dataframes(dfs)
assert dfs == []
@pytest.mark.parametrize(
"dfs,expected",
[
(
# dfs
[pd.DataFrame(index=range(3))],
# expected
pd.DataFrame(index=range(3)),
),
(
# dfs
[pd.DataFrame(index=range(3)), pd.DataFrame(index=range(2))],
# expected
pd.DataFrame(index=range(5)),
),
],
)
def test_no_columns(self, dfs, expected):
actual = concat_dataframes(dfs)
pdt.assert_frame_equal(actual, expected)
def test_fail_different_colsets(self, maybe_iter):
        dfs = [pd.DataFrame({"a": [1]})]
from connection import cer_connection
import pandas as pd
import numpy as np
import os
from errors import IdError
from datetime import date
import io
def execute_sql(path, query_name, db='tsql23cap'):
query_path = os.path.join(path, query_name)
conn, engine = cer_connection(db=db)
def utf16open(query_path):
file = io.open(query_path, mode='r', encoding="utf-16", errors='ignore')
query = file.read()
file.close()
return query
def no_encoding_open(query_path):
file = io.open(query_path, mode='r', errors='ignore')
query = file.read()
file.close()
return query
try:
query = utf16open(query_path)
except:
query = no_encoding_open(query_path)
df = pd.read_sql_query(query, con=conn)
conn.close()
return df
def most_common(df,
meta,
col_name,
meta_key,
top=1,
dtype="dict",
lower=True,
joinTies=True):
what_list = []
for what in df[col_name]:
what = str(what)
if ',' in what:
what_list.extend(what.split(','))
else:
what_list.append(what)
what_list = [x.strip() for x in what_list]
what_list = [x for x in what_list if x not in ['To be determined', '', "Other", "Not Specified", "Sans objet", "Autre"]]
dft = pd.DataFrame(what_list, columns=["entries"])
dft['records'] = 1
dft = dft.groupby(by="entries").sum().reset_index()
dft = dft.sort_values(by=['records', 'entries'], ascending=[False, True])
if joinTies:
dft = dft.groupby(by="records").agg({"entries": " & ".join}).reset_index()
dft = dft.sort_values(by=['records'], ascending=False)
dft = dft.head(top)
counter = {}
for name, count in zip(dft['entries'], dft['records']):
counter[name] = count
if lower:
counter = {k.lower(): counter[k] for k in list(counter)}
if dtype != "dict":
counter = list(counter.keys())
if top == 1:
counter = list(counter.keys())[0]
meta[meta_key] = counter
return meta
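# Illustrative usage (sketch, not part of this module): count entries of a
# hypothetical comma-separated column and store the single most common value
# (lower-cased) in a metadata dict.
def _example_most_common():
    df_demo = pd.DataFrame({"commodity": ["Oil", "Oil, Gas", "Gas", "Oil"]})
    meta = most_common(df_demo, {}, "commodity", "most common commodity", top=1)
    return meta  # {'most common commodity': 'oil'}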
def normalizeBool(df, cols, normType="Y/N"):
for col in cols:
df[col] = [str(x).strip() for x in df[col]]
if normType == "T/F":
df[col] = df[col].replace({"True": "T",
"False": "F"})
elif normType == "Y/N":
df[col] = df[col].replace({"True": "Yes",
"False": "No"})
return df
def normalize_dates(df, date_list, short_date=False):
for date_col in date_list:
df[date_col] = pd.to_datetime(df[date_col], errors='raise')
if short_date:
df[date_col] = df[date_col].dt.date
return df
def normalize_text(df, text_list):
for text_col in text_list:
df[text_col] = df[text_col].astype(object)
df[text_col] = [str(x).strip() for x in df[text_col]]
return df
def normalize_numeric(df, num_list, decimals):
for num_col in num_list:
df[num_col] = pd.to_numeric(df[num_col], errors='coerce')
df[num_col] = df[num_col].round(decimals)
return df
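# Illustrative usage (sketch, not part of this module): run the normalizers on
# a small hypothetical frame with mixed date, text, and numeric columns.
def _example_normalize():
    df_demo = pd.DataFrame({"Approval Date": ["2020-01-31", "2020-02-29"],
                            "Company": [" Alpha Pipeline ", "Beta Energy"],
                            "Volume": ["10.567", "n/a"]})
    df_demo = normalize_dates(df_demo, ["Approval Date"], short_date=True)
    df_demo = normalize_text(df_demo, ["Company"])
    df_demo = normalize_numeric(df_demo, ["Volume"], decimals=2)
    return df_demo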
def pipeline_names():
read_path = os.path.join(os.getcwd(), 'raw_data/','NEB_DM_PROD - 1271412 - Pipeline Naming Conventions.XLSX')
df = pd.read_excel(read_path, sheet_name='Pipeline Naming Conventions')
df = df.rename(columns={x: x.strip() for x in df.columns})
df['old name'] = [x.strip() for x in df['Company List maintained by <NAME> https://www.cer-rec.gc.ca/bts/whwr/cmpnsrgltdbnb-eng.html']]
df['new name'] = [x.strip() for x in df['Suggested Pipeline Name for ALL Future External Publications']]
return {old_name: new_name for old_name, new_name in zip(df['old name'],
df['new name'])}
def daysInYear(year):
d1 = date(year, 1, 1)
d2 = date(year + 1, 1, 1)
return (d2 - d1).days
def saveJson(df, write_path, precision=2):
df.to_json(write_path,
orient='records',
double_precision=precision,
compression='infer')
def get_company_names(col):
return sorted(list(set(col)))
def company_rename():
names = {'Westcoast Energy Inc., carrying on business as Spectra Energy Transmission': 'Westcoast Energy Inc.',
'Kingston Midstream Limited': 'Kingston Midstream Westspur Limited',
'Trans Québec and Maritimes Pipeline Inc.': 'Trans Quebec and Maritimes Pipeline Inc.',
'Enbridge Southern Lights GP Inc. on behalf of Enbridge Southern Lights LP': 'Southern Lights Pipeline',
'Alliance Pipeline Ltd as General Partner of Alliance Pipeline Limited Partnership': 'Alliance Pipeline Ltd.',
'Trans Mountain Pipeline Inc.': 'Trans Mountain Pipeline ULC',
'Kinder Morgan Cochin ULC': 'PKM Cochin ULC',
'Enbridge Bakken Pipeline Company Inc., on behalf of Enbridge Bakken Pipeline Limited Partnership': 'Enbridge Bakken Pipeline Company Inc.',
'TEML Westspur Pipelines Limited': 'Kingston Midstream Westspur Limited',
'Plains Marketing Canada, L.P.': 'Plains Midstream Canada ULC'}
return names
def conversion(df, commodity, dataCols, rounding=False, fillna=False):
if commodity == 'gas':
conv = 28316.85
elif commodity == "oil":
conv = 6.2898
for col in dataCols:
if fillna:
df[col] = df[col].fillna(fillna)
if commodity == "oil":
df[col] = [x*conv if not pd.isnull(x) else x for x in df[col]]
else:
            df[col] = [x/conv if not pd.isnull(x) else x for x in df[col]]
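# Illustrative usage (sketch, not part of this module): convert a hypothetical
# oil-volume column in place from cubic metres to barrels (the 6.2898 factor above).
def _example_conversion():
    df_demo = pd.DataFrame({"Throughput": [100.0, 250.0, None]})
    conversion(df_demo, "oil", ["Throughput"])
    return df_demo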
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
import statsmodels.api as sm
df=pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Danishkron.txt", sep= '\t', engine='python')
def replacebyefe(array, newvalue):
array[array==0]=newvalue
df_copy=df.copy()
df_asarray=np.asarray(df_copy.VALUE)
replacebyefe(df_asarray,np.nan)
df_copy['VALUE']=df_asarray
df_copy['NEWVALUE']=df_copy.VALUE.interpolate()
#print(df_copy)
size = len(df_copy)
head = df_copy[0:5]
tail = df_copy[size-5:]
train = df_copy[0:size-201]
test = df_copy[size-200:]
df_copy.DATE = pd.to_datetime(df_copy.DATE,format="%Y-%m-%d")
df_copy.index = df_copy.DATE
train.DATE = pd.to_datetime(train.DATE,format="%Y-%m-%d")
train.index = train.DATE
test.DATE = pd.to_datetime(test.DATE, format="%Y-%m-%d")
import multiprocessing
import pandas as pd
import numpy as np
import difflib
import os.path
import pickle
import nltk
import re
from joblib import Parallel, delayed
from nltk.corpus import stopwords
from autocorrect import Speller
from unidecode import unidecode
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
nltk.download('stopwords')
def eliminate_nrs(frame):
frame = frame.replace({
'nr':'',
'nan':'',
'NR':'',
'NaN':'',
'nr ':'',
'nan ':'',
'NR ':'',
'NaN ':'',
'nr ':'',
' nan':'',
' NR':'',
' NaN':'',
' nr':'',
' nan ':'',
' NR ':'',
' NaN ':'',
' nr ':'',
np.nan:''}
)
return frame
def last_sanity_check(frame, id , indexes = False, pass_cols = None):
nrs = [
'nr','nan','NR','NaN','nr ','nan ','NR ',
'NaN ','nr ',' nan',' NR',' NaN',' nr',
' nan ',' NR ',' NaN ',' nr ',np.nan
]
frame_copy = frame.copy()
id_test = frame[frame[id] == '']
if indexes:
ind_diag_test = frame[(frame[indexes[0]] == '') & (frame[indexes[1]] == '')]
else:
ind_diag_test = []
if pass_cols:
for col in pass_cols:
frame_copy.drop(col, inplace=True, axis=1)
testnr = frame_copy[frame_copy.isin(nrs).any(axis=1)]
return (id_test, ind_diag_test, testnr)
def str_to_int(x):
try:
x = int(float(x))
except ValueError:
if x == '0.0':
x = 0
else:
x = ''
return x
def check_string(x):
if len(str(x)) <=1:
x = ''
return x
def to_unicode(column):
if isinstance(column, list):
column = pd.Series(column)
column = column.apply(lambda x: unidecode(str(x)).lower())
return column.values[0]
if isinstance(column, pd.Series):
column = column.apply(lambda x: unidecode(str(x)).lower())
return column
if isinstance(column, pd.DataFrame):
for c in column.columns:
column[c] = column[c].apply(lambda x: unidecode(str(x).lower()))
return column
return column
def tokenize(column):
""" Tokenize a given column
Args:
column ([Serie]): a pandas column
Return:
a pandas column with tockens lists
"""
def step(cell):
# Remove special characters
cell = unidecode(str(cell))
# Lowercase
cell = cell.lower()
# Extract only words
rfilter = r'[a-z]+'
finds = re.findall(rfilter, cell)
# Remove one-letters words
finds = [f for f in finds if len(f)>1]
# Remove stop words
finds = [f for f in finds \
if f not in stopwords.words('spanish')]
if finds == ['nr'] or finds == [] or finds==['nan'] \
or finds == ['NR'] or finds == ['NaN'] or finds == [np.nan]:
finds = ''
#strip_tokens
for index, token in enumerate(finds):
finds[index] = token.strip()
return finds
assert isinstance(column, pd.Series), \
'Column should be a pandas Serie. {} was received instead'.format(type(column))
column = column.apply(lambda x: step(x))
return column
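# Illustrative usage (sketch, not part of this module): tokenize a small
# hypothetical free-text column; Spanish stop words, one-letter tokens, and
# placeholder values such as "NR" are dropped.
def _example_tokenize():
    column = pd.Series(["Dolor de cabeza y fiebre", "NR"])
    return tokenize(column)  # [["dolor", "cabeza", "fiebre"], ""]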
def replace_col(frame, column):
"""Replace a column in a frame
Args:
frame ([pandas Dataframe]): [pandas dataframe]
column ([pandas Series]): [new column]
"""
col_name = column.name
col_indices = column.index.values
new_frame = [frame.iloc[i]
for i in col_indices\
for _ in range(len(column.iloc[i]))]
new_values = [v for i in col_indices for v in column.iloc[i]]
    new_frame = pd.concat(new_frame, axis=1)
new_frame = new_frame.transpose()
new_frame[col_name] = new_values
return new_frame
def check_spelling(column):
""" Corrects col spelling automatically.
Right now this function only works on words
(no sentences)
Args:
column ([Series]): [a frame column]
Returns:
[type]: [description]
"""
spell = Speller(lang='es')
for i, cell in enumerate(column):
if isinstance(cell, list):
corr = []
for w in cell:
w_corr = spell(w)
corr.append(w_corr)
else:
# by word case
corr = spell(cell)
column.iloc[i] = corr
return column
def equivalent_words(column, values=None, num_cores=None):
""" Replace words by similarity.
We calculate similarity by setting letter weights.
This function works only on words (no sentences)
Args:
column ([Serie]): [a pandas column]
Returns:
[Serie]: [the same column with similar words changed]
"""
num_cores = multiprocessing.cpu_count() if num_cores is None else num_cores
if values is None:
values = [v for v in column.values]
def step(k, v):
if isinstance(v, list):
words = []
for w in v:
c = difflib.get_close_matches(w,
values, # this should be changed
n=2)
if c == []:
words.append(w)
else:
words.append(c[0])
return words
else:
# here we can use Diego's dictonary
c = difflib.get_close_matches(v,
values, # this should be changed
n=2)
return c[-1]
equivalents = Parallel(n_jobs=num_cores)(delayed(step)(k, v) \
for k, v in enumerate(column.values))
df = pd.DataFrame()
df[column.name] = equivalents
return df
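# Illustrative usage (sketch, not part of this module): snap noisy entries onto
# the closest match in a small controlled vocabulary; `vocab` is hypothetical.
def _example_equivalent_words():
    column = pd.Series(["dolr", "fiebre", "cefalea"], name="sintoma")
    vocab = ["dolor", "fiebre", "cefalea"]
    return equivalent_words(column, values=vocab, num_cores=1)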
def remove_nans(frame):
""" Remove rows with nan values
Args:
frame (pandas Series or Dataframe): column(s)
Returns:
frame: the same data structure without nans
"""
frame = frame.dropna()
return frame
def get_google_sheet(sheetid, rangex):
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=sheetid,
range=rangex).execute()
values = result.get('values', [])
df = pd.DataFrame(values[1:], columns=values[0])
return df
def short_words(column):
""" Extract short words such as:
acronyms, abbreviations...
To use this method you need credentials
contact with Cristobal (<EMAIL>)
if you need it
Args:
column (Serie): a column with words or sentences
Returns:
[list]: a list with short words detected
"""
sheetid = '1vk3CPLCRZbToaqJQA1_4_1KeH9s2Lfi0cCz0hHewh9s'
masterdic = get_google_sheet(sheetid, 'A:C')
acronimos = masterdic[masterdic['clase'] == 'sigla']['palabra'].values
    filter = r"[A-Za-z .]+"  # match words, spaces, and periods; the previous [A-z] range also matched stray punctuation
acron_detected = []
for cell in column:
cell = unidecode(str(cell))
# Lowercase
cell = cell.lower()
finds=re.findall(filter, cell)
finds = [f.replace('.', '') for f in finds]
finds = ' '.join(finds)
finds = [f.upper() for f in finds.split() if f.upper() in acronimos]
acron_detected += finds
return acron_detected
def combine_versions(frame1, frame2, on='id_user', which=None):
if which is not None:
frame2 = frame2[[on]+which]
print(frame2)
if frame1.shape[0] != frame2.shape[0]:
        print('[WARNING] Dataframes do not have the same number of rows.')
frame1 = frame1.set_index(on)
frame2 = frame2.set_index(on)
result = pd.concat([frame1, frame2], axis=1)
result.reset_index(level=0, inplace=True)
return result
def stratify_frame_by_age(frame):
# Stratify
etiquetas = ['0 a 5', '6 a 14', '15 a 19', '20 a 25', '26 a 30', '31 a 39',
'40 a 49', '50 a 59', '60 a 69', '70 o más']
range_values = [0, 6, 15, 20, 26, 31, 40, 50, 60, 70, 150]
    frame['age_range'] = pd.cut(frame['age'], range_values, right=False, labels=etiquetas)
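# Illustrative usage (sketch, not part of this module): pd.cut assigns each age
# to one of the labelled right-open bins defined above.
def _example_stratify():
    frame = pd.DataFrame({"age": [4, 17, 33, 72]})
    stratify_frame_by_age(frame)  # adds the 'age_range' column in place
    return frame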
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
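# Illustrative call (sketch, not used by the tests): with two sids that each
# learn one estimate on a different knowledge date, e.g.
#   create_expected_df_for_factor_compute(
#       pd.Timestamp("2015-01-05"), [0, 1],
#       [(0, 100.0, pd.Timestamp("2015-01-06")),
#        (1, 110.0, pd.Timestamp("2015-01-07"))],
#       pd.Timestamp("2015-01-09"),
#   )
# the estimates are forward-filled from each knowledge date through the end
# date and indexed by (at_date, knowledge_date).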
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that a single day of estimate data is loaded correctly when
        multiple columns are requested.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
        assert_frame_equal(results.sort_index(axis=1), self.expected_out.sort_index(axis=1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the expected values for a
    single day when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next-quarter loader returns the expected values for a
    single day when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test to test running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
| pd.Timestamp("2015-01-15") | pandas.Timestamp |
from Clean_Json import clean_json
from Find_Best_Hyperparameters import get_best_lr
import numpy as np
from Pycox import evaluate_model
from Train_models import train_MTL_model
from Make_h5_trained_models import Extract_Image_Features
import warnings
import pandas
import os
from extract_variables import extract_variables
extract_variables()
clean_json()
for Clinical_Set in ['Efficientnet_fine', 'Resnet_fine', 'Resnet_pretrained','Efficientnet_pretrained']:
train_MTL_model(Clinical_Set)
Extract_Image_Features(Clinical_Set)
LRs = 1 / (10 ** (np.arange(4, 15) / 4))
visits = [['00', '04', '06'],['00']]
Clinical_Sets = ['a','b','Efficientnet_fine', 'Resnet_fine', 'Resnet_pretrained','Efficientnet_pretrained']
best_lrs = {}
best_lrs_df = pandas.DataFrame()
model_types = ['LSTM','MLP','CoxPH'] #
for model_type in model_types:
for visit in visits:
for Clinical_Set in Clinical_Sets:
if(model_type == 'LSTM' and len(visit) == 1):
best_lrs[Clinical_Set.upper()+str(len(visit))] = 0
else:
results = evaluate_model(top_dir = os.getcwd()+'/data/', visits = visit, Clinical_Set = Clinical_Set, model_type=model_type, test='dev', LRs = LRs)
best_lr = get_best_lr(results)
best_lrs[Clinical_Set.upper()+str(len(visit))] = best_lr
print(best_lrs)
best_lrs_df = pandas.concat([best_lrs_df, pandas.DataFrame.from_dict(best_lrs, orient='index', columns = [model_type])], axis=1)
best_lrs_df.to_csv('data/Best_LRs.csv')
best_lrs_df = pandas.read_csv('data/Best_LRs.csv', index_col=0)
test_results = pandas.DataFrame()
for model_type in model_types:
for visit in visits:
for Clinical_Set in Clinical_Sets:
if(model_type == 'LSTM' and len(visit) == 1):
pass
else:
lr = [best_lrs_df.loc[Clinical_Set.upper()+str(len(visit))][model_type]]
results = evaluate_model(top_dir = '/Users/gregory/Documents/Weill_Cornell/Wang_Lab/AMD/AMIA/Coding_Files/data/', visits = visit, Clinical_Set = Clinical_Set, model_type=model_type, test='test', LRs = lr)
#results.to_csv(model_type+'_'+Clinical_Set.upper()+str(len(visit))+'.csv')
results.columns = [model_type+'_'+Clinical_Set.upper()+str(len(visit))]
test_results = | pandas.concat([test_results, results], axis=1) | pandas.concat |
import warnings
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_drop(idx):
dropped = idx.drop([("foo", "two"), ("qux", "one")])
index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")])
dropped2 = idx.drop(index)
expected = idx[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = idx.drop(["bar"])
expected = idx[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop("foo")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = | MultiIndex.from_tuples([("bar", "two")]) | pandas.MultiIndex.from_tuples |
from utils.utils import Config, EmailWrapper, FileWrapper, LoggerWrapper
import utils.utils as utils
from sqlalchemy import create_engine, text, exc
import sys
import pandas as pd
import requests
import time
import json
from subprocess import call
from unittest.mock import MagicMock
import os
ARTIST_RESULT_FILE = utils.ARTIST_RESULT_FILE
ARTIST_ID_FILE = utils.ARTIST_ID_FILE
MISSING_SONG_ATTRIBUTES_FILE = utils.MISSING_SONG_ATTRIBUTES_FILE
TWITTER_USER_QUEUE_FILE = utils.TWITTER_USER_QUEUE_FILE
SPOTIFY_MISSING_TWITTER_FILE = utils.SPOTIFY_MISSING_TWITTER_FILE
SECOND_TIER_USER_FILE = utils.SECOND_TIER_USER_FILE
SECOND_TIER_FOLLOWERS_FILE = utils.SECOND_TIER_FOLLOWERS_FILE
LOG_PATH = utils.LOG_PATH
DATA_PATH = utils.DATA_PATH
ARTIST_RESULT_CSV = "/code/prescraped/artist_result.csv"
# ARTIST_RESULT_CSV = "prescraped/artist_result.csv"
SECOND_TIER_CSV = "/code/prescraped/missing_twitter_with_handles.csv"
FOLLOWER_ITER_CAP = 5
config = Config()
# engine = None
engine = create_engine(config.SQLALCHEMY_DATABASE_URI, execution_options={"isolation_level": "SERIALIZABLE"})
logger = LoggerWrapper()
# logger = MagicMock()
headers = {"Authorization": "Bearer {:}".format(config.TWITTER_BEARER)}
# headers = {"Authorization": "Bearer {:}".format('')}
spotify_to_twitter = {}
#####################
# SPOTIFY ENDPOINTS #
#####################
# Retrieve and return a twitter username given a spotify ID
def extract_twitter_id(spotify_id, missing_file = SPOTIFY_MISSING_TWITTER_FILE):
r = requests.get('https://open.spotify.com/artist/{:}'.format(spotify_id))
if r.status_code != 200:
logger.twitter_warn("Unable to perform HTTP request for ID: {:}".format(spotify_id))
return -1
# {"name":"TWITTER","url":"https://twitter.com/justinbieber"}
try:
twitter_id = r.text.split('{"name":"TWITTER","url":"https://twitter.com/')[1].split('"')[0].split('?')[0]
except IndexError:
logger.twitter_warn("User has not connected their Spotify to Twitter")
FileWrapper.appendToFile(missing_file, "{:},,".format(spotify_id))
return -1
return twitter_id
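# Hedged usage sketch (added for illustration, not part of the original pipeline).
# The Spotify ID below is a made-up placeholder; extract_twitter_id() returns either a
# Twitter handle scraped from the artist page or -1 on failure.
def _demo_extract_twitter_id():
    example_spotify_id = 'EXAMPLE_SPOTIFY_ID'  # hypothetical artist ID
    handle = extract_twitter_id(example_spotify_id)
    if handle == -1:
        print('No linked Twitter account found (or the request failed)')
    else:
        print('Scraped Twitter handle:', handle)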
#####################
# TWITTER ENDPOINTS #
#####################
# Parses a dictionary of data about a Twitter user and write to db
def parse_twitter_user_and_write(data_obj, engine=engine):
twitter_id = int(data_obj.get('id'))
twitter_username = data_obj.get('username')
twitter_name = data_obj.get('name')
bio = data_obj.get('description')
verified = data_obj.get('verified')
protected = data_obj.get('protected')
public_metrics = data_obj['public_metrics']
followers_count = public_metrics.get('followers_count')
following_count = public_metrics.get('following_count')
tweet_count = public_metrics.get('tweet_count')
listed_count = public_metrics.get('listed_count')
twitter_user_obj = [twitter_id, twitter_username, twitter_name, bio, verified, protected, followers_count, following_count, tweet_count, listed_count]
try:
with engine.connect() as conn:
result = conn.execute(text("""
INSERT INTO twitter_user(twitter_id, twitter_username, twitter_name, verified, protected, followers_count, following_count, tweet_count, listed_count)
VALUES(:twitter_id, :twitter_username, :twitter_name, :verified, :protected, :followers_count, :following_count, :tweet_count, :listed_count)
RETURNING twitter_id
""").params(
twitter_id=twitter_id,
twitter_username=twitter_username,
twitter_name=twitter_name,
verified=verified,
protected=protected,
followers_count=followers_count,
following_count=following_count,
tweet_count=tweet_count,
listed_count=listed_count))
except exc.IntegrityError:
logger.twitter_warn("There was a duplicate twitter user for {:} {:}".format(twitter_id, twitter_username))
return twitter_user_obj
# Retrieve and return a twitter id and write to database for an artist
def extract_base_twitter_info(twitter_username, spotify_id, engine=engine):
# 300 calls per 15 minutes
twitter_id_request_string = 'https://api.twitter.com/2/users/by/username/{:}?user.fields=id,name,verified,description,protected,public_metrics,location'
user_id_r = requests.get(twitter_id_request_string.format(twitter_username), headers=headers)
if user_id_r.status_code != 200:
logger.twitter_warn("Unable to perform HTTP request for ID: {:}".format(twitter_username))
return user_id_r.status_code
json_data = json.loads(user_id_r.text)
if "errors" in json_data:
logger.twitter_warn("Error twitter response for user_id query: {:}".format(twitter_username))
return -1
data_obj = json_data['data']
parse_twitter_user_and_write(data_obj)
twitter_id = int(data_obj.get('id'))
try:
with engine.connect() as conn:
result = conn.execute(text("""
INSERT INTO artist(twitter_id, spotify_id)
VALUES(:twitter_id, :spotify_id)
RETURNING twitter_id
""").params(
twitter_id=twitter_id,
spotify_id=spotify_id))
except exc.IntegrityError:
logger.twitter_warn("There was a duplicate artist for twitter {:} and spotify {:}".format(twitter_id, spotify_id))
return twitter_id
# Retrieve and write a following list to database for an artist
def extract_twitter_following_info(twitter_id, next_token="", engine=engine):
# 15 calls per 15 minutes
initial_followers_request_string = 'https://api.twitter.com/2/users/{:}/following?user.fields=id,name,username,verified,description,protected,public_metrics&max_results=1000'
subsequent_followers_request_string = 'https://api.twitter.com/2/users/{:}/following?user.fields=id,name,username,verified,description,protected,public_metrics&max_results=1000&pagination_token={:}'
if next_token == "":
followers_r = requests.get(initial_followers_request_string.format(twitter_id), headers=headers)
else:
followers_r = requests.get(subsequent_followers_request_string.format(twitter_id, next_token), headers=headers)
if followers_r.status_code != 200:
logger.twitter_warn("Unable to perform HTTP request for ID: {:}".format(twitter_id))
return followers_r.status_code, None
if "errors" in followers_r:
logger.twitter_warn("Error twitter response for followers query: {:} {:}".format(twitter_id, followers_r['errors']))
return -1, None
json_data = json.loads(followers_r.text)
meta_obj = json_data.get('meta')
next_token = json_data.get('next_token')
data_obj = json_data.get('data')
following_user_list = []
if data_obj is None:
return [], None
for following_user in data_obj:
following_user_data = parse_twitter_user_and_write(following_user)
following_user_list.append(following_user_data)
try:
with engine.connect() as conn:
result = conn.execute(text("""
INSERT INTO following(follower_id, followed_id)
VALUES(:follower_id, :followed_id)
RETURNING follower_id
""").params(
follower_id=twitter_id,
followed_id=following_user_data[0]))
except exc.IntegrityError:
logger.twitter_warn("There was a duplicate following directed pair {:} to {:}".format(twitter_id, following_user_data[0]))
return following_user_list, next_token
# timebox and extract twitter information
def extract_all(artist_result_offset=0, artist_following_offset=0):
artist_result_df = pd.read_csv(ARTIST_RESULT_CSV, header=None, skiprows=artist_result_offset)
artist_result_follower_df = pd.read_csv(ARTIST_RESULT_CSV, header=None, skiprows=artist_following_offset)
artist_result_max = artist_result_df.index.stop
artist_result_follower_max = artist_result_max
u_count = 0
f_count = 0
f_ind_follower_iter = 0
last_user_time = 0.0
last_following_time = 0.0
next_token = ""
while True:
# for k in range(1):
# user queries
# 300 per 15 minute, 20 per minute, 1 per 3 seconds
for k in range(20):
# print(u_count)
            # every ~5 mins
if u_count % 100 == 0:
logger.twitter_debug("Exporting counts for safety at count of {:} and {:}".format(u_count, f_count))
FileWrapper.writeValToFile(ARTIST_RESULT_FILE, u_count+artist_result_offset)
FileWrapper.writeValToFile(ARTIST_ID_FILE, f_count+artist_following_offset)
curr_time = time.time()
diff_delay = curr_time - last_user_time - 3
if diff_delay < 0:
time.sleep(-1*diff_delay + 0.1)
u_ind = u_count
if u_ind >= artist_result_max:
break
u_spotify_id = artist_result_df.iloc[u_ind, 0]
u_spotify_name = artist_result_df.iloc[u_ind, 1]
if u_count % 10 == 0:
logger.twitter_debug("Parsing {:}th artist for HTML, {:}".format(u_count, u_spotify_name))
# spotify_id -> twitter_username
u_twitter_username = extract_twitter_id(u_spotify_id)
if u_twitter_username is None or u_twitter_username == -1:
u_twitter_username = u_spotify_name.strip().replace(" ", "").replace(",", "").replace("&", "")
if u_twitter_username == -1:
u_count += 1
continue
# twitter_username, spotify_id -> twitter_id, @write(twitter_user)
u_twitter_id = extract_base_twitter_info(u_twitter_username, u_spotify_id)
last_user_time = time.time()
if u_twitter_id == 429:
logger.twitter_warn("Rate limit exceeded for users at {:}".format(u_count))
continue
if u_twitter_id == -1:
u_count += 1
continue
# add to dictionary for easier follower queries
spotify_to_twitter[u_spotify_id] = u_twitter_id
u_count += 1
# print(u_spotify_id, u_spotify_name, u_twitter_id)
# follower_queries
f_ind = f_count
if f_ind >= artist_result_follower_max:
break
f_spotify_id = artist_result_follower_df.iloc[f_ind, 0]
f_spotify_name = artist_result_follower_df.iloc[f_ind, 1]
if f_count % 10 == 0:
logger.twitter_debug("Parsing {:}th artist for followings, {:}".format(f_count, f_spotify_name))
curr_time = time.time()
diff_delay = curr_time - last_following_time - 60
if diff_delay < 0:
time.sleep(-1*diff_delay + 0.1)
# 15 per 15 minute, 1 per minute, 1 per 60 seconds
f_twitter_id = spotify_to_twitter.get(f_spotify_id)
if f_twitter_id is None:
try:
with engine.connect() as conn:
result = conn.execute(text("""
SELECT twitter_id
FROM artist
WHERE :spotify_id = spotify_id
""").params(
spotify_id=f_spotify_id)).first()
if (result is not None) and len(result) > 0:
f_twitter_id = result[0]
spotify_to_twitter[f_spotify_id] = f_twitter_id
except exc.IntegrityError:
logger.twitter_warn("There was an error retrieving twitter_id for spotify_id {:}", f_spotify_id)
if f_twitter_id is None:
f_count += 1
f_ind_follower_iter = 0
next_token = None
logger.twitter_info("Can't get follower info for nonexistent twitter id {:} {:}".format(f_spotify_id, f_spotify_name))
FileWrapper.appendToFile(SPOTIFY_MISSING_TWITTER_FILE, "{:},{:},".format(f_spotify_id, f_spotify_name))
continue
if next_token is None:
next_token = ""
# twitter_id, next_token='' -> following_user_list, next_token, @write(twitter_user, following)
following_user_list, next_token = extract_twitter_following_info(f_twitter_id, next_token)
last_following_time = time.time()
if following_user_list == 429:
logger.twitter_warn("Rate limit exceeded for followers at {:}".format(f_count))
else:
f_ind_follower_iter += 1
# also accounts for other error case
if next_token == None or next_token == "":
f_count += 1
f_ind_follower_iter = 0
elif f_ind_follower_iter >= FOLLOWER_ITER_CAP: # practical cap of followings reached
f_count += 1
f_ind_follower_iter = 0
next_token = None
# print(f_spotify_id, f_spotify_name, f_twitter_id, following_user_list[:10])
# logging/saving
# every ~30 mins
if u_count % 300 == 0: # 600
logger.twitter_debug("Exporting csvs for safety at count of {:} and {:}".format(u_count, f_count))
rc = call("/code/db/export.sh")
time.sleep(3)
file_names = ["twitter_user.csv", "following.csv", "spotify_artist.csv", "artist.csv"]
for f in file_names:
topdir = FileWrapper.getMostRecentDir(DATA_PATH)
curr_file_name = os.path.join(topdir, f)
EmailWrapper.sendEmail("{:}: Sending logged csv".format(time.time()), subject="Twitifynd Alert {:}".format(f), attachment=curr_file_name)
# end of while loop
# end of func
# timebox and extract twitter information
def extract_second_tier(artist_result_offset=0, artist_following_offset=0):
artist_result_df = pd.read_csv(SECOND_TIER_CSV, header=None, skiprows=artist_result_offset)
artist_result_follower_df = | pd.read_csv(SECOND_TIER_CSV, header=None, skiprows=artist_following_offset) | pandas.read_csv |
## 5. Generating Regression Data ##
from sklearn.datasets import make_regression
import pandas as pd
data = make_regression(n_samples =100, n_features = 3, random_state =1)
features = pd.DataFrame(data[0])
labels = | pd.Series(data[1]) | pandas.Series |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# from QUANTAXIS.QAData.data_fq import QA_data_make_qfq, QA_data_make_hfq
# Data interface based on pytdx; the advantage is that TongDaXin (TDX) market data can be accessed on linux/mac
# For details see rainx's pytdx (https://github.com/rainx/pytdx)
#
import datetime
import numpy as np
import pandas as pd
from pytdx.exhq import TdxExHq_API
from pytdx.hq import TdxHq_API
from retrying import retry
from QUANTAXIS.QAFetch.base import _select_market_code, _select_index_code, _select_type, _select_bond_market_code
from QUANTAXIS.QAUtil import (QA_Setting, QA_util_date_stamp, QA_util_code_tostr,
QA_util_date_str2int, QA_util_date_valid,
QA_util_get_real_date, QA_util_get_real_datelist,
QA_util_future_to_realdatetime, QA_util_tdxtimestamp,
QA_util_future_to_tradedatetime,
QA_util_get_trade_gap, QA_util_log_info,
QA_util_time_stamp, QA_util_web_ping,
exclude_from_stock_ip_list, future_ip_list,
stock_ip_list, trade_date_sse)
from QUANTAXIS.QAUtil.QASetting import QASETTING
from QUANTAXIS.QASetting.QALocalize import log_path
from QUANTAXIS.QAUtil import Parallelism
from QUANTAXIS.QAUtil.QACache import QA_util_cache
def init_fetcher():
"""初始化获取
"""
pass
def ping(ip, port=7709, type_='stock'):
api = TdxHq_API()
apix = TdxExHq_API()
__time1 = datetime.datetime.now()
try:
if type_ in ['stock']:
with api.connect(ip, port, time_out=0.7):
res = api.get_security_list(0, 1)
if res is not None:
if len(api.get_security_list(0, 1)) > 800:
return datetime.datetime.now() - __time1
else:
print('BAD RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
else:
print('BAD RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
elif type_ in ['future']:
with apix.connect(ip, port, time_out=0.7):
res = apix.get_instrument_count()
if res is not None:
if res > 20000:
return datetime.datetime.now() - __time1
else:
                        print('Bad FUTUREIP RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
else:
                    print('Bad FUTUREIP RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
except Exception as e:
if isinstance(e, TypeError):
print(e)
            print('The pytdx version bundled with Tushare differs from the pytdx version used by QUANTAXIS; please reinstall pytdx to fix this:')
print('pip uninstall pytdx')
print('pip install pytdx')
else:
print('BAD RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
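# Hedged usage sketch (illustration only; the host below is an arbitrary placeholder,
# not a recommended server). ping() returns a timedelta; the 9-second sentinel marks an
# unusable server.
def _demo_ping():
    latency = ping('127.0.0.1', 7709, type_='stock')  # hypothetical TDX host
    if latency < datetime.timedelta(0, 1):
        print('server usable, latency:', latency)
    else:
        print('server unusable')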
def select_best_ip():
QA_util_log_info('Selecting the Best Server IP of TDX')
    # remove excluded IPs
import json
null = None
qasetting = QASETTING
exclude_ip = {'ip': '1.1.1.1', 'port': 7709}
default_ip = {'stock': {'ip': None, 'port': None},
'future': {'ip': None, 'port': None}}
alist = []
alist.append(exclude_ip)
ipexclude = qasetting.get_config(
section='IPLIST', option='exclude', default_value=alist)
exclude_from_stock_ip_list(ipexclude)
ipdefault = qasetting.get_config(
section='IPLIST', option='default', default_value=default_ip)
ipdefault = eval(ipdefault) if isinstance(ipdefault, str) else ipdefault
assert isinstance(ipdefault, dict)
if ipdefault['stock']['ip'] == None:
best_stock_ip = get_ip_list_by_ping(stock_ip_list)
else:
if ping(ipdefault['stock']['ip'], ipdefault['stock']['port'],
'stock') < datetime.timedelta(0, 1):
print('USING DEFAULT STOCK IP')
best_stock_ip = ipdefault['stock']
else:
print('DEFAULT STOCK IP is BAD, RETESTING')
best_stock_ip = get_ip_list_by_ping(stock_ip_list)
if ipdefault['future']['ip'] == None:
best_future_ip = get_ip_list_by_ping(future_ip_list, _type='future')
else:
if ping(ipdefault['future']['ip'], ipdefault['future']['port'],
'future') < datetime.timedelta(0, 1):
print('USING DEFAULT FUTURE IP')
best_future_ip = ipdefault['future']
else:
print('DEFAULT FUTURE IP {} is BAD, RETESTING'.format(ipdefault))
best_future_ip = get_ip_list_by_ping(future_ip_list,
_type='future')
ipbest = {'stock': best_stock_ip, 'future': best_future_ip}
qasetting.set_config(
section='IPLIST', option='default', default_value=ipbest)
QA_util_log_info(
'=== The BEST SERVER ===\n stock_ip {} future_ip {}'.format(
best_stock_ip['ip'], best_future_ip['ip']))
return ipbest
def get_ip_list_by_ping(ip_list=[], _type='stock'):
best_ip = get_ip_list_by_multi_process_ping(ip_list, 1, _type)
return best_ip[0]
def get_ip_list_by_multi_process_ping(ip_list=[], n=0, _type='stock',
cache_age=86400):
    ''' Return the list of usable IPs sorted by ping time.
    2019-04-09: added cache_age, the cache lifetime per _type
    2019-03-31: removed the filename parameter
    :param ip_list: list of IPs
    :param n: maximum number of IPs to return; if fewer usable IPs are available than n, all of them are returned; n=0 returns all usable IPs
    :param _type: IP type
    :param cache_age: cache lifetime for this IP type in seconds, default one day (86400 seconds)
    :return: list of IPs that respond to ping
    '''
cache = QA_util_cache()
results = cache.get(_type)
if results:
# read the data from cache
print('loading ip list from {} cache.'.format(_type))
else:
ips = [(x['ip'], x['port'], _type) for x in ip_list]
ps = Parallelism()
ps.run(ping, ips)
data = list(ps.get_results())
results = []
for i in range(len(data)):
            # drop entries that did not respond to ping
if data[i] < datetime.timedelta(0, 9, 0):
results.append((data[i], ip_list[i]))
        # sort by ping time in ascending order
results = [x[1] for x in sorted(results, key=lambda x: x[0])]
if _type:
# store the data as binary data stream
cache.set(_type, results, age=cache_age)
print('saving ip list to {} cache {}'.format(_type, len(results)))
if len(results) > 0:
if n == 0 and len(results) > 0:
return results
else:
return results[:n]
else:
print('ALL IP PING TIMEOUT!')
return [{'ip': None, 'port': None}]
global best_ip
best_ip = {
'stock': {
'ip': None, 'port': None
},
'future': {
'ip': None, 'port': None
}
}
# return 1 if sh, 0 if sz
def get_extensionmarket_ip(ip, port):
global best_ip
if ip is None and port is None and best_ip['future']['ip'] is None and \
best_ip['future']['port'] is None:
best_ip = select_best_ip()
ip = best_ip['future']['ip']
port = best_ip['future']['port']
elif ip is None and port is None and \
best_ip['future']['ip'] is not None and \
best_ip['future']['port'] is not None:
ip = best_ip['future']['ip']
port = best_ip['future']['port']
else:
pass
return ip, port
def get_mainmarket_ip(ip, port):
"""[summary]
Arguments:
ip {[type]} -- [description]
port {[type]} -- [description]
Returns:
[type] -- [description]
"""
global best_ip
if ip is None and port is None and best_ip['stock']['ip'] is None and \
best_ip['stock']['port'] is None:
best_ip = select_best_ip()
ip = best_ip['stock']['ip']
port = best_ip['stock']['port']
elif ip is None and port is None and \
best_ip['stock']['ip'] is not None and \
best_ip['stock']['port'] is not None:
ip = best_ip['stock']['ip']
port = best_ip['stock']['port']
else:
pass
return ip, port
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_security_bars(code, _type, lens, ip=None, port=None):
"""按bar长度推算数据
Arguments:
code {[type]} -- [description]
_type {[type]} -- [description]
lens {[type]} -- [description]
Keyword Arguments:
ip {[type]} -- [description] (default: {best_ip})
port {[type]} -- [description] (default: {7709})
Returns:
[type] -- [description]
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat([api.to_df(
api.get_security_bars(_select_type(_type), _select_market_code(
code), code, (i - 1) * 800, 800)) for i in
range(1, int(lens / 800) + 2)], axis=0, sort=False)
data = data \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(datetime=pd.to_datetime(data['datetime']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=_type, code=str(code)) \
.set_index('datetime', drop=False, inplace=False).tail(lens)
if data is not None:
return data
else:
return None
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_day(code, start_date, end_date, if_fq='00',
frequence='day', ip=None, port=None):
"""获取日线及以上级别的数据
Arguments:
code {str:6} -- code 是一个单独的code 6位长度的str
start_date {str:10} -- 10位长度的日期 比如'2017-01-01'
end_date {str:10} -- 10位长度的日期 比如'2018-01-01'
Keyword Arguments:
if_fq {str} -- '00'/'bfq' -- 不复权 '01'/'qfq' -- 前复权 '02'/'hfq' -- 后复权 '03'/'ddqfq' -- 定点前复权 '04'/'ddhfq' --定点后复权
frequency {str} -- day/week/month/quarter/year 也可以是简写 D/W/M/Q/Y
ip {str} -- [description] (default: None) ip可以通过select_best_ip()函数重新获取
port {int} -- [description] (default: {None})
Returns:
pd.DataFrame/None -- 返回的是dataframe,如果出错比如只获取了一天,而当天停牌,返回None
Exception:
如果出现网络问题/服务器拒绝, 会出现socket:time out 尝试再次获取/更换ip即可, 本函数不做处理
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
try:
with api.connect(ip, port, time_out=0.7):
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['quarter', 'Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
start_date = str(start_date)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
data = pd.concat([api.to_df(
api.get_security_bars(frequence, _select_market_code(
code), code, (int(lens / 800) - i) * 800, 800)) for i in
range(int(lens / 800) + 1)], axis=0, sort=False)
            # Note: if only one day of data is requested and the stock was suspended that day, None is returned directly
if len(data) < 1:
return None
data = data[data['open'] != 0]
data = data.assign(
date=data['datetime'].apply(lambda x: str(x[0:10])),
code=str(code),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(str(x)[0:10]))) \
.set_index('date', drop=False, inplace=False)
end_date = str(end_date)[0:10]
data = data.drop(
['year', 'month', 'day', 'hour', 'minute', 'datetime'],
axis=1)[
start_date:end_date]
if if_fq in ['00', 'bfq']:
return data
else:
                print('REAL-TIME PRICE ADJUSTMENT (FUQUAN) IS CURRENTLY NOT SUPPORTED')
return None
# xdxr = QA_fetch_get_stock_xdxr(code)
# if if_fq in ['01','qfq']:
# return QA_data_make_qfq(data,xdxr)
# elif if_fq in ['02','hfq']:
# return QA_data_make_hfq(data,xdxr)
except Exception as e:
if isinstance(e, TypeError):
            print('The pytdx version bundled with Tushare differs from the pytdx version used by QUANTAXIS; please reinstall pytdx to fix this:')
print('pip uninstall pytdx')
print('pip install pytdx')
else:
print(e)
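# Hedged usage sketch for QA_fetch_get_stock_day (illustration only; the code and date
# range are arbitrary examples and a reachable TDX server is assumed):
def _demo_fetch_stock_day():
    df = QA_fetch_get_stock_day('000001', '2017-01-01', '2017-03-01', if_fq='00', frequence='day')
    if df is not None:
        print(df[['open', 'high', 'low', 'close', 'vol']].head())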
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_min(code, start, end, frequence='1min', ip=None,
port=None):
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
type_ = ''
start_date = str(start)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
if str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
lens = 48 * lens
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
lens = 240 * lens
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
lens = 16 * lens
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
lens = 8 * lens
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
lens = 4 * lens
if lens > 20800:
lens = 20800
with api.connect(ip, port):
data = pd.concat(
[api.to_df(
api.get_security_bars(
frequence, _select_market_code(
str(code)),
str(code),
(int(lens / 800) - i) * 800, 800)) for i
in range(int(lens / 800) + 1)], axis=0, sort=False)
data = data \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(datetime=pd.to_datetime(data['datetime']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code),
date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_).set_index('datetime', drop=False,
inplace=False)[start:end]
return data.assign(datetime=data['datetime'].apply(lambda x: str(x)))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_latest(code, frequence='day', ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
code = [code] if isinstance(code, str) else code
api = TdxHq_API(multithread=True)
if frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif frequence in ['5', '5m', '5min', 'five']:
frequence = 0
elif frequence in ['1', '1m', '1min', 'one']:
frequence = 8
elif frequence in ['15', '15m', '15min', 'fifteen']:
frequence = 1
elif frequence in ['30', '30m', '30min', 'half']:
frequence = 2
elif frequence in ['60', '60m', '60min', '1h']:
frequence = 3
else:
frequence = 9
with api.connect(ip, port):
data = pd.concat([api.to_df(api.get_security_bars(
frequence, _select_market_code(item), item, 0, 1)).assign(
code=item) for item in code], axis=0, sort=False)
return data \
.assign(date=pd.to_datetime(data['datetime']
.apply(lambda x: x[0:10])).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
date_stamp=data['datetime']
.apply(lambda x: QA_util_date_stamp(str(x[0:10])))) \
.set_index('date', drop=False) \
.drop(['year', 'month', 'day', 'hour', 'minute', 'datetime'],
axis=1)
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_realtime(code=['000001', '000002'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
    # reversed_bytes9 --> rate of price change (rise speed)
    # active1, active2 --> activity level
    # reversed_bytes1 --> -price * 100
    # vol: total volume; cur_vol: current volume
    # amount: total turnover
    # s_vol: inner (sell-side) volume; b_vol: outer (buy-side) volume
    # reversed_bytes2: market
    # reversed_bytes0: time
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_market_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data = __data.assign(datetime=datetime.datetime.now(
), servertime=__data['reversed_bytes0'].apply(QA_util_tdxtimestamp))
# __data['rev']
data = __data[
['datetime', 'servertime', 'active1', 'active2', 'last_close', 'code', 'open',
'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1',
'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3',
'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5',
'bid_vol5']]
return data.set_index(['datetime', 'code'])
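# Hedged usage sketch for the real-time quote fetcher above (illustration only; assumes
# a reachable TDX server; the returned frame is indexed by (datetime, code)):
def _demo_fetch_realtime():
    quotes = QA_fetch_get_stock_realtime(code=['000001', '600000'])
    print(quotes[['price', 'vol', 'bid1', 'ask1']])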
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_realtime(code=['000001'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
    # reversed_bytes9 --> rate of price change (rise speed)
    # active1, active2 --> activity level
    # reversed_bytes1 --> -price * 100
    # vol: total volume; cur_vol: current volume
    # amount: total turnover
    # s_vol: inner (sell-side) volume; b_vol: outer (buy-side) volume
    # reversed_bytes2: market
    # reversed_bytes0: time
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_index_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data = __data.assign(datetime=datetime.datetime.now(
), servertime=__data['reversed_bytes0'].apply(QA_util_tdxtimestamp))
# __data['rev']
data = __data[
['datetime', 'servertime', 'active1', 'active2', 'last_close', 'code', 'open',
'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1',
'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3',
'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5',
'bid_vol5']]
return data.set_index(['datetime', 'code'])
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_bond_realtime(code=['010107'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
    # reversed_bytes9 --> rate of price change (rise speed)
    # active1, active2 --> activity level
    # reversed_bytes1 --> -price * 100
    # vol: total volume; cur_vol: current volume
    # amount: total turnover
    # s_vol: inner (sell-side) volume; b_vol: outer (buy-side) volume
    # reversed_bytes2: market
    # reversed_bytes0: time
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_bond_market_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data = __data.assign(datetime=datetime.datetime.now(
), servertime=__data['reversed_bytes0'].apply(QA_util_tdxtimestamp))
# __data['rev']
data = __data[
['datetime', 'servertime', 'active1', 'active2', 'last_close', 'code', 'open',
'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1',
'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3',
'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5',
'bid_vol5']]
data = data.assign(last_close=data.last_close/10, open=data.open/10, high=data.high/10, low=data.low/10,
price= data.price/10,
ask1=data.ask1/10, ask2=data.ask2/10, ask3=data.ask3/10, ask4=data.ask4/10, ask5=data.ask5/10,
bid1=data.bid1/10, bid2=data.bid2/10, bid3=data.bid3/10, bid4=data.bid4/10, bid5=data.bid5/10)
return data.set_index(['datetime', 'code'])
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_depth_market_data(code=['000001', '000002'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_market_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data['datetime'] = datetime.datetime.now()
data = __data
# data = __data[['datetime', 'active1', 'active2', 'last_close', 'code', 'open', 'high', 'low', 'price', 'cur_vol',
# 's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1', 'ask2', 'ask_vol2',
# 'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3', 'ask4',
# 'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5', 'bid_vol5']]
return data.set_index(['datetime', 'code'], drop=False, inplace=False)
'''
Shanghai market (SSE)
010xxx treasury bonds
001xxx treasury bond spot;
110xxx 120xxx corporate bonds;
129xxx 100xxx convertible bonds;
201xxx treasury bond repos;
310xxx treasury bond futures;
500xxx 550xxx funds;
600xxx A shares;
700xxx rights issues;
710xxx transferred allotted shares;
701xxx re-allotment of transferred allotted shares;
711xxx re-transfer of transferred allotted shares;
720xxx dividends;
730xxx new share subscription;
735xxx new fund subscription;
737xxx new share placement;
900xxx B shares.
Shenzhen market (SZSE)
1st digit  2nd digit  digits 3-6  meaning
0 0 XXXX A-share securities
0 3 XXXX A-share A2 warrants
0 7 XXXX A-share secondary offerings
0 8 XXXX A-share A1 warrants
0 9 XXXX A-share transferred allotments
1 0 XXXX treasury bond spot
1 1 XXXX bonds
1 2 XXXX convertible bonds
1 3 XXXX treasury bond repos
1 7 XXXX legacy investment funds
1 8 XXXX securities investment funds
2 0 XXXX B-share securities
2 7 XXXX B-share secondary offerings
2 8 XXXX B-share warrants
3 0 XXXX ChiNext (GEM) securities
3 7 XXXX ChiNext secondary offerings
3 8 XXXX ChiNext warrants
3 9 XXXX composite / constituent indices
Shenzhen A-share trading codes start with 000, e.g. Shunxin Agriculture: 000860.
B-share trading codes start with 200, e.g. Shenzhen Zhongguan B: 200018.
SME board codes start with 002, e.g. Donghua Hechuang: 002065.
ChiNext (GEM) codes start with 300, e.g. Toread: 300005.
For more, see issue https://github.com/QUANTAXIS/QUANTAXIS/issues/158
@yutiansut
'''
def for_sz(code):
"""深市代码分类
Arguments:
code {[type]} -- [description]
Returns:
[type] -- [description]
"""
if str(code)[0:2] in ['00', '30', '02']:
return 'stock_cn'
elif str(code)[0:2] in ['39']:
return 'index_cn'
elif str(code)[0:2] in ['15']:
return 'etf_cn'
elif str(code)[0:3] in ['101', '104', '105', '106', '107', '108', '109',
'111', '112', '114', '115', '116', '117', '118', '119',
'123', '127', '128',
'131', '139', ]:
        # 10xxxx treasury bond spot
        # 11xxxx bonds
        # 12xxxx convertible bonds
        # 123
        # 127
        # 12xxxx treasury bond repos
return 'bond_cn'
elif str(code)[0:2] in ['20']:
return 'stockB_cn'
else:
return 'undefined'
def for_sh(code):
if str(code)[0] == '6':
return 'stock_cn'
elif str(code)[0:3] in ['000', '880']:
return 'index_cn'
elif str(code)[0:2] == '51':
return 'etf_cn'
    # 110xxx 120xxx corporate bonds;
    # 129xxx 100xxx convertible bonds;
    # 113: convertible bonds matching A shares; 132
elif str(code)[0:3] in ['102', '110', '113', '120', '122', '124',
'130', '132', '133', '134', '135', '136',
'140', '141', '143', '144', '147', '148']:
return 'bond_cn'
else:
return 'undefined'
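# Hedged examples of the code-classification helpers above (illustration only): for_sz /
# for_sh map a bare code string to a coarse security class following the rules listed in
# the block comment above.
def _demo_code_classification():
    assert for_sz('000001') == 'stock_cn'   # Shenzhen A share
    assert for_sz('399001') == 'index_cn'   # Shenzhen component index
    assert for_sh('600000') == 'stock_cn'   # Shanghai A share
    assert for_sh('510050') == 'etf_cn'     # Shanghai ETF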
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_list(type_='stock', ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
# if type_ in ['stock', 'gp']:
# res = pd.read_csv('http://data.yutiansut.com/stock_code.csv')
# return res.assign(code=res.code.apply(lambda x: QA_util_code_tostr(x)))
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(
sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0, sort=False) for
j
in range(2)], axis=0, sort=False)
# data.code = data.code.apply(int)
data = data.loc[:,['code','volunit','decimal_point','name','pre_close','sse']].set_index(
['code', 'sse'], drop=False)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(for_sz))
sh = sh.assign(sec=sh.code.apply(for_sh))
if type_ in ['stock', 'gp']:
# res = pd.read_csv('http://data.yutiansut.com/stock_code.csv')
# return res.assign(code=res.code.apply(lambda x: QA_util_code_tostr(x)))
return pd.concat([sz, sh], sort=False).query(
'sec=="stock_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
elif type_ in ['index', 'zs']:
return pd.concat([sz, sh], sort=False).query(
'sec=="index_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
# .assign(szm=data['name'].apply(lambda x: ''.join([y[0] for y in lazy_pinyin(x)])))\
# .assign(quanpin=data['name'].apply(lambda x: ''.join(lazy_pinyin(x))))
elif type_ in ['etf', 'ETF']:
return pd.concat([sz, sh], sort=False).query(
'sec=="etf_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
else:
return data.assign(
code=data['code'].apply(lambda x: str(x))).assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
# .assign(szm=data['name'].apply(lambda x: ''.join([y[0] for y in lazy_pinyin(x)])))\
# .assign(quanpin=data['name'].apply(lambda x: ''.join(lazy_pinyin(x))))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_list(ip=None, port=None):
"""获取指数列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(
sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0, sort=False) for
j
in range(2)], axis=0, sort=False)
data = data.loc[:,['code','volunit','decimal_point','name','pre_close','sse']].set_index(
['code', 'sse'], drop=False)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(for_sz))
sh = sh.assign(sec=sh.code.apply(for_sh))
return pd.concat([sz, sh], sort=False).query(
'sec=="index_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_bond_list(ip=None, port=None):
"""bond
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(
sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0, sort=False) for
j
in range(2)], axis=0, sort=False)
# data.code = data.code.apply(int)
data = data.loc[:,['code','volunit','decimal_point','name','pre_close','sse']].set_index(
['code', 'sse'], drop=False)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(for_sz))
sh = sh.assign(sec=sh.code.apply(for_sh))
return | pd.concat([sz, sh], sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 13:16:21 2021
@author: freeridingeo
"""
from pathlib import Path
import pandas as pd
import numpy as np
from datetime import datetime as dt, timedelta
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('bmh')
def cum_mean(arr):
cum_sum = np.cumsum(arr, axis=0)
for i in range(cum_sum.shape[0]):
if i == 0:
continue
cum_sum[i] = cum_sum[i] / (i + 1)
return cum_sum
def cum_sum(arr):
    # np.cumsum already returns the running sum along axis 0
    return np.cumsum(arr, axis=0)
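# Hedged example of the running-statistics helpers above (illustration only):
def _demo_cumulative_helpers():
    values = np.array([1.0, 2.0, 3.0, 4.0])
    print(cum_sum(values))   # [ 1.  3.  6. 10.]
    print(cum_mean(values))  # running mean: [1.  1.5 2.  2.5]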
class TimeSeries(object):
def __init__(self, path):
if isinstance(path, str):
self.path = Path(path)
elif isinstance(path, Path):
self.path = path
self.ext = self.path.suffix
def read_file(self, separation = "\t"):
if self.ext in [".csv", ".txt"]:
self.data_df = pd.read_csv(self.path, sep = separation)
return self.data_df
    def get_column_names(self):
        columnames = list(self.data_df.columns)
        if len(columnames) == 0:
            len_cols = self.data_df.shape[1]
            columnames = [str(col) for col in np.arange(len_cols)]
        return columnames
def plot_specific_column(self, columname, timeunit = "month"):
plt.figure(figsize=(16,5))
plt.plot(self.data_df.index, self.data_df, color='tab:blue')
plt.gca().set(xlabel="Time", ylabel=columname)
plt.show()
fig, axes = plt.subplots(1, 2, figsize=(20,7), dpi= 80)
sns.boxplot(x='year', y=columname, data=self.data_df, ax=axes[0])
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
        sns.boxplot(x='month', y=columname, data=self.data_df, ax=axes[1])
        axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig = plt.figure(figsize=(20,8))
self.data_df.groupby(['Date'])[columname].sum().plot(figsize=(10,6), style='o')
plt.xlabel("Time")
plt.ylabel(columname)
plt.title("Scattered values of "+str(columname))
plt.show()
def prepare_timeseries_yr_m_day_doyr(self):
self.data_df = self.data_df.assign(
date = lambda x: pd.to_datetime(x['date']),
year = lambda x: x['date'].dt.year,
month = lambda x: x['date'].dt.month,
day = lambda x: x['date'].dt.day,
dayofyear = lambda x: x['date'].dt.dayofyear
)
return self.data_df
def extract_monthofyear(self, columname):
self.data_df['Month_Year'] =\
self.data_df.index.map(lambda d: d.strftime('%m-%Y'))
monthly_stats = self.data_df.groupby(by='Month_Year')[columname].\
aggregate([np.mean, np.median, np.std])
monthly_stats.reset_index(inplace=True)
monthly_stats['Year'] = monthly_stats['Month_Year']\
.map(lambda m: | pd.to_datetime(m, format='%m-%Y') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
def setup(year,hist,hist_year,operating_horizon,perfect_foresight):
# year = 0
# hist = 0
# hist_year = 2010
#read generator parameters into DataFrame
df_gen = pd.read_csv('CA_data_file/generators.csv',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('CA_data_file/paths.csv',header=0)
#calendar
df_calendar = pd.read_excel('CA_data_file/calendar.xlsx',header=0)
#list zones
zones = ['PGE_valley', 'PGE_bay', 'SCE', 'SDGE']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zones]
df_load = df_load.loc[year*8760:year*8760+8759]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily hydropower availability
df_hydro_PGE = pd.read_csv('Hydro_setup/CA_dispatchable_PGE.csv',header=0)
df_hydro_SCE = pd.read_csv('Hydro_setup/CA_dispatchable_SCE.csv',header=0)
##time series of wind generation for each zone
df_wind = | pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0) | pandas.read_csv |
#!/opt/miniconda/bin/python
import os
import re
import subprocess
import sys
from time import localtime, strftime
import pandas as pd
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import AlignIO
#import shutil
from Bio.Phylo.TreeConstruction import DistanceCalculator
d2a = {'A': 'A', 'C': 'C', 'G': 'G', 'T': 'T', 'AG': 'R', 'CT': 'Y', 'AC': 'M', 'GT': 'K', 'CG': 'S', 'AT': 'W',
'ACT': 'H', 'CGT': 'B', 'ACG': 'V', 'AGT': 'D', 'ACGT': 'N'}
def make_cons(filein):
"""Take a MSA in fasta format and return a data frame with position, nucleotide, frequencies."""
from warnings import warn
from collections import Counter
msa = AlignIO.read(filein, 'fasta')
m = len(msa) # rows, number of sequences
n = msa.get_alignment_length() # columns, number of positions
cons = []
for j in range(n):
c = Counter(b.upper() for b in msa[:, j])
if c['-'] > 0.5 * m:
continue
#print(c)
if '-' in c:
print('gap in alignment!!!!!')
del c['-'] # removes gap from positions with less than 50% gaps to add the most frequent base at this position
#bases = ''.join(sorted([b for b, counts in c.items() if counts >= 0.25 * m]))
bases = ''.join(sorted(c, key=c.get, reverse=True)) #sorted by frequency, will not work for wobble calling with d2a!
#print(bases)
bases = bases[0] # max frequency base instead of wobbles!
try:
cons.append(d2a[bases])
except KeyError:
warn(str(c))
return ''.join(cons)
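# Hedged usage sketch for make_cons (illustration only; 'example_msa.fasta' is a
# placeholder path to an aligned FASTA file):
def _demo_make_cons():
    consensus = make_cons('example_msa.fasta')
    print('consensus length:', len(consensus))
    print(consensus[:60])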
def extract_genes(df):
"""Split V/J genes and alleles at the first *.
Homsap IGHV3-7*01 F, or Homsap IGHV3-7*02 F -> Homsap IGHV3-7
"""
vgene = df['V-GENE and allele'].apply(lambda x: "_or_".join(sorted(set(re.findall('IG.V.-[0-9]+', x)))))
jgene = df['J-GENE and allele'].apply(lambda x: "_or_".join(sorted(set(re.findall('IG.J[0-9]+', x)))))
df = df.copy()
df['VGENE'] = vgene
df['JGENE'] = jgene
return df
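# Hedged example of the gene-name extraction above (illustration only; the annotation
# strings are made up but follow the IMGT-style format parsed by the regexes):
def _demo_extract_genes():
    toy = pd.DataFrame({'V-GENE and allele': ['Homsap IGHV3-7*01 F, or Homsap IGHV3-7*02 F'],
                        'J-GENE and allele': ['Homsap IGHJ4*02 F']})
    out = extract_genes(toy)
    print(out[['VGENE', 'JGENE']])  # expected: IGHV3-7, IGHJ4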
def extract_barcode(df):
"""First 21 nt of sequence are barcode."""
bc = df['Sequence'].apply(lambda x: str(x)[:21])
df = df.copy()
df['barcode'] = bc
return df
def run_child(cmd):
"""use subrocess.check_output to run an external program with arguments."""
try:
output = subprocess.check_output(cmd, universal_newlines=True, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ee:
sys.exit("Execution of %s failed with returncode %d: %s" % (cmd, ee.returncode, ee.output))
return output
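# Example (hypothetical, commented out): run_child shells the command out and
# aborts the whole program with an error message if the command fails.
# run_child('echo hello')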
def check_clustering_and_trim(msa_outfile):
"""Check if a cluster includes different sub-groups of antibodies by checking
the following criteria:
- break the cluster into sub-groups if there is a distance above 10%
return a list of file-names
"""
# calculate the distance matrix
aln = AlignIO.read(msa_outfile, "fasta")
calculator = DistanceCalculator('identity')
dm = calculator.get_distance(aln)
    # extract the distance between each sequence and the previous one
    # (dm[i, -2] is the next-to-last entry of row i of the lower-triangular
    # distance matrix, i.e. the distance between sequence i and sequence i-1)
    off_diag = []
    for i in range(1, len(dm)):
        off_diag.append(dm[i, -2])
greater_idx = []
for i in range(0, len(off_diag)):
if off_diag[i] > 0.1:
greater_idx.append(i)
clusters_lists = []
prev_idx = 0
for idx in greater_idx:
sub_cluster = aln[prev_idx:idx+1, :]
prev_idx = idx+1 # index in distance array is 1 less than index in the alignment list (aln)
clusters_lists.append(sub_cluster)
# Take care of the last piece after the last greater element
if(idx == greater_idx[-1]):
clusters_lists.append(aln[(greater_idx[-1]+1):, :])
# number of pairwise distances is one less than number of aligned sequences
if clusters_lists:
total_seqs = 0
for ls in clusters_lists:
total_seqs += len(ls._records)
assert (len(off_diag) +1) == total_seqs, "number of sequences in sub-clusters %d \
is not equal to number of seqs %d in \
original cluster" %(total_seqs, (len(off_diag) +1))
sub_clusters_file_dict = {}
msa_filename = os.path.splitext(msa_outfile)[0]
for idx , sub_clusters in enumerate(clusters_lists):
sub_msa_file = "%s_%d.fasta" %(msa_filename, idx)
AlignIO.write(sub_clusters, sub_msa_file, "fasta")
sub_clusters_file_dict.update({sub_msa_file:len(sub_clusters)})
return sub_clusters_file_dict
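# Illustrative (commented) call; 'cluster_msa.fasta' is a hypothetical alignment.
# When the cluster is split, the function returns a dict mapping sub-cluster
# fasta paths to the number of sequences written to each; otherwise nothing
# useful is returned and the original MSA file should be used as-is.
# sub_clusters = check_clustering_and_trim('cluster_msa.fasta')
# if sub_clusters:
#     for msa_path, n_seqs in sub_clusters.items():
#         print(msa_path, n_seqs)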
def main():
folder_name = sys.argv[1]
#folder_name = '/data/AbX/experiments/161006/IMGT_download/AK170_1_S1_IgG1_ECBC_panda_a'
chain = sys.argv[2]
if chain not in ['HC', 'LC']:
sys.exit('Usage: %s folder_path [HC|LC]' % sys.argv[0])
summary_file = os.path.join(folder_name, '1_Summary.txt')
nt_file = os.path.join(folder_name, '3_Nt-sequences.txt')
sel_cols = ['Sequence ID', 'V-GENE and allele', 'J-GENE and allele', 'Sequence',
'CDR3-IMGT length']
now = strftime("%Y-%m-%d %H:%M:%S", localtime())
print('%s reading summary file' % now, file=sys.stderr)
sys.stderr.flush()
    imgt_ann = pd.read_csv(filepath_or_buffer=summary_file, delimiter='\t', header=0, low_memory=True)
import pandas as pd
from pathlib import Path
from datetime import datetime
import os
import numpy as np
project_id = "learning-agendas"
# Download data and save to a CSV. If data needs to be refreshed the CSV needs to be deleted.
filename = "./data/raw/test_attendance_data.csv"
table_name = "gathered_test_attendance_at_data"
if os.path.exists(filename):
# Read from CSV if already written to avoid repeatedly transferring data from BigQuery
df = pd.read_csv(filename)
df = df.drop("Unnamed: 0", axis=1)
else:
query = "SELECT * FROM `learning-agendas.math_tutoring_2020." + table_name + "`"
df = pd.read_gbq(query, project_id=project_id)
df.to_csv(filename)
sat_to_act_math = pd.read_csv("./data/raw/sat_to_act_math.csv")
sat_to_act_english_reading = pd.read_csv("./data/raw/sat_to_act_english_reading.csv")
def convert_sat_to_act(sat_score, conversion_file, act_score):
    """Convert an SAT score to the ACT scale using the given conversion table
    and return the higher of the converted score and any existing ACT score."""
    try:
        conversion_score = (
            conversion_file[sat_score >= conversion_file.SAT]
            .reset_index()
            .iloc[0]["ACT"]
        )
        if pd.isna(act_score):
            return conversion_score
        elif conversion_score >= act_score:
            return conversion_score
        else:
            return act_score
    except Exception:
        # no usable SAT score (e.g. NaN/None) or no matching bracket:
        # fall back to the existing ACT score
        return act_score
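# Quick self-check of the conversion logic with a tiny, made-up conversion table
# (the real tables are the CSVs loaded above and are assumed to be sorted by
# descending SAT score, as this toy table is):
_demo_table = pd.DataFrame({"SAT": [600, 550, 500], "ACT": [26, 24, 22]})
assert convert_sat_to_act(560, _demo_table, act_score=np.nan) == 24
assert convert_sat_to_act(560, _demo_table, act_score=28) == 28  # keeps the higher ACT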
df["max_converted_math"] = df.apply(
lambda x: convert_sat_to_act(x["max_sat_math"], sat_to_act_math, x["max_act_math"]),
axis=1,
)
df["max_converted_english_reading"] = df.apply(
lambda x: convert_sat_to_act(
x["max_sat_english"], sat_to_act_english_reading, x["max_act_english_reading"]
),
axis=1,
)
df["max_converted_english"] = df.apply(
lambda x: convert_sat_to_act(
x["max_sat_english"], sat_to_act_math, x["max_act_english"]
),
axis=1,
)
df["max_converted_reading"] = df.apply(
lambda x: convert_sat_to_act(
x["max_sat_english"], sat_to_act_math, x["max_act_reading"]
),
axis=1,
)
df.loc[pd.isna(df.mod_duration), "mod_duration_filled"] = (
df.loc[pd.isna(df.mod_duration), "Attendance_Numerator"] * 60
)
# rows that already have a recorded duration keep it unchanged
# (assumption: this mirrors the fill above for the non-missing case)
df.loc[~pd.isna(df.mod_duration), "mod_duration_filled"] = df.loc[
    ~pd.isna(df.mod_duration), "mod_duration"
]
"""
"""
import sys
import argparse
import os
import time
import collections
import re
from six.moves import StringIO
import pandas
import tqdm # progress bar
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
import shellinford
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"peptides",
metavar="FILE.csv",
help="CSV of mass spec hits")
parser.add_argument(
"reference_csv",
metavar="FILE.csv",
help="CSV of protein sequences")
parser.add_argument(
"reference_index",
metavar="FILE.fm",
help="shellinford index over protein sequences")
parser.add_argument(
"--out",
metavar="OUT.csv",
help="Out file path")
parser.add_argument(
"--flanking-length",
metavar="N",
type=int,
default=15,
help="Length of flanking sequence to include")
parser.add_argument(
"--debug-max-rows",
metavar="N",
type=int,
default=None,
help="Max rows to process. Useful for debugging. If specified an ipdb "
"debugging session is also opened at the end of the script")
def run():
args = parser.parse_args(sys.argv[1:])
    df = pandas.read_csv(args.peptides)
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring on datetimelike looking when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
        # in theory this should be all nulls, but since
        # we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
            [Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
            dtype=object,
        )
        tm.assert_series_equal(s, expected)
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
class TestDataFrameTimeSeriesMethods:
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
filled = datetime_frame.fillna(method="bfill", limit=1)
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, datetime_frame, freq, periods, fill_method, limit
):
# GH 7292
rs_freq = datetime_frame.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_frame.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ["h", "m", "s", "ms", "D", "M", "Y"]
ns_dtype = np.dtype("M8[ns]")
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp["dates"] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert (tmp["dates"].values == ex_vals).all()
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range("1/1/2016", periods=10, freq="2S")
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid(
self, float_frame, data, idx, expected_first, expected_last
):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: its preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_frame_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_frame_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_frame_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_frame_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last("1D")
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H")
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
tm.assert_frame_equal(result, expected)
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
res = df.min()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = pd.Series(data_ns).to_frame()
result["new"] = data_ns
expected = pd.DataFrame(
{0: [1, None], "new": [1, None]}, dtype="datetime64[ns]"
)
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = pd.DataFrame(
{0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]"
)
tm.assert_frame_equal(result, expected)
def test_frame_to_period(self):
K = 5
dr = date_range("1/1/2000", "1/1/2001")
pr = period_range("1/1/2000", "1/1/2001")
df = DataFrame(np.random.randn(len(dr), K), index=dr)
df["mix"] = "a"
pts = df.to_period()
exp = df.copy()
exp.index = pr
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M")
tm.assert_index_equal(pts.index, exp.index.asfreq("M"))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M", axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq("M"))
msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>"
with pytest.raises(ValueError, match=msg):
df.to_period(axis=2)
@pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
def test_tz_convert_and_localize(self, fn):
l0 = date_range("20140701", periods=5, freq="D")
l1 = date_range("20140701", periods=5, freq="D")
int_idx = Index(range(5))
if fn == "tz_convert":
l0 = l0.tz_localize("UTC")
l1 = l1.tz_localize("UTC")
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)("US/Pacific")
l1_expected = getattr(idx, fn)("US/Pacific")
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)("US/Pacific")
tm.assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)("US/Pacific", level=0)
assert not df3.index.levels[0].equals(l0)
tm.assert_index_equal(df3.index.levels[0], l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)("US/Pacific", level=1)
tm.assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
            tm.assert_index_equal(df3.index.levels[1], l1_expected)
"""Performs growth and exchange analysis for several models."""
from cobra.util.solver import interface_to_str, OptimizationError
from micom import load_pickle
from micom.annotation import annotate_metabolites_from_exchanges
from micom.logger import logger
from micom.media import minimal_medium
from micom.workflows.core import workflow, GrowthResults
from micom.workflows.media import process_medium
from os import path
import pandas as pd
DIRECTION = pd.Series(["import", "export"], index=[0, 1])
import pickle
import math, re, os, datetime
from os import path
from tqdm import tqdm
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import tensorflow as tf
import tensorflow.keras as keras
from keras_preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras import mixed_precision
import numpy as np
from tensorflow.keras.models import Model
import efficientnet.tfkeras as efn
# Seed value
# Apparently you may use different seed values at each stage
seed_value = 12234
# 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED'] = str(seed_value)
# 2. Set the `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
# 3. Set the `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
# 4. Set the `tensorflow` pseudo-random generator at a fixed value
import tensorflow as tf
tf.random.set_seed(seed_value)
import pandas as pd
import shutil
import pathlib
import leave_data as ld
import leave_plot as lp
import leave_mixup as lm
import cosine_lr as clr
# mixed_precision.set_global_policy('mixed_float16')
EPOCHS = 30
BATCH_SIZE = 16
IMG_SIZE = (512, 512)
# BASE_FOLDER = '/kaggle/input/cassava-leaf-disease-classification/'
# WORKING_FOLDER = '/kaggle/working/'
BASE_FOLDER = './data/'
WORKING_FOLDER = './'
CSV_LOCATION = f'{BASE_FOLDER}merged_data.csv'
TRAINING_IMAGES_INPUT = f'{BASE_FOLDER}train_images/'
TEST_IMAGES_INPUT = f'{BASE_FOLDER}test_images/'
SUBMISSION_FILE = f'{WORKING_FOLDER}submission.csv'
def create_combined_model():
pre_trained_efn = efn.EfficientNetB4(input_shape=(*IMG_SIZE, 3),
include_top=False,
weights='noisy-student')
# freeze the batch normalisation layers
for layer in reversed(pre_trained_efn.layers):
if isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = False
else:
layer.trainable = True
x = pre_trained_efn.output
x = layers.Dropout(0.25)(x)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dropout(0.25)(x)
prediction1 = layers.Dense(5, activation='softmax')(x)
model_efn = Model(inputs=pre_trained_efn.input, outputs=prediction1)
pre_trained_resnet = tf.keras.applications.ResNet50V2(input_shape=(*IMG_SIZE, 3),
include_top=False,
weights='imagenet')
# freeze the batch normalisation layers
for layer in reversed(pre_trained_resnet.layers):
if isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = False
else:
layer.trainable = True
x = pre_trained_resnet.output
x = layers.Dropout(0.25)(x)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dropout(0.25)(x)
prediction2 = layers.Dense(5, activation='softmax')(x)
model_res = Model(inputs=pre_trained_resnet.input, outputs=prediction2)
    # Concatenate is a layer class: instantiate it, then call it on the list of tensors
    merged = layers.Concatenate()([model_efn.output, model_res.output])
merged = layers.Flatten()(merged)
merged = layers.Dropout(0.5)(merged)
merged = layers.Dense(1024, activation='relu')(merged)
merged = layers.Dense(5, activation='softmax')(merged)
optimizer = tf.keras.optimizers.Adam()
loss = tf.keras.losses.CategoricalCrossentropy()
model_fusion = Model([model_efn.input, model_res.input], merged)
model_fusion.compile(optimizer=optimizer, loss=loss, metrics='accuracy')
print(model_fusion.summary())
return model_fusion
def create_callbacks(log_dir):
# early_stopping = EarlyStopping(patience=4, monitor='val_loss', verbose=1)
lr_schedule = LearningRateScheduler(clr.lrfn, verbose=1)
model_name = f'./output/models/best-model.hdf5'
model_checkpoint = ModelCheckpoint(monitor='val_loss',
filepath=model_name,
save_best_only=True,
                                       verbose=1)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
callbacks = [
lr_schedule,
model_checkpoint,
tensorboard_callback,
]
return callbacks
def train_model_naive_split():
inp_train_gen = ImageDataGenerator(
rescale=1. / 255,
rotation_range=120,
width_shift_range=0.4,
height_shift_range=0.4,
shear_range=0.2,
zoom_range=0.4,
brightness_range=[0.2, 1.0],
horizontal_flip=True,
vertical_flip=True,
validation_split=0.2,
fill_mode='reflect',
)
train_data = pd.read_csv('./data/merged_data.csv')
    # drop 9,500 randomly chosen rows of the over-represented class 3 to reduce class imbalance
to_remove = np.random.choice(train_data[train_data['label'] == 3].index, size=9500, replace=False)
train_data = train_data.drop(to_remove)
train_data['label'] = train_data['label'].astype(str)
Y = train_data[['label']]
train_iterator = inp_train_gen.flow_from_dataframe(train_data,
x_col='image_id',
y_col='label',
directory='./data/train_images/',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
subset='training',
shuffle=True)
validation_iterator = inp_train_gen.flow_from_dataframe(train_data,
x_col='image_id',
y_col='label',
directory='./data/train_images/',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
subset='validation',
shuffle=True)
model = create_combined_model()
log_dir = "./output/logs/fit/" + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
history = model.fit(train_iterator,
validation_data=validation_iterator,
epochs=EPOCHS,
callbacks=create_callbacks(log_dir))
return history
def train_model_naive_split_mixup():
inp_train_gen = ImageDataGenerator(
rescale=1. / 255,
rotation_range=260,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect',
validation_split=0.25
)
train_data = pd.read_csv('./data/merged_data.csv')
train_data['label'] = train_data['label'].astype(str)
Y = train_data[['label']]
train_iterator = lm.MixupImageDataGenerator(
generator=inp_train_gen,
directory='./data/train_images/',
img_width=IMG_SIZE[0],
img_height=IMG_SIZE[1],
batch_size=BATCH_SIZE,
subset='training'
)
validation_iterator = inp_train_gen.flow_from_dataframe(train_data,
x_col='image_id',
y_col='label',
directory='./data/train_images/',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
color_mode='rgb',
subset='validation',
shuffle=True)
model = create_cnn_model()
log_dir = "./output/logs/fit/" + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
history = model.fit(train_iterator,
validation_data=validation_iterator,
epochs=EPOCHS,
callbacks=create_callbacks(log_dir))
return history
def load_and_predict(model):
test_generator = ImageDataGenerator(rescale=1. / 255,
rotation_range=360,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
ids = []
tta_predictions = []
for i in tqdm(range(10)):
test_iterator = test_generator.flow_from_directory(
'./test/',
target_size=IMG_SIZE,
shuffle=False,
class_mode='categorical',
batch_size=1)
if i == 1:
for filename in test_iterator.filenames:
print(filename)
ids.append(filename.split('/')[1])
predict_result = model.predict(test_iterator, steps=len(test_iterator.filenames))
tta_predictions.append(predict_result)
result = []
predictions = np.mean(tta_predictions, axis=0)
for index, prediction in enumerate(predictions):
classes = np.argmax(prediction)
result.append([ids[index], classes])
result.sort()
return result
def store_prediction():
model = keras.models.load_model('./output/models/best-model.hdf5', compile=True)
pathlib.Path(f'./test/1/').mkdir(parents=True, exist_ok=True)
test_images = os.listdir(TEST_IMAGES_INPUT)
ld.copy_test_images(test_images, TEST_IMAGES_INPUT)
predictions = load_and_predict(model)
# clean temp files
if os.path.exists("./train"):
shutil.rmtree('./train')
if os.path.exists("./test"):
shutil.rmtree('./test')
    df = pd.DataFrame(data=predictions, columns=['image_id', 'label'])
__author__ = "<NAME>"
__project__ = "gme.estimate"
__created__ = "05-24-2018"
__edited__ = "<NAME>"
import pandas as pd
import numpy as np
from typing import List
from typing import Union
import statsmodels.api as sm
import time as time
import traceback
#-----------------------------------------------------------------------------------------#
# This file contains the underlying functions for the .estimate method in EstimationModel #
#-----------------------------------------------------------------------------------------#
# -----------
# Main function: Sequences the other routines
# -----------
def _estimate_ppml(data_frame,
meta_data,
specification,
fixed_effects: List[Union[str,List[str]]] = [],
drop_fixed_effect: List[Union[str,List[str]]] = []):
'''
Performs sector by sector GLM estimation with PPML diagnostics
Args:
data_frame: (Pandas.DataFrame) A dataframe containing data for estimation
meta_data: (obj) a MetaData object from gme.EstimationData
specification: (obj) a Specification object from gme.EstimationModel
fixed_effects: (List[Union[str,List[str]]]) A list of variables to construct fixed effects based on.
Can accept single string entries, which create fixed effects corresponding to that variable or lists of
strings that create fixed effects corresponding to the interaction of the list items. For example,
fixed_effects = ['importer',['exporter','year']] would create a set of importer fixed effects and a set of
exporter-year fixed effects.
drop_fixed_effect: (List[Union[str,List[str]]]) The fixed effect category from which to drop a fixed effect.
The entry should be a subset of the list supplied for fixed_effects. In each case, the last fixed effect is
dropped.
Returns: (Dict[GLM.fit], Pandas.DataFrame, Dict[DataFrame])
1. Dictionary of statsmodels.GLM.fit objects with sectors as the keys.
2. Dataframe with diagnostic information by sector
3. Dictionary of estimation DataFrames + predicted trade values with sectors as the keys.
'''
post_diagnostics_data_frame_dict = {}
results_dict = {}
diagnostics_log = pd.DataFrame([])
start_time = time.time()
print('Estimation began at ' + time.strftime('%I:%M %p on %b %d, %Y'))
if not specification.sector_by_sector:
data_frame = data_frame.reset_index(drop=True)
fixed_effects_df = _generate_fixed_effects(data_frame, fixed_effects, drop_fixed_effect)
estimating_data_frame = pd.concat(
[data_frame[specification.lhs_var], data_frame[specification.rhs_var], fixed_effects_df], axis=1)
model_fit, post_diagnostics_data_frame, diagnostics_log = _regress_ppml(estimating_data_frame, specification)
results_dict['all'] = model_fit
end_time = time.time()
diagnostics_log.at['Completion Time'] = str(round((end_time - start_time)/60,2)) + ' minutes'
post_diagnostics_data_frame_dict['all'] = post_diagnostics_data_frame
else:
sector_groups = data_frame.groupby(meta_data.sector_var_name)
sector_list = _sectors(data_frame, meta_data)
iteration_count = 1
for sector in sector_list:
sector_start_time = time.time()
print('Sector ' + str(sector) + ' began at ' + time.strftime('%I:%M %p on %b %d, %Y'))
sector_data_frame = sector_groups.get_group(sector)
sector_data_frame = sector_data_frame.reset_index(drop=True)
# Create fixed effects
fixed_effects_df = _generate_fixed_effects(sector_data_frame, fixed_effects, drop_fixed_effect)
# Dataframe for estimations
sector_data_frame = pd.concat(
[sector_data_frame[specification.lhs_var], sector_data_frame[specification.rhs_var], fixed_effects_df],
axis=1)
model_fit, post_diagnostics_data_frame, diagnostics_output = _regress_ppml(sector_data_frame, specification)
# Timing reports
sector_end_time = time.time()
diagnostics_output.at['Sector Completion Time'] = (str(round((sector_end_time - sector_start_time) / 60, 2))
+ ' minutes')
if iteration_count > 1:
average_time = ((time.time() - start_time) / 60) / iteration_count
completion_time = (len(sector_list) - iteration_count) * average_time
print("Average iteration time: " + str(average_time) + " minutes")
print("Expected time to completion: " + str(completion_time) + " minutes ("+ str(completion_time/60)
+ " hours)\n")
# Store results
post_diagnostics_data_frame_dict[str(sector)] = post_diagnostics_data_frame
results_dict[str(sector)] = model_fit
diagnostics_log = pd.concat([diagnostics_log, diagnostics_output.rename(str(sector))], axis=1)
iteration_count+=1
print("Estimation completed at " + time.strftime('%I:%M %p on %b %d, %Y'))
return results_dict, diagnostics_log, post_diagnostics_data_frame_dict
# --------------
# Prep for Estimation Functions
# --------------
def _generate_fixed_effects(data_frame,
fixed_effects: List[Union[str,List[str]]] = [],
drop_fixed_effect: List[Union[str,List[str]]] = []):
'''
Create fixed effects for single and interacted categorical variables.
Args:
data_frame: Pandas.DataFrame
A DataFrame containing data for estimation.
fixed_effects: List[Union[str,List[str]]]
A list of variables to construct fixed effects based on.
Can accept single string entries, which create fixed effects corresponding to that variable or lists of
strings, which create fixed effects corresponding to the interaction of the list items. For example,
fixed_effects = ['importer',['exporter','year']] would create a set of importer fixed effects and a set of
exporter-year fixed effects.
drop_fixed_effect: (optional) List[Union[str,List[str]]]
The fixed effect category from which to drop a fixed effect.
The entry should be a subset of the list supplied for fixed_effects. In each case, the last fixed effect
is dropped.
Returns: Pandas.DataFrame
A DataFrame of fixed effects to be concatenated with the estimating DataFrame
'''
fixed_effect_data_frame = pd.DataFrame([])
# Get list for separate and combine fixed effect
combined_fixed_effects = []
separate_fixed_effects = []
for item in fixed_effects:
if type(item) is list:
combined_fixed_effects.append(item)
else:
separate_fixed_effects.append(item)
# Construct simple fixed effects
for category in separate_fixed_effects:
name = category + '_fe'
temp_fe = pd.get_dummies(data_frame[category], prefix=name)
if category in drop_fixed_effect:
temp_fe.drop(temp_fe.columns[[temp_fe.shape[1] - 1]], axis=1, inplace=True)
fixed_effect_data_frame = pd.concat((fixed_effect_data_frame, temp_fe), axis=1)
# Construct multiple fixed effects
for item in combined_fixed_effects:
if len(item) < 1:
raise ValueError('A fixed_effects list element cannot be an empty list [].')
if len(item) == 1:
name = '_'.join(item) + '_fe'
            temp_fe = pd.get_dummies(data_frame[item[0]], prefix=name)
"""
Convolutional Neural Network for kaggle facial keypoints detection contest.
Note: Only the labels contain missing values in all of the data.
"""
# THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python CNN.py
from __future__ import division, print_function
import time
import theano
import lasagne
import logging
import numpy as np
import pandas as pd
import theano.tensor as T
from sklearn.cross_validation import train_test_split
def set_verbosity(verbose_level=3):
"""Set the level of verbosity of the Preprocessing."""
if not type(verbose_level) == int:
raise TypeError("verbose_level must be an int")
if verbose_level < 0 or verbose_level > 4:
raise ValueError("verbose_level must be between 0 and 4")
verbosity = [logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG]
logging.basicConfig(
format='%(asctime)s:\t %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=verbosity[verbose_level])
def imputate(frame):
"""Deal with missing values in a DataFrame."""
start_time = time.time()
frame[frame.isnull().any(axis=1)].to_csv("train_incomplete.csv",
index=False)
frame.dropna(inplace=True)
time_diff = time.time() - start_time
logging.info("Imputation completed in " + str(time_diff) + " seconds")
def parse_data(train_file="training.csv", test_file="test.csv"):
"""
Parse training and test data;
split Image and labels and convert Image column to DataFrame.
"""
start_time = time.time()
train = pd.read_csv(train_file)
imputate(train)
test = pd.read_csv(test_file)
# Get y_train then scale between [-1, 1]
y_train = train.ix[:, :-1]
y_max = np.max(np.array(y_train))
y_min = np.min(np.array(y_train))
y_train = (2 * (y_train - y_min) / (y_max - y_min)) - 1
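    # Illustration (assumption, not used below): predictions in [-1, 1] can be mapped back to
    # pixel coordinates with the inverse transform: y = (y_scaled + 1) * (y_max - y_min) / 2 + y_min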
# Convert Image column in train into DataFrame
pixel_columns = ["pixel" + str(i + 1) for i in range(96 * 96)]
parsed_train_images = []
for image in list(train["Image"]):
        parsed_train_images.append([int(x) / 255 for x in image.split()])
    X_train = pd.DataFrame(parsed_train_images, columns=pixel_columns)
#!/usr/bin/env python3.6
#
# Created Date: 11/09/2018
# Modified Date: 12/09/2018
#
# Implements the Early Warning Alert Algorithm of Heatwave Crisis Classification module
# based on the forecast weather data from FMI. It calculates the Discomfort Index.
# Also, it calculates the Heatwave Overall Crisis Level (HOCL).
#
#----------------------------------------------------------------------------------------------------------
# Inputs: a) FMI weather forecasts from a region defined by a list of points in the
# Thessaloniki region
#
# Outputs: TOP104_METRIC_REPORT which contains the ....
#
# Early Warning Alert Algorithm from Crisis Classification (based on FMI data)
#----------------------------------------------------------------------------------------------------------
#
from bus.bus_producer import BusProducer
import json, time, re
import os, errno
from pathlib import Path
import pandas as pd
from pandas import read_csv, DataFrame, concat, ExcelWriter, read_excel
from datetime import datetime, timedelta
from collections import OrderedDict
import urllib.request
import xmltodict
import numpy as np
from CRCL.HeatwaveCRisisCLassification.Topic104_Metric_Report import Top104_Metric_Report
from CRCL.HeatwaveCRisisCLassification.topic104HeatWave import topic104HeatWave
from CRCL.HeatwaveCRisisCLassification.Auxiliary_functions import Heatwave_Disconfort_Index, Heatwave_Overall_Crisis_Level
from CRCL.HeatwaveCRisisCLassification.Create_Queries_HeatwavePilot import extract_forecasts_latlng
from CRCL.HeatwaveCRisisCLassification.parse_XML_dict import parse_XML_to_dict_querylist
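# Illustrative sketch only (this is NOT the imported Heatwave_Disconfort_Index): the Discomfort
# Index used in STEP 3 is assumed to follow Thom's formulation,
#   DI = T - model_param * (1 - 0.01 * RH) * (T - InitTemp),
# with T in degrees Celsius, RH in percent, model_param ~ 0.55 and InitTemp ~ 14.5 from the
# literature, matching the parameters passed to the imported function below.
def _discomfort_index_sketch(T, RH, init_temp=14.5, model_param=0.55):
    # returns Thom's discomfort index for a single temperature/humidity pair
    return T - model_param * (1 - 0.01 * RH) * (T - init_temp)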
def CrisisClassificationHeatwave_PreEmerg():
ver = 'Ver6_2nd_Period'
# Create a directory to store the output files and TOPICS
# Create a path
current_dirs_parent = os.getcwd()
root_path = current_dirs_parent + "/" + "CRCL/HeatwaveCRisisCLassification" + "/"
#root_path = Path.cwd()
now = datetime.now()
directory = root_path + "TOPICS" + "_" + ver + "_" + str(now.year) + "_" + str(now.month) + "_" + str(now.day)
os.makedirs(directory, exist_ok=True)
# Start Timing Step 1
start_step1 = time.time()
# Store the time steps
time_duration_step = []
#-----------------------------------------------------------------------------------
# STEP 1: Fetch data from the FMI OpenData Web Feature Service (WFS)
#
# Query to FMI to extract data from a set of points in Thessaloniki region
#
# point 1 Euosmos {'lat':40.664281, 'long':22.898388},
# point 2 Pl. Aristotelous {'lat': 40.632903, 'long': 22.940400}
# point 3 Faliro {'lat':40.619788, 'long':22.957993}
# point 4 Konstantinopolitika {'lat':40.611343, 'long':22.992180}
# point 5 Thermi - Xortiatis {'lat':40.581375, 'long':23.097996}
# point 6 Aerodromio {'lat':40.514354, 'long':22.986642}
#
fmi_addr = 'http://data.fmi.fi/fmi-apikey/'
my_api_key = '<KEY>'
data_format = 'multipointcoverage' #'timevaluepair'
points = [ {'name':'Euosmos', 'lat':40.664281, 'long':22.898388},
{'name':'Aristotelous Sq.', 'lat':40.632903, 'long':22.940400},
{'name':'Faliro', 'lat':40.619788, 'long':22.957993},
{'name':'Konstantinopolitika', 'lat':40.611343, 'long':22.992180},
{'name':'Thermi-Xortiatis', 'lat':40.581375, 'long':23.097996},
{'name':'Airport','lat':40.514354, 'long':22.986642}
]
parameters = ["Temperature", "Humidity"]
time_interval = 54
# Call function to create query list
qrlist = extract_forecasts_latlng(fmi_addr, my_api_key, data_format, parameters, points, time_interval)
# End Timing Step 1
end_step1 = time.time()
time_duration_step.append( end_step1 - start_step1 )
#---------------------------------------------------------------------------------------
# STEP 2:
# Call function to parse XLM and extract the data
# Start Timing Step 2
start_step2 = time.time()
#res = parse_XML_to_dict_querylist(qrlist, points, directory)
# Store data frame to xlsx file
# dfxls = pd.ExcelWriter(directory + "/" + "DataFrame_Results.xlsx")
# res.to_excel(dfxls,'Sheet1', index=False)
# dfxls.save()
#PATCH for review, after review use STEP 2
#*************************** read an allready xlsx for standard values patch ********************
xls_name = root_path + 'DataFrame_FAKE.xlsx'
result = pd.read_excel(xls_name)
dt = result['DateTime']
newdt = []
start_dt = datetime.strptime(dt[0], "%Y-%m-%dT%H:%M:%S")
diff_days = datetime.now().day - start_dt.day
for i in range(len(dt)):
temp_dt = datetime.strptime(dt[i], "%Y-%m-%dT%H:%M:%S")
newdt.append((temp_dt + timedelta(diff_days)).isoformat())
result = result.drop('DateTime', 1)
result['DateTime'] = newdt
# Store data frame to xlsx file
dfxls = pd.ExcelWriter(directory + "/" + "DataFrame_FAKE_Results.xlsx")
result.to_excel(dfxls,'Sheet1', index=False)
dfxls.save()
#******************************************************************************************
# End Timing Step 2
end_step2 = time.time()
time_duration_step.append( end_step2 - start_step2 )
#----------------------------------------------------------------------------------------------
    # STEP 3: Calculates the Discomfort Index (DI) for heatwave based on Temperature and Humidity
#----------------------------------------------------------------------------------------------
#
# Start Timing Step 3
start_step3 = time.time()
# constant threshold for temperature variable (from papers ~ 14.5)
InitTemp = 25 #14.5
model_param = 0.55
#*************************************************************
#For PATCH
DSDI = Heatwave_Disconfort_Index(result, InitTemp, model_param)
#******************************************************************
# For Step 2
#DSDI = Heatwave_Disconfort_Index(res, InitTemp, model_param)
dfxls = pd.ExcelWriter(directory + "/" + "DSDI_Results.xlsx")
DSDI.to_excel(dfxls,'Sheet1', index=False)
dfxls.save()
#--------------------------------------------------------------------------------------------
# STEP 3.1: Calculate the Heatwave Overall Crisis Level per day over the 6 points
#--------------------------------------------------------------------------------------------
#
unMeasNum = DSDI['Measurement_Number'].unique()
HOCL = []
# Center of the points of interest
center_points = []
N = float(len(points))
avglat = 0
avgln = 0
for p in points:
avglat = avglat + p['lat']
avgln = avgln + p['long']
center_points = [ round(avglat/N,5), round(avgln/N,5) ]
#print(center_points)
# list of days
days = []
unique_dates = list(DSDI['DateTime'].unique())
for i in range(len(unique_dates)):
days.append(unique_dates[i].split('T')[0].split('-')[2])
days = list(set(days))
#print(days)
# Instead the above code:
# datetime.strptime(df['DataTime'].iloc[0], "%Y-%m-%dT%H:%M:%S")
for d in range(len(days)):
# Split DSDI per day
        ds = DataFrame()
# coding: utf-8
ipy = False
try:
from IPython import get_ipython
ipython = get_ipython()
if ipython:
ipy = True
except:
pass
if not ipy:
print(f"iPy: {ipy}")
import warnings
# https://stackoverflow.com/questions/40845304/runtimewarning-numpy-dtype-size-changed-may-indicate-binary-incompatibility
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import os
import sys
import datetime
import numbers
import subprocess
import uuid
import string
import json
import requests
from io import StringIO
import re
import math
import types
import pandas as pd
import numpy as np
import sklearn as sk
from sklearn import linear_model
import plotly.offline as py
import plotly.graph_objs as go
import plotly.graph_objs.layout as gol
if ipy:
py.init_notebook_mode()
from pathlib import Path
from bs4 import BeautifulSoup
from pyfinance import ols
import statsmodels.api as sm
from bidi import algorithm as bidialg
import matplotlib
import matplotlib.pyplot as plt
if ipy:
get_ipython().run_line_magic('matplotlib', 'inline')
#matplotlib.rcParams['figure.figsize'] = (20.0, 10.0) # Make plots bigger
plt.rcParams['figure.figsize'] = [12.0, 8.0]
# In[ ]:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.float_format', lambda x: '{:,.4f}'.format(x))
pd.set_option('display.max_rows', 30)
pd.set_option('display.max_columns', 30)
from IPython.display import clear_output, display
from framework.utils import *
from framework.base import *
from framework.pca import *
from framework.meta_data import *
from framework.stats_basic import *
from framework.stats import *
from framework.draw_downs import *
from framework.RpySeries import *
from framework.asset_classes import *
from framework.zscores_table import *
from framework.yields import *
from framework.data_sources_special import *
import framework.meta_data_dfs as meta_dfs
import framework.conf as conf
import framework.cefs as cefs
import framework.etfs as etfs
import framework.etfs_high_yield as etfs_high_yield
# In[ ]:
def pd_from_dict(d):
return pd.DataFrame.from_dict(d, orient='index').T.sort_index()
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
import scipy.optimize
from datetime import datetime as dt
def xnpv(rate, values, dates):
'''Equivalent of Excel's XNPV function.
>>> from datetime import date
>>> dates = [date(2010, 12, 29), date(2012, 1, 25), date(2012, 3, 8)]
>>> values = [-10000, 20, 10100]
>>> xnpv(0.1, values, dates)
-966.4345...
'''
if rate <= -1.0:
return float('inf')
d0 = dates[0] # or min(dates)
return sum([ vi / (1.0 + rate)**((di - d0).days / 365.0) for vi, di in zip(values, dates)])
def xirr(values, dates):
'''Equivalent of Excel's XIRR function.
>>> from datetime import date
>>> dates = [date(2010, 12, 29), date(2012, 1, 25), date(2012, 3, 8)]
>>> values = [-10000, 20, 10100]
>>> xirr(values, dates)
0.0100612...
'''
# we prefer to try brentq first as newton keeps outputting tolerance warnings
try:
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates), -1.0, 1e10)
#return scipy.optimize.newton(lambda r: xnpv(r, values, dates), 0.0, tol=0.0002)
except RuntimeError: # Failed to converge?
return scipy.optimize.newton(lambda r: xnpv(r, values, dates), 0.0, tol=0.0002)
#return scipy.optimize.brentq(lambda r: xnpv(r, values, dates), -1.0, 1e10)
#xirr([-100, 100, 200], [dt(2000, 1, 1), dt(2001, 1, 1), dt(2002, 1, 1)])
# In[ ]:
def curr_price(symbol):
if symbol in conf.ignoredAssets: return 0
return get(symbol)[-1]
#def getForex(fromCur, toCur):
# if fromCur == toCur: return 1
# if toCur == "USD":
# return get(fromCur + "=X", "Y")
# if fromCur == "USD":
# return get(toCur + "=X", "Y").map(lambda x: 1.0/x)
# In[ ]:
# In[ ]:
# conf_cache_fails = False
# conf_cache_memory = False
# conf_cache_disk = False
# conf = GetConf(splitAdj=True, divAdj=True, cache=True, mode="TR", source="TASE", secondary=None)
# ds = TASEDataSource("TASE")
# df = ds.get(Symbol("01116441"), conf)
# df = ds.get(Symbol("05117478"), conf)
# df = ds.get(Symbol("137"), conf)
# df
# In[ ]:
# fetching data
if not "Wrapper" in locals():
class Wrapper(object):
def __init__(self, s):
#self.s = s
object.__setattr__(self, "s", s)
def __getattr__(self, name):
attr = self.s.__getattribute__(name)
if hasattr(attr, '__call__'):
def newfunc(*args, **kwargs):
result = attr(*args, **kwargs)
if type(result) is pd.Series:
result = Wrapper(result)
return result
return newfunc
if type(attr) is pd.Series:
attr = Wrapper(attr)
return attr
def __setattr__(self, name, value):
self.s.__setattr__(name, value)
def __getitem__(self, item):
return wrap(self.s.__getitem__(item), self.s.name)
# def __truediv__(self, other):
# divisor = other
# if type(other) is Wrapper:
# divisor = other.s
# series = self.s / divisor
# name = self.name
# if type(other) is Wrapper:
# name = self.s.name + " / " + other.s.name
# return wrap(series, name)
def __truediv__(self, other):
return Wrapper.doop(self, other, "/", lambda x, y: x / y)
def __rtruediv__(self, other):
return Wrapper.doop(self, other, "/", lambda x, y: x / y, right=True)
def doop(self, other, opname, opLambda, right=False):
divisor = other
if type(other) is Wrapper:
divisor = other.s
if right:
series = opLambda(divisor, self.s)
else:
series = opLambda(self.s, divisor)
name = self.name
if type(other) is Wrapper:
if right:
name = other.s.name + " " + opname + " " + self.s.name
else:
name = self.s.name + " " + opname + " " + other.s.name
return wrap(series, name)
def __sub__(self, other):
return Wrapper.doop(self, other, "-", lambda x, y: x - y)
#def __rsub__(self, other):
# return Wrapper.doop(self, other, "-", lambda x, y: x - y, right=True)
def __mul__(self, other):
return Wrapper.doop(self, other, "*", lambda x, y: x * y)
def __rmul__(self, other):
return Wrapper.doop(self, other, "*", lambda x, y: x * y, right=True)
# error in ('raise', 'ignore'), ignore will return None
# plotting
from plotly.graph_objs import *
def createVerticalLine(xval):
shape = {
'type': 'line',
#'xref': 'x',
'x0': xval,
'x1': xval,
'yref': 'paper',
'y0': 0,
'y1': 1,
#'fillcolor': 'blue',
'opacity': 1,
'line': {
'width': 1,
'color': 'red'
}
}
return shape
def createHorizontalLine(yval):
shape = {
'type': 'line',
'xref': 'paper',
'x0': 0,
'x1': 1,
#'yref': 'x',
'y0': yval,
'y1': yval,
#'fillcolor': 'blue',
'opacity': 1,
'line': {
'width': 1,
'color': 'red'
}
}
return shape
def plot(*arr, log=True, title=None, legend=True, lines=True, markers=False, annotations=False, xlabel=None, ylabel=None, show_zero_point=False, same_ratio=False):
data = []
shapes = []
mode = ''
if lines and markers:
mode = 'lines+markers'
elif lines:
mode = 'lines'
elif markers:
mode = 'markers'
if annotations:
mode += '+text'
hlines = []
min_date = None
for val in arr:
# series
if is_series(val):
val = unwrap(val)
name = get_pretty_name(val.name)
text = name
try:
text = lmap(get_pretty_name ,val.names)
except:
pass
# we add .to_pydatetime() since in Windows (and Chrome mobile iOS?) we get a numeric axis instead of date axis without this
x = val.index
if isinstance(x, pd.DatetimeIndex):
x = x.to_pydatetime()
data.append(go.Scatter(x=x, y=val, name=name, text=text, mode=mode, textposition='middle right', connectgaps=False))
start_date = s_start(val)
if start_date:
if min_date is None:
min_date = start_date
elif start_date:
min_date = min(min_date, start_date)
# vertical date line
elif isinstance(val, datetime.datetime):
shapes.append(createVerticalLine(val))
# vertical date line
elif isinstance(val, np.datetime64):
shapes.append(createVerticalLine(val.astype(datetime.datetime)))
# horizontal value line
elif isinstance(val, numbers.Real):
shapes.append(createHorizontalLine(val))
if val == 0:
log = False
elif is_named_number(val):
hlines.append(val)
else:
raise Exception("unsupported value type: " + str(type(val)))
for val, txt in hlines:
shapes.append(createHorizontalLine(val))
data.append(go.Scatter(x=[min_date], y=[val], text=txt, mode='text', textposition='top right', showlegend=False))
if val == 0:
log = False
for d in data:
d = d.y
if isinstance(d, tuple): # for named numbers
continue
if d is None:
continue
if isinstance(d, np.ndarray):
d = d[~pd.isnull(d)]
if np.any(d <= 0):
log = False
mar = 30
margin=gol.Margin(
l=mar,
r=mar,
b=mar,
t=mar,
pad=0
)
#bgcolor='#FFFFFFBB',bordercolor='#888888',borderwidth=1,
if legend:
legendArgs=dict(x=0,y=-0.06,traceorder='normal', orientation='h', yanchor='top',
bgcolor='rgb(255,255,255,50)',bordercolor='#888888',borderwidth=1,
font=dict(family='sans-serif',size=12,color='#000'),
)
else:
legendArgs = {}
yaxisScale = "log" if log else None
rangemode = "tozero" if show_zero_point else "normal"
yaxis = dict(rangemode=rangemode, type=yaxisScale, autorange=True, title=ylabel)
if same_ratio:
yaxis['scaleanchor'] = 'x'
yaxis['scaleratio'] = 1
layout = go.Layout(legend=legendArgs,
showlegend=legend,
margin=margin,
yaxis=yaxis, # titlefont=dict(size=18)
xaxis=dict(rangemode=rangemode, title=xlabel), # titlefont=dict(size=18)
shapes=shapes,
title=title,
hovermode = 'closest')
fig = go.Figure(data=data, layout=layout)
if not ipy:
warn("not plotting, no iPython env")
return
py.iplot(fig)
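# e.g. (illustrative; `some_series` is a placeholder): series are drawn as lines, bare numbers as
# horizontal lines, and datetimes as vertical lines:
# plot(some_series, 100, datetime.datetime(2020, 3, 23), title="example")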
# simple X, Y scatter
def plot_scatter_xy(x, y, names=None, title=None, xlabel=None, ylabel=None, show_zero_point=False, same_ratio=False):
ser = pd.Series(y, x)
if names:
ser.names = names
plot(ser, lines=False, markers=True, annotations=True, legend=False, log=False, title=title, xlabel=xlabel, ylabel=ylabel, show_zero_point=show_zero_point, same_ratio=same_ratio)
# this also supports line-series and single points
# each point must be a series with length=1
def plot_scatter(*lst, title=None, xlabel=None, ylabel=None, show_zero_point=False, same_ratio=False):
plot(*lst, lines=True, markers=True, annotations=True, legend=False, log=False, title=title, xlabel=xlabel, ylabel=ylabel, show_zero_point=show_zero_point, same_ratio=same_ratio)
# show a stacked area chart normalized to 100% of multiple time series
def plotly_area(df, title=None):
tt = df.div(df.sum(axis=1), axis=0)*100 # normalize to summ 100
tt = tt.reindex(tt.mean().sort_values(ascending=False).index, axis=1) # sort columns by mean value
tt = tt.sort_index()
tt2 = tt.cumsum(axis=1) # calc cum-sum
data = []
for col in tt2:
s = tt2[col]
x = s.index
if isinstance(x, pd.DatetimeIndex):
x = x.to_pydatetime()
trace = go.Scatter(
name=col,
x=x,
y=s.values,
text=["{:.1f}%".format(v) for v in tt[col].values], # use text as non-cumsum values
hoverinfo='name+x+text',
mode='lines',
fill='tonexty'
)
data.append(trace)
mar = 30
margin=gol.Margin(l=mar,r=mar,b=mar,t=mar,pad=0)
legend=dict(x=0,y=1,traceorder='reversed',
#bgcolor='#FFFFFFBB',bordercolor='#888888',borderwidth=1,
bgcolor='rgb(255,255,255,50)',bordercolor='#888888',borderwidth=1,
font=dict(family='sans-serif',size=12,color='#000'),
)
layout = go.Layout(margin=margin, legend=legend, title=title,
showlegend=True,
xaxis=dict(
type='date',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'
)
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='stacked-area-plot')
# In[ ]:
# data processing
def doClean(data):
return [s.dropna() if is_series(s) else s for s in data]
def try_parse_date(s, format):
try:
return datetime.datetime.strptime(s, format)
except ValueError:
return None
def easy_try_parse_date(s):
return try_parse_date(s, "%d/%m/%Y") or try_parse_date(s, "%d.%m.%Y") or try_parse_date(s, "%d-%m-%Y")
def do_sort(data):
sers = lfilter(is_series, data)
non_sers = lfilter(is_not_series, data)
sers = sorted(sers, key=lambda x: x.index[0])
return sers + non_sers
def show(*data, trim=True, trim_end=False, align=True, align_base=None, ta=True, cache=None, mode=None, source=None, remode=None, untrim=None, silent=False, sort=True, drop_corrupt=False, **plotArgs):
getArgs = {}
if not mode is None:
getArgs["mode"] = mode
if not cache is None:
getArgs["cache"] = cache
if not source is None:
getArgs["source"] = source
if not remode is None:
getArgs["remode"] = remode
if not untrim is None:
getArgs["untrim"] = untrim
if not drop_corrupt is None:
getArgs["drop_corrupt"] = drop_corrupt
data = flattenLists(data)
items = []
for x in data:
if x is None:
continue
if isinstance(x, pd.DataFrame):
items += [x[c] for c in x]
elif isinstance(x, datetime.datetime) or isinstance(x, np.datetime64):
items.append(x)
elif isinstance(x, str) and easy_try_parse_date(x):
items.append(easy_try_parse_date(x))
elif isinstance(x, numbers.Real):
items.append(x)
elif is_named_number(x):
items.append(x)
else:
x = get(x, **getArgs)
items.append(x)
data = items
data = doClean(data)
data = [s for s in data if not is_series(s) or len(s) > 0]
dataSeries = [s for s in data if is_series(s)]
if ta == False:
trim = False
align = False
elif ta == 'rel':
trim = False
align = 'rel'
if any([s[unwrap(s)<0].any() for s in dataSeries]):
align = False
if trim:
data = doTrim(data, trim=trim, trim_end=trim_end)
if align:
if align == "rel":
data = align_rel(data, base=align_base)
else:
data = doAlign(data)
if sort:
data = do_sort(data)
if not silent:
plot(*data, **plotArgs)
else:
return dataSeries
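# e.g. (illustrative; tickers are placeholders): fetch, trim and rebase two symbols, then plot:
# show("SPY", "TLT", mode="TR", trim=True)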
def show_series(s, **args):
show_scatter(range(len(s)), s.values, lines=True, annotations=s.index, show_zero_point=False, **args)
def show_scatter(xs, ys, setlim=True, lines=False, color=None, annotations=None, xlabel=None, ylabel=None, label=None, same_ratio=False, show_zero_point=False, fixtext=False, figure=False):
def margin(s, m=0.05, show_zero_point=False):
mn = min(s)
mx = max(s)
rng = mx-min(mn, 0)
mn = mn - rng*m
mx = mx + rng*m
if show_zero_point:
mn = min(0, mn)
return mn, mx
if len(xs) == 0 or len(ys) == 0:
return
if annotations is None:
if "name" in dir(xs[0]) or "s" in dir(xs[0]):
annotations = [s.name for s in xs]
if figure:
if same_ratio:
plt.figure(figsize=(12, 12))
else:
plt.figure(figsize=(16, 12))
if lines:
plt.plot(xs, ys, marker="o", color=color, label=label)
else:
plt.scatter(xs, ys, color=color, label=label)
if setlim:
if same_ratio:
xmin, xmax = margin(xs, show_zero_point=show_zero_point)
ymin, ymax = margin(ys, show_zero_point=show_zero_point)
mn, mx = min(xmin, ymin), max(xmax, ymax)
plt.xlim(mn, mx)
plt.ylim(mn, mx)
else:
plt.xlim(*margin(xs, show_zero_point=show_zero_point))
plt.ylim(*margin(ys, show_zero_point=show_zero_point))
plt.axhline(0, color='gray', linewidth=1)
plt.axvline(0, color='gray', linewidth=1)
if xlabel: plt.xlabel(xlabel, fontsize=16)
if ylabel: plt.ylabel(ylabel, fontsize=16)
if not annotations is None:
for i, txt in enumerate(annotations):
if fixtext:
txt = globals()["fixtext"](txt)
plt.annotate(txt, (xs[i], ys[i]), fontsize=14)
def show_modes(*lst, **args):
show(*lmap(modes, lst), **args, title="PR/NTR/TR modes")
def show_modes_comp(a, b, show_zero=True):
show([sdiv(x, y) for x,y in zip(modes(a), modes(b))], 1, 0 if show_zero else None, title="relative modes")
def show_scatter_returns(y_sym, x_sym, freq=None):
x_sym, y_sym = get(x_sym), get(y_sym)
x, y = doTrim([x_sym, y_sym])
x, y = sync(x, y)
if freq:
x = x.asfreq(freq)
y = y.asfreq(freq)
x, y = logret(x), logret(y)
show_scatter(x, y, same_ratio=True, xlabel=x_sym.name, ylabel=y_sym.name)
def reduce_series(lst, g_func=None, y_func=None, x_func=None, trim=True):
# first we must set the trim limits for all assets
if not trim is None:
lst = get(lst, trim=trim)
# only them we apply the get function (it will preserve the trims by default)
if not isinstance(g_func, list):
lst = lmap(g_func, lst)
if isinstance(g_func, list):
ys = [[y_func(gf(s)) for gf in g_func] for s in lst]
xs = [[x_func(gf(s)) for gf in g_func] for s in lst]
elif isinstance(y_func, list):
ys = [[yf(s) for yf in y_func] for s in lst]
xs = [[x_func(s)] * len(y_func) for s in lst]
elif isinstance(x_func, list):
xs = [[xf(s) for xf in x_func] for s in lst]
ys = [[y_func(s)] * len(x_func) for s in lst]
res = [pd.Series(y, x) for y, x in zip(ys, xs)]
res = [name(r, get_name(s, nick_or_name=True)) for r, s in zip(res, lst)]
for r, s in zip(res, lst):
r.name = start_year_full_with_name(s)
r.names = [''] * r.shape[0]
r.names[0] = str(s.name)
return res
# def reduce_series(lst, g_func=None, y_func=None, x_func=None, trim=trim):
# if not isinstance(g_func, list):
# lst = lmap(g_func, lst)
# if isinstance(g_func, list):
# ys = [[y_func(gf(s)) for gf in g_func] for s in lst]
# xs = [[x_func(gf(s)) for gf in g_func] for s in lst]
# elif isinstance(y_func, list):
# ys = [[yf(s) for yf in y_func] for s in lst]
# xs = [[x_func(s)] * len(y_func) for s in lst]
# elif isinstance(x_func, list):
# xs = [[xf(s) for xf in x_func] for s in lst]
# ys = [[y_func(s)] * len(x_func) for s in lst]
# res = [pd.Series(y, x) for y, x in zip(ys, xs)]
# res = [name(r, get_name(s, nick_or_name=True)) for r, s in zip(res, lst)]
# for r, s in zip(res, lst):
# r.name = start_year_full_with_name(s)
# r.names = [''] * r.shape[0]
# r.names[0] = str(s.name)
# return res
# experimental
def show_rr2(*lst, g_func=None, y_func=None, x_func=None, risk_func=None, ret_func=None, ret_func_names=None, trim=True, **args):
y_func = y_func or ret_func
x_func = x_func or risk_func
# if g_func is None:
# starts = set(map(_start, filter(is_series, lst)))
# if len(starts) > 1:
# warn("show_rr2 called with untrimmed data, not trimming, results may be inconsistent")
y_func = y_func or cagr
x_func = x_func or ulcer
g_func = g_func or get
sers = lfilter(is_series_or_str, lst)
non_ser = lfilter(is_not_series_or_str, lst)
r = reduce_series(sers, g_func=g_func, y_func=y_func, x_func=x_func, trim=trim)
def f_names(f):
if isinstance(f, list):
return " ➜ ".join(lmap(lambda x: get_func_name(x), f))
return get_func_name(f)
ylabel = " ➜ ".join(ret_func_names) if not ret_func_names is None else f_names(y_func)
set_if_none(args, 'xlabel', f_names(x_func))
set_if_none(args, 'ylabel', ylabel)
set_if_none(args, 'title', f"{sers[0].name.mode} {args['ylabel']} <==> {args['xlabel']} [{f_names(g_func)}]")
plot_scatter(*r, *non_ser, show_zero_point=True, **args)
# e.g.:
# show_risk_return2(*all, g_func=[ft.partial(get, despike=False), get])
def show_rr(*lst, ret_func=None, risk_func=None, trim=True, mode_names=False, lr_fit=False, same_ratio=False, **args):
if ret_func is None: ret_func = cagr
if risk_func is None: risk_func = ulcer
non_ser = lfilter(lambda x: not(is_series_or_str(x) or isinstance(x, list)), lst)
lst = lfilter(lambda x: is_series_or_str(x) or isinstance(x, list), lst)
lst = get(lst, trim=trim)
lst = [x if isinstance(x, list) else [x] for x in lst]
res = [get_risk_return_series(x, ret_func=ret_func, risk_func=risk_func, mode_names=mode_names) for x in lst]
if lr_fit:
xs = [s.index[0] for s in res]
ys = [s.iloc[0] for s in res]
fit = lr(ys, xs, print_r2=True)
if len(fit) > 1:
fit = fit.iloc[::len(fit)-1] # keep first and last only
fit.name = ''
res.insert(0, fit)
if same_ratio:
xs = [s.index[0] for s in res]
ys = [s.iloc[0] for s in res]
rng = [min(min(ys), 0), max(ys)]
f = pd.Series(rng, rng)
f.name = ''
res.insert(0, f)
args['show_zero_point'] = True
set_if_none(args, 'title', f"{get_mode(lst[0][0])} {get_func_name(ret_func)} <==> {get_func_name(risk_func)}")
set_if_none(args, 'xlabel', get_func_name(risk_func))
set_if_none(args, 'ylabel', get_func_name(ret_func))
set_if_none(args, 'same_ratio', same_ratio)
plot_scatter(*non_ser, *res, **args)
showRiskReturn = show_rr # legacy
def get_risk_return_series(lst, ret_func, risk_func, mode_names, **args):
if len(lst) == 0:
return
lst = [get(s) for s in lst]
ys = [ret_func(unwrap(s)) for s in lst]
xs = [risk_func(unwrap(s)) for s in lst]
names = [get_name(s.name, use_sym_name=False, nomode=not mode_names, nick_or_name=True) for s in lst]
res = pd.Series(ys, xs)
res.name = names[0]
res.names = names
return res
#plot(pd.Series(xs, ys), pd.Series(xs, ys+1), lines=False, markers=True)
# def showRiskReturnUtil(lst, ret_func=None, risk_func=None, **args):
# if len(lst) == 0:
# return
# if ret_func is None: ret_func = cagr
# if risk_func is None: risk_func = ulcer
# lst = [get(s) for s in lst]
# ys = [ret_func(s) for s in lst]
# xs = [risk_func(s) for s in lst]
# names = None
# if args.get("annotations", None) is None:
# if "name" in dir(lst[0]) or "s" in dir(lst[0]):
# names = [s.name for s in lst]
# elif args.get("annotations", None) != False:
# names = args.get("annotations", None)
# if names is None:
# names = ["unnamed"] * len(lst)
# names = ["nan" if n is None else n for n in names]
# df = pd.DataFrame({"x": xs, "y": ys, "name": names})
# nans = df[df.isnull().any(axis=1)]["name"]
# if nans is None:
# nans = []
# if len(nans) > 0:
# print(f'dropping series with nan risk/return: {" | ".join(nans)}')
# df = df.dropna()
# xs = df["x"].values
# ys = df["y"].values
# names = df["name"].values
# if args.get("annotations", None) == False:
# names = None
# args['annotations'] = names
# xlabel=risk_func.__name__
# ylabel=ret_func.__name__
# args = set_if_none(args, "show_zero_point", True)
# show_scatter(xs, ys, xlabel=xlabel, ylabel=ylabel, **args)
def show_rr_capture_ratios(*all):
show_rr(*all, ret_func=get_upside_capture_SPY, risk_func=get_downside_capture_SPY, same_ratio=True, lr_fit=True)
def show_rr_modes(*lst, ret_func=None, risk_func=None, modes=['TR', 'NTR', 'PR'], title=None):
def get_data(lst, mode):
return get(lst, mode=mode, trim=True)
data_lst = [get_data(lst, mode) for mode in modes]
all = [list(tup) for tup in zip(*data_lst)]
ret_func = ret_func or cagr
risk_func = risk_func or ulcer
title = title or f"modes {get_func_name(ret_func)} vs {get_func_name(risk_func)}"
show_rr(*all, ret_func=ret_func, risk_func=risk_func, title=title, mode_names=True)
#showRiskReturn(*ntr, ret_func=ret_func)
#for a, b in zip(tr, ntr):
# showRiskReturn([a, b], setlim=False, lines=True, ret_func=ret_func, annotations=False)
def show_rr_modes_mutual_dd_risk_rolling_SPY(*all):
title = f"modes CAGR vs PR mutual_dd_risk_rolling_SPY"
show_rr_modes(*all, risk_func=mutual_dd_rolling_SPY)
def show_rr__cagr__mutual_dd_risk_rolling_pr_SPY(*all, lr_fit=False):
title = f"{all[0].name.mode} CAGR vs PR mutual_dd_risk_rolling_SPY"
show_rr(*all, risk_func=mutual_dd_rolling_pr_SPY, title=title, lr_fit=lr_fit)
def show_rr__yield__mutual_dd_risk_rolling_pr_SPY(*all, yield_func=None):
yield_func = yield_func or get_curr_yield_min2
title = f"{all[0].name.mode} Yield vs PR mutual_dd_risk_rolling_SPY"
show_rr(2, 3, 4, 5, *all, ret_func=yield_func, risk_func=mutual_dd_rolling_pr_SPY, title=title, ylabel=f"{all[0].name.mode} {get_func_name(yield_func)}")
def show_rr_yield(*all, yield_func=None, risk_func=None):
yield_func = yield_func or get_curr_yield_min2
show_rr(2, 3, 4, 5, *all, ret_func=yield_func, risk_func=risk_func)
def show_rr__yield_range__mutual_dd_rolling_pr_SPY(*all):
show_rr2(*all, 2, 3, 4, 5, ret_func=[get_curr_yield_max, get_curr_yield_min], ret_func_names=['max', 'min'], risk_func=mutual_dd_rolling_pr_SPY)
def show_rr__yield_types__ulcer(*lst, ret_func=None, types=['true', 'normal', 'rolling'], mode="TR", title=None):
def get_data(lst, type):
yld = [get_curr_yield(s, type=type) for s in lst]
rsk = lmap(ulcer, lst)
return pd.Series(yld, rsk)
lst = get(lst, mode=mode, trim=True)
res = []
for s in lst:
yld = []
rsk = []
for type in types:
yld.append(get_curr_yield(s, type=type))
rsk.append(ulcer(s))
ser = pd.Series(yld, rsk)
ser.name = s.name
ser.names = [f"{s.name} {t}" for t in types]
res.append(ser)
title = title or f"Risk - {mode} Yield Types"
plot_scatter(*res, title=title, xlabel="ulcer", ylabel="current yield", show_zero_point=True)
def show_risk_itr_pr(*lst, title=None):
def get_data(lst, type):
yld = [get_curr_yield(s, type=type) for s in lst]
rsk = lmap(ulcer, lst)
return pd.Series(yld, rsk)
    lst = get(lst, trim=True)
res = []
for s in lst:
pr = get(s, mode="PR")
itr = get(s, mode="ITR")
pr_ulcer = ulcer(pr)
x = [pr_ulcer, pr_ulcer]
y = [cagr(pr), cagr(itr)]
ser = pd.Series(y, index=x)
ser.name = s.name
ser.names = [s.name, '']
res.append(ser)
title = title or f"PR Risk - ITR Return"
plot_scatter(*res, title=title, xlabel="ulcer", ylabel="cagr", show_zero_point=True)
def show_rr_yield_tr_ntr(*lst, title="Risk - 12m Yield TR-NTR"):
show_rr_modes(*lst, ret_func=get_curr_yield_rolling, modes=['TR', 'NTR'], title=title)
def show_min_max_bands(symbol, n=365, show_symbol=True, ma_=False, elr_fit=True, rlr_fit=True, lr_fit=False, log=True):
n = int(n)
x = get(symbol)
if log:
x = np.log(x)
a = name(mmax(x, n), 'max')
b = name(mmin(x, n), 'min')
c = name(mm(x, n), 'median')
if not show_symbol:
x = None
_ma = ma(x, n) if ma_ else None
_lr = lr(x) if lr_fit else None
_elr = lr_expanding(x, freq="W") if elr_fit else None
_rlr = lr_rolling(x, n // 7, freq="W") if rlr_fit else None
show(c, a, b, x, _ma, _lr, _elr, _rlr, ta=False, sort=False, log=not log)
def show_rolling_beta(target, sources, window=None, rsq=True, betaSum=False, pvalue=False, freq=None, extra=None):
if not isinstance(sources, list):
sources = [sources]
target = get(target)
sources = get(sources)
names = [s.name for s in sources]
target = logret(target)
sources = lmap(logret, sources)
target = unwrap(target)
sources = lmap(unwrap, sources)
sources = pd.DataFrame(sources).T.dropna()
target, sources = sync(target, sources)
if freq:
target = target.asfreq(freq)
sources = sources.asfreq(freq)
if window is None:
window = int(get_anlz_factor(freq))
else:
if window is None:
window = 365
rolling = ols.PandasRollingOLS(y=target, x=sources, window=window)
#rolling.beta.head()
#rolling.alpha.head()
#rolling.pvalue_alpha.head()
#type(rolling.beta["feature1"])
res = []
if pvalue:
_pvalue = rolling.pvalue_beta
_pvalue.columns = [s + " pvalue" for s in names]
res += [_pvalue, 0.05]
if rsq:
rsq = rolling.rsq
rsq.name = "R^2"
res += [rsq]
_beta = rolling.beta
_beta.columns = [s + " beta" for s in names]
res += [_beta]
if betaSum and len(names) > 1:
_betaSum = rolling.beta.sum(axis=1)
_betaSum.name = "beta sum"
res += [_betaSum]
res += [-1, 0, 1]
if not extra is None:
if isinstance(extra, list):
res += extra
else:
res += [extra]
show(res, ta=False)
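# e.g. (illustrative; tickers are placeholders):
# show_rolling_beta("QQQ", ["SPY"], window=365)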
def mix(s1, s2, n=10, do_get=False, **getArgs):
part = 100/n
res = []
for i in range(n+1):
x = {s1: i*part, s2: (100-i*part)}
port = dict_to_port_name(x, drop_zero=True, drop_100=True, use_sym_name=True)
name = dict_to_port_name(x, drop_zero=True, drop_100=True, use_sym_name=False)
if i > 0 and i < n:
name = ''
x = f"{port}={name}"
if do_get:
x = get(x, **getArgs)
#x.name = name
# else:
# x = f"{port}={name}"
res.append(x)
return lmap(unwrap, res)
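# e.g. (illustrative; tickers are placeholders): a 10-step blend of two assets on the risk/return plane:
# show_rr(*mix("SPY", "TLT", n=10, do_get=True))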
# https://stackoverflow.com/questions/38878917/how-to-invoke-pandas-rolling-apply-with-parameters-from-multiple-column
# https://stackoverflow.com/questions/18316211/access-index-in-pandas-series-apply
def roll_ts(s, func, n, dropna=True):
# note that rolling auto-converts int to float: https://github.com/pandas-dev/pandas/issues/15599
# i_ser = pd.Series(range(s.shape[0]))
# res = i_ser.rolling(n).apply(lambda x: func(pd.Series(s.values[x.astype(int)], s.index[x.astype(int)])))
res = s.rolling(n).apply(func, raw=False) # with raw=False, we get a rolling Series :)
res = pd.Series(res.values, s.index)
if dropna:
res = res.dropna()
return res
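# e.g. (illustrative): a rolling 1-year ulcer index for a price series s:
# roll_ts(get(s), ulcer, 365)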
# In[ ]:
from scipy.optimize import minimize
def prep_as_df(target, sources, mode, as_logret=False, as_geom_value=False, freq=None):
if not isinstance(sources, list):
sources = [sources]
target = get(target, mode=mode)
sources = get(sources, mode=mode)
names = [s.name for s in sources]
if freq:
target = target.asfreq(freq).dropna()
sources = [s.asfreq(freq).dropna() for s in sources]
if as_logret:
target = logret(target)
sources = lmap(logret, sources)
target = unwrap(target)
sources = lmap(unwrap, sources)
sources = pd.DataFrame(sources).T.dropna()
target, sources = sync(target, sources)
if as_geom_value:
target = target/target[0]
sources = sources.apply(lambda x: x/x[0], axis=0)
return target, sources
import sklearn.metrics
def lrret(target, sources, pos_weights=True, sum_max1=True, sum1=True, fit_values=True,
return_res=False, return_ser=True, return_pred=False, return_pred_fit=False, res_pred=False, show_res=True, freq=None, obj="sum_sq_log", mode=None, do_align=True):
def apply(x, bias):
res = x[0]*int(bias) + i_logret((sources_logret * x[1:]).sum(axis=1))
return res
def value_objective(x):
pred = apply(x, bias=True)
if obj == "log_sum_sq":
            # using log seems to work really well; it solves the "Positive directional derivative for linesearch" issue
# for lrret(sc, lc).
# https://stackoverflow.com/questions/11155721/positive-directional-derivative-for-linesearch
# here it's mentioned that large valued objective functions can cause this
# thus we use the log
# add +1 to avoid log(0)
return np.log(1+np.sum((target - pred) ** 2))
if obj == "sum_sq":
return np.sum((target - pred) ** 2)
# this provides a better overall fit, avoiding excess weights to later (larger) values
if obj == "sum_sq_log":
return np.sum((np.log(target) - np.log(pred)) ** 2)
raise Exception("invalid obj type: " + obj)
def returns_objective(x):
pred = apply(x, bias=False)
return np.sum((logret(target) - logret(pred)) ** 2)
# prep data
if not isinstance(sources, list):
sources = [sources]
sources = [s for s in sources if (not s is target) and getName(s) != getName(target)]
orig_sources = sources
orig_target = get(target, mode=mode)
target, sources = prep_as_df(target, sources, mode, as_geom_value=fit_values and do_align, freq=freq)
sources_logret = sources.apply(lambda x: logret(x, dropna=False), axis=0)
n_sources = sources_logret.shape[1]
# miniization args
cons = []
bounds = None
if pos_weights:
# using bounds, instead of cons, works much better
#cons.append({'type': 'python ', 'fun' : lambda x: np.min(x[1:])})
if sum1:
x_bound = (0, 1)
else:
x_bound = (0, None)
bounds = [(None, None)] + ([x_bound] * n_sources)
if sum1:
if sum_max1:
cons.append({'type': 'ineq', 'fun' : lambda x: 1-np.sum(x[1:])}) # sum<=1 same as 1-sum>=0
else:
cons.append({'type': 'eq', 'fun' : lambda x: np.sum(x[1:])-1})
objective = value_objective if fit_values else returns_objective
def run_optimize(rand_x0):
n = sources_logret.shape[1]
if rand_x0:
x0 = np.random.rand(n+1)
if sum1:
x0 /= np.sum(x0)
else:
x0 = np.full(n+1, 1/n)
#x0 += np.random.randn(n+1)*(1/n)
#x0 = np.maximum(x0, 0)
x0[0] = 0
        # minimize, to use constraints, we can choose from COBYLA / SLSQP / trust-constr
# COBYLA: results are not stable, and vary greatly from run to run
# also doesn't support equality constraint (sum1)
#options={'rhobeg': 0.1, 'maxiter': 10000, 'disp': True, 'catol': 0.0002}
#res = minimize(objective, x0, constraints=cons, method="COBYLA", options=options)
# SLSQP: provides stable results from run to run, and support eq constraints (sum1)
# using a much smaller eps than default works better (more stable and better results)
options={'maxiter': 1000, 'ftol': 1e-06, 'iprint': 1, 'disp': False, 'eps': 1.4901161193847656e-08/1000}
res = minimize(objective, x0, constraints=cons, method="SLSQP", options=options, bounds=bounds)
return res
def getR2_vanilla(y, f):
return sklearn.metrics.r2_score(y, f)
def getR2(y, f):
ssTot = np.sum((y-np.mean(y))**2)
ssRes = np.sum((y-f)**2)
return 1-ssRes/ssTot
def getR2(y, f):
y_mean_logret = mean_logret_series(y)
ssTot = np.sum((y-y_mean_logret)**2)
ssRes = np.sum((y-f)**2)
return 1-ssRes/ssTot
def getR2_lr(y, f):
y_lr = lr(y)
ssTot = np.sum((y-y_lr)**2)
ssRes = np.sum((y-f)**2)
return 1-ssRes/ssTot
def getR2_lr_log(y, f):
y_lr = lr(y)
y_log = np.log(y)
ssTot = np.sum((y_log - np.log(y_lr))**2)
ssRes = np.sum((y_log - np.log(f))**2)
return 1-ssRes/ssTot
def finalize(res):
# results
pred = apply(res.x, bias=fit_values)
pred.name = "~" + target.name + " - fit"
if np.isnan(res.x).any():
r2 = np.nan
print("nan R^2")
else:
if fit_values:
r2 = getR2_lr_log(target, pred)
else:
r2 = getR2_vanilla(logret(target), logret(pred))
r2 = np.exp(r2) / math.e
# calc adjusted R2
n = sources.shape[0]
k = sources.shape[1]
r2 = 1-((1-r2)*(n-1)/(n-k-1))
res["R^2"] = r2
return pred
# uniform x0 works best usually, but when it doesn't random seems to work well
res = run_optimize(rand_x0=False)
if not res.success:
silent = False
if not sum1 and sum_max1 and res["message"] == "Positive directional derivative for linesearch":
silent = True
#if not silent:
print("lrret: 1st attempt failed with: " + res["message"])
res2 = run_optimize(rand_x0=True)
if not res2.success:
silent = False
if not sum1 and sum_max1 and res2["message"] == "Positive directional derivative for linesearch":
silent = True
#if not silent:
print("lrret: 2nd attempt failed with: " + res2["message"])
if res["R^2"] > res2["R^2"] and not np.isnan(res["R^2"]) and not (pos_weights and res["message"] == "Inequality constraints incompatible"):
#if not silent:
print(f"lrret: 1st attempt (uniform) was better, 1st:{res['R^2']}, 2nd: {res2['R^2']}")
else:
#if not silent:
print(f"lrret: 2nd attempt (random) was better, 1st:{res['R^2']}, 2nd: {res2['R^2']}")
res = res2
names = sources.columns
ser = pd.Series(dict(zip(names, [round(x, 6) for x in res.x[1:]])))
ser = ser.sort_values(ascending=False)
_pred = finalize(res)
pred = _pred
if True:
#sources_dict = {s.name: s for s in sources}
#d = Portfolio([(s, ser[getName(s)]*100) for s in orig_sources])
d = (ser*100).to_dict()
if True and not orig_target.name.startswith("~"):
try:
pred = name(get(d, mode=get_mode(orig_target.name)), get_pretty_name(orig_target.name) + " - fit")
pred = pred / pred[_pred.index[0]] * _pred[0]
port = dict_to_port_name(d, drop_zero=True, drop_100=True, use_sym_name=True)
pred_str = f"{port} = {target.name} - fit"
except:
pred_str = '<NA>'
warn("failed to get portfolio based on regerssion")
# pred, _pred = doAlign([pred, _pred])
if show_res:
show(pred, _pred, target, align=not fit_values, trim=False)
print(f"R^2: {res['R^2']}")
if pos_weights and np.any(ser < -0.001):
print("lrret WARNING: pos_weights requirement violated!")
if return_pred_fit:
return _pred
if return_pred:
print(ser)
return pred_str
#if res_pred:
res["pred"] = pred_str
if return_res:
res["ser"] = ser
return res
if return_ser:
return ser
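# Hypothetical usage sketch for lrret, kept commented out because it would fetch
# quotes through the notebook's get()/show() framework (ticker names here are
# illustrative only, not from the original analysis):
# weights = lrret("VTI", ["SPY", "QQQ", "IWM"])   # returns a pd.Series of weights, sum <= 1
# weights = lrret("VTI", ["SPY", "QQQ", "IWM"], fit_values=False)  # fit log-returns instead of values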
def lrret_old(target, regressors, sum1=False):
regressors = [get(x) for x in regressors]
target = get(target)
all = [unwrap(logret(x)) for x in (regressors + [target])]
# based on: https://stats.stackexchange.com/questions/21565/how-do-i-fit-a-constrained-regression-in-r-so-that-coefficients-total-1?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
    # NOTE: not finished, not working
if sum1:
allOrig = all
last = all[-2]
all = [r - last for r in (all[:-2] + [all[-1]])]
data = pd.DataFrame(all).T
data = data.dropna()
y = data.iloc[:, -1]
X = data.iloc[:, :-1]
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(X, y)
if sum1:
weights = np.append(regr.coef_, 1-np.sum(regr.coef_))
all = allOrig
data = pd.DataFrame(all).T
data = data.dropna()
y = data.iloc[:, -1]
X = data.iloc[:, :-1]
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(X, y)
regr.coef_ = weights
y_pred = regr.predict(X)
print('Regressors:', [s.name for s in regressors])
print('Coefficients:', regr.coef_)
#print('Coefficients*:', list(regr.coef_) + [1-np.sum(regr.coef_)])
#print("Mean squared error: %.2f" % mean_squared_error(diabetes_y_test, diabetes_y_pred))
print('Variance score r^2: %.3f' % sk.metrics.r2_score(y, y_pred))
y_pred = i_logret(pd.Series(y_pred, X.index))
y_pred.name = target.name + " fit"
#y_pred = "fit"
y_pred = Wrapper(y_pred)
show(target , y_pred)
return y_pred
def lrret_incremental(target, sources, show=True, show_steps=False, max_n=None, **lrret_args):
if not isinstance(sources, list):
sources = [sources]
target, *sources = get([target] + sources, trim=True)
sources = sources.copy()
top = []
cum_sources = []
while len(sources) > 0:
if max_n and len(top) == max_n:
break
allres = [lrret(target, cum_sources + [source], return_res=True, return_ser=False, res_pred=True, show_res=False, **lrret_args) for source in sources]
max_i = np.argmax([res["R^2"] for res in allres])
max_res = allres[max_i]
max_source = sources[max_i]
top.append((max_res["R^2"], max_source.name))
cum_sources.append(max_source)
del sources[max_i]
port = dict_to_port_name((max_res["ser"]*100).to_dict())
print(f"{port} R^2: {max_res['R^2']:.3f} start:{max_res['pred'].index[0]}")
if show_steps:
res = pd.Series(*list(zip(*top))).fillna(method='bfill')
#clear_output()
#plt.figure()
show_series(res)
plt.axhline(1, c="blue");
plt.axhline(0.995, c="green");
plt.axhline(0.99, c="orange");
plt.ylim(ymax=1.003)
plt.title("Cumulative R^2")
plt.show()
res = pd.Series(*list(zip(*top))).fillna(method='bfill')
if show:
#clear_output()
plt.figure()
show_series(res)
plt.axhline(1/math.e, c="orange");
plt.axhline(math.exp(0.9)/math.e, c="green");
plt.ylim(ymax=1.003)
plt.title("Cumulative R^2")
plt.show()
return res
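# Hypothetical usage sketch (commented out; relies on the same framework helpers):
# greedily add the source with the highest incremental R^2, stopping after 3 picks.
# lrret_incremental("VTI", ["SPY", "TLT", "GLD", "IWM"], max_n=3)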
def lrret_mutual_cross(*sources, show=True, **lrret_args):
if len(sources) <= 1:
return pd.Series()
sources = get(sources, trim=True)
res = []
for target in sources:
rest = [s for s in sources if s.name != target.name]
#rest = sources.copy()
#rest.remove(target)
rs = lrret(target, rest, return_res=True, return_ser=False, show_res=False, **lrret_args)
res.append((rs['R^2'], target.name))
port = dict_to_port_name((rs["ser"]*100).to_dict(), drop_zero=True)
print(f"{target.name}: {port} R^2: {rs['R^2']:.3f}")
res = pd.Series(*list(zip(*res))).fillna(method='bfill')
res = res.sort_values()
if show:
show_series(res, figure=False)
#plt.axhline(, c="blue");
plt.axhline(1/math.e, c="orange");
plt.axhline(math.exp(0.9)/math.e, c="green");
plt.ylim(ymax=1.003)
plt.title("mutual R^2")
#plt.show()
return res
def lrret_mutual_incremental(*sources, base=None, show=True, max_n=None, **lrret_args):
if base is None:
base = lc
base, *sources = get([base] + list(sources), trim=True)
cum_sources = [base]
top = []
while len(sources) > 0:
if max_n and len(top) == max_n:
break
allres = [lrret(target, cum_sources, return_res=True, return_ser=False, show_res=False, **lrret_args) for target in sources]
max_i = np.argmin([res["R^2"] for res in allres])
max_res = allres[max_i]
max_source = sources[max_i]
top.append((max_res["R^2"], max_source.name))
cum_sources.append(max_source)
del sources[max_i]
port = dict_to_port_name((max_res["ser"]*100).to_dict(), drop_zero=True)
print(f"{max_source.name}: {port} R^2: {max_res['R^2']:.3f}")
if len(top) == 1:
cum_sources.remove(base) # we only need the base for the first one
res = pd.Series(*list(zip(*top))).fillna(method='bfill')
if show:
#clear_output()
#plt.figure()
show_series(res, figure=False)
#plt.axhline(, c="blue");
plt.axhline(1/math.e, c="orange");
plt.axhline(math.exp(0.9)/math.e, c="green");
plt.ylim(ymax=1.003)
plt.title("incremental R^2 (S&P500 seed)")
plt.show()
return res
def lrret_mutual(*sources, base=None, show=True, max_n=None, **lrret_args):
print()
print("Cross:")
res_cross = lrret_mutual_cross(*sources, show=False)
print()
print("Incremental:")
res_inc = lrret_mutual_incremental(*sources, show=False)
if show:
plt.figure()
show_series(res_cross, figure=True, label="Cross")
#plt.gca().lines[-1].set_label("cross")
show_series(res_inc, figure=False, label="Incremental")
#plt.gca().lines[-1].set_label("inc")
plt.axhline(1/math.e, c="orange");
plt.axhline(math.exp(0.9)/math.e, c="green");
plt.ylim(ymax=1.003)
plt.title("R^2")
plt.legend()
plt.show()
def mean_logret_series(y):
res = name(pd.Series(i_logret(np.full_like(y, logret(y).mean())), y.index), y.name + " mean logret")
res *= y[0]/res[0]
return res
def liquidation(s):
return (s/s[0]-1)*0.75+1
def mean_series_perf(all):
df = pd.DataFrame(lmap(ret, all)).T.dropna(how='all')
ss = rpy(i_ret(df.mean(axis=1)))
return name(ss, "~mean perf")
def median_series_perf(all):
df = pd.DataFrame(lmap(ret, all)).T.dropna(how='all')
ss = rpy(i_ret(df.median(axis=1)))
return name(ss, "~median perf")
def median_series(lst, align):
if align:
lst = doAlign(lst)
df = pd.DataFrame([unwrap(s) for s in lst]).T
return name(rpy(df.median(axis=1)), "~median")
def mean_series(lst, align):
if align:
lst = doAlign(lst)
df = pd.DataFrame([unwrap(s) for s in lst]).T
return name(rpy(df.mean(axis=1)), "~mean")
def sdiv(a, b):
if isinstance(a,list):
return lmap(lambda x: sdiv(x, b), a)
a, b = get([a, b])
x = a / b
x.name = get_pretty_name(a) + " / " + get_pretty_name(b)
return x
# In[ ]:
def html_title(text):
display(HTML(f"<h1>{text}</h1>"))
from IPython.core.display import display, Javascript
import time, os, stat
def save_notebook(verbose=True, sleep=True):
    display(Javascript('console.log(document.querySelector("div#save-notbook button").click())'))  # display() is required for the JS to actually run
if verbose:
print("save requested, sleeping to ensure execution ..")
if sleep:
time.sleep(15)
if verbose:
print("done")
# save live notebook at first run to make sure it's the latest modified file in the folder (for later publishing)
save_notebook(False, False)
def publish(name=None):
def file_age_in_seconds(pathname):
return time.time() - os.stat(pathname)[stat.ST_MTIME]
filename = get_ipython().getoutput('ls -t *.ipynb | grep -v /$ | head -1')
filename = filename[0]
age = int(file_age_in_seconds(filename))
min_age = 5
if age > min_age:
print(filename + " file age is " + str(age) + " seconds old, auto saving current notebook ..")
save_notebook()
filename = get_ipython().getoutput('ls -t *.ipynb | grep -v /$ | head -1')
filename = filename[0]
if not name:
name = str(uuid.uuid4().hex.upper())
save()
print("Publishing " + filename + " ..")
res = subprocess.call(['bash', './publish.sh', name])
if res == 0:
print("published successfuly!")
print("https://nbviewer.jupyter.org/github/ertpload/test/blob/master/__name__.ipynb".replace("__name__", name))
else:
res = subprocess.call(['bash', './../publish.sh', name])
if res == 0:
print("published successfuly!")
print("https://nbviewer.jupyter.org/github/ertpload/test/blob/master/__name__.ipynb".replace("__name__", name))
else:
print("Failed!")
# In[ ]:
from IPython.display import display,Javascript
def save():
display(Javascript('IPython.notebook.save_checkpoint();'))
# In[ ]:
# make the plotly graphs look wider on mobile
from IPython.core.display import display, HTML
s = """
<style>
div.rendered_html {
max-width: 10000px;
}
#nbextension-scratchpad {
width: 80%;
}
.container {
width: 95%;
}
</style>
"""
display(HTML(s))
# In[ ]:
# interception to auto-fetch hardcoded symbols e.g:
# show(SPY)
# this should run last in the framework code, or it attempts to download unrelated symbols :)
from IPython.core.inputtransformer import *
intercept = ipy == True
intercept = False # rewrite the code: https://ipython.readthedocs.io/en/stable/config/inputtransforms.html
if intercept and not "my_transformer_tokens_instance" in locals():
#print("transformation hook init")
attempted_implied_fetches = set()
ip = get_ipython()
@StatelessInputTransformer.wrap
def my_transformer(line):
if line.startswith("x"):
return "specialcommand(" + repr(line) + ")"
return line
@TokenInputTransformer.wrap
def my_transformer_tokens(tokens):
global trimmed_messages
trimmed_messages.clear()
for i, x in enumerate(tokens):
if x.type == 1 and x.string.isupper() and x.string.isalpha() and len(x.string) >= 2: ## type=1 is NAME token
if i < len(tokens)-1 and tokens[i+1].type == 53 and tokens[i+1].string == "=":
attempted_implied_fetches.add(x.string)
continue
if x.string in attempted_implied_fetches or x.string in ip.user_ns:
continue
try:
ip.user_ns[x.string] = get(x.string)
except:
print("Failed to fetch implied symbol: " + x.string)
attempted_implied_fetches.add(x.string)
return tokens
my_transformer_tokens_instance = my_transformer_tokens()
ip.input_splitter.logical_line_transforms.append(my_transformer_tokens_instance)
ip.input_transformer_manager.logical_line_transforms.append(my_transformer_tokens_instance)
# In[ ]:
def date(s):
return pd.to_datetime(s, format="%Y-%m-%d")
# In[5]:
# another options for interception:
# ```python
# class VarWatcher(object):
# def __init__(self, ip):
# self.shell = ip
# self.last_x = None
#
# def pre_execute(self):
# if False:
# for k in dir(self.shell):
# print(k, ":", getattr(self.shell, k))
# print()
# #print("\n".join(dir(self.shell)))
# if "content" in self.shell.parent_header:
# code = self.shell.parent_header['content']['code']
# self.shell.user_ns[code] = 42
# #print(self.shell.user_ns.get('ASDF', None))
#
# def post_execute(self):
# pass
# #if self.shell.user_ns.get('x', None) != self.last_x:
# # print("x changed!")
#
# def load_ipython_extension(ip):
# vw = VarWatcher(ip)
# ip.events.register('pre_execute', vw.pre_execute)
# ip.events.register('post_execute', vw.post_execute)
#
# ip = get_ipython()
#
# load_ipython_extension(ip)
#
# ```
# In[ ]:
# def divs(symbolName, period=None, fill=False):
# if isinstance(symbolName, tuple) and period is None:
# symbolName, period = symbolName
# if isinstance(symbolName, Wrapper) or isinstance(symbolName, pd.Series):
# sym = symbolName
# symbolName = symbolName.name
# if symbolName.startswith("~"):
# divs = sym[-1:0] # we just want an empty series with DatetimeIndex
# #divs = pd.Series(index=pd.DatetimeIndex(freq="D"))
# divs.name = symbolName
# else:
# divs = get(symbolName, mode="divs")
# divs = divs[divs>0]
# if period:
# divs = wrap(divs.rolling(period).sum())
# if fill:
# price = get(symbolName)
# divs = divs.reindex(price.index.union(divs.index), fill_value=0)
# divs.name = divs.name + " divs"
# return divs
def show_yield_types(*lst, drop_special_divs=False, **args):
yields = lmap(partial(get_yield_types, drop_special_divs=drop_special_divs, **args), lst)
rets = [get_named(x, cagr) for x in get(lst, trim=True)]
show(*yields, rets, 0, ta=False, log=False, title=f"Yields {'without special divs' if drop_special_divs else ''}")
def show_income(*all, smooth, inf_adj=False):
income = lmap(partial(get_income, smooth=smooth), all)
if inf_adj:
income = lmap(adj_inf, income)
show(income, 0, ta=False, log=False, title=f"net income (smooth={smooth}) {'inf-adjusted' if inf_adj else ''}")
def show_cum_income(*all):
income = lmap(get_cum_income, all)
show(income, ta=False, log=False, legend=False, title="cumulative net income")
def show_cum_income_relative(*all, base):
income = lmap(get_cum_income, all)
base_income = get_cum_income(base)
income = [sdiv(x, base_income) for x in income]
show(income, ta=False, log=False, legend=False, title="relative cumulative net income")
def show_rr__yield_fees__mutual_dd_rolling_pr_SPY(*all):
show_rr2(*all, ret_func=[get_curr_yield_rolling_no_fees, get_curr_yield_rolling], risk_func=mutual_dd_rolling_pr_SPY, title="Impact of fees on yield")
def show_comp(target, base, extra=None, mode="NTR", despike=False, cache=True):
if extra is None:
extra = []
elif isinstance(extra, list):
pass
else:
extra = [extra]
analyze_assets(*extra, target=target, base=base, mode=mode, despike=despike, cache=cache)
def analyze_assets(*all, target=None, base=None, mode="NTR", start=None, end=None, despike=False, few=None, detailed=False, cache=True):
if any(map(lambda x:isinstance(x, list), all)):
raise Exception("analyze_assets individual argument cannot be lists")
if len(all) == 1 and target is None and base is None:
target = all[0]
all = []
has_target_and_base = not target is None and not base is None
has_target = not target is None
has_base = not base is None
all = list(all)
all_modes = set(map(lambda s: s.name.mode, filter(is_series, all + [target, base])))
print(f"Analyzing assets with internal modes {list(all_modes)} and requested mode [{mode}]")
all = get([target, base] + all, start=start, end=end, despike=despike, mode=mode, cache=cache) # note we don't trim
target, base, *extra = all
all = [s for s in all if not s is None]
all_trim = get(all, trim=True)
if few is None:
few = len(all) <= 5
# equity curve
html_title("Equity Curves")
if few:
show(*all, (1, 'start'), (0.5, '50% draw-down'), trim=False, align='rel', title=mode + " equity") # use 0.5 instead of 0 to keep the log scale
show_modes(*all)
if detailed:
show(lmap(adj_inf, lmap(price, all)), 1, title="real price")
if has_target:
show_min_max_bands(target)
if has_target_and_base:
r = (target / base).dropna()
show_min_max_bands(r)
show_modes_comp(target, base)
show_port_flow_comp(target, base)
# draw-down
html_title("Draw-Down")
if has_target_and_base:
show_dd_price_actions(target, base, dd_func=dd)
show_dd_price_actions(target, base, dd_func=dd_rolling)
if has_target:
show_dd_chunks(target)
if few:
show_dd(*all)
show_dd(*all_trim, title_prefix='trimmed')
#############
#############
# risk-return: cagr
html_title("RR: cagr")
bases = [mix(lc, gb, do_get=False), mix(i_ac, gb, do_get=False)]
all_with_bases = get(all + bases, mode=mode)
print("-------")
#show_rr__cagr__mutual_dd_risk_rolling_pr_SPY(*all_with_bases)
show_rr(*all_with_bases, risk_func=mutual_dd_rolling_pr_SPY)
if detailed:
show_rr(*get(all + bases, mode="TR"))
show_rr(*get(all + bases, mode="NTR"))
show_rr(*get(all + bases, mode="PR"))
show_rr_modes(*all)
show_rr_modes_mutual_dd_risk_rolling_SPY(*all)
else:
show_rr(*all_with_bases, risk_func=ulcer_pr)
show_rr(*all_with_bases, risk_func=max_dd_pr)
show_rr_capture_ratios(*all)
if has_target_and_base:
show_rr(mix(target, base))
if few:
html_title("AUM")
show_aum(*all)
# Yields
if few:
html_title("Yields")
if has_target:
show_yield(target, detailed=True)
elif len(all) == 1:
show_yield(all[0], detailed=True)
if len(all) > 1:
show_yield(*all, detailed=False)
show_yield_types(*all)
show_yield_types(*all, drop_special_divs=True)
# risk-return: Yields
html_title("RR: yield")
show_rr_yield(*all, risk_func=mutual_dd_rolling_pr_SPY)
show_rr_yield(*all, risk_func=ulcer_pr)
show_rr_yield(*all, risk_func=max_dd)
show_rr(*all, ret_func=cagr, risk_func=get_start_yield, lr_fit=True, same_ratio=True)
# show_rr__yield__mutual_dd_risk_rolling_pr_SPY(*all)
show_rr__yield_range__mutual_dd_rolling_pr_SPY(*all)
show_rr__yield_fees__mutual_dd_rolling_pr_SPY(*all)
show_rr__yield_min_cagrpr__mutual_dd_rolling_pr_SPY(*all)
show_rr__yield_cagrpr__ulcerpr_trim(*all)
if detailed:
show_rr__yield_cagrpr__ulcerpr_notrim(*all)
show_rr__yield_types__ulcer(*all)
if detailed:
show_rr_yield_tr_ntr(*all)
# show_rr_modes(*all, ret_func=get_curr_yield_rolling, modes=['TR'], title='Risk - 12m Yield TR')
show_rr(*get(all, mode='TR'), ret_func=get_curr_yield_rolling, title='Risk - 12m Yield TR')
#show_rr_modes(*all, ret_func=get_curr_yield_rolling, modes=['NTR'], title='Risk - 12m Yield NTR')
show_rr(*get(all, mode='NTR'), ret_func=get_curr_yield_rolling, title='Risk - 12m Yield NTR')
show_rr__yield_ntr_pr_diff__pr(*all)
if detailed:
show_rr_yield_ntr_pr_diff_pr_full_alt(*all)
show_rr_yield_ntr_pr_diff_pr_full_alt(*all, trim=False)
# zscores
html_title("z-scores")
display_zscores(all, _cache=[None])
# withdraw flows
html_title("RR: flows")
show_rr_flows(*all)
# risk-return: cross risks
html_title("RR: cross-risks")
show_rr(*all, ret_func=ulcer_pr, risk_func=mutual_dd_rolling_pr_SPY)
show_rr(*all, ret_func=max_dd_pr, risk_func=mutual_dd_rolling_pr_SPY)
show_rr(*all, ret_func=mutual_dd_rolling_pr_TLT, risk_func=mutual_dd_rolling_pr_SPY, same_ratio=True)
show_rr2(*all, x_func=[mutual_dd_rolling_pr_SPY, mutual_dd_pr_SPY], y_func=ulcer)
show_rr2(*all, x_func=[mutual_dd_rolling_pr_SPY, mutual_dd_pr_SPY], y_func=lrretm_beta_SPY, same_ratio=True)
show_rr(*all, ret_func=get_downside_capture_SPY, risk_func=mutual_dd_rolling_pr_SPY, lr_fit=True)
if detailed:
show_rr(*all, ret_func=mutual_dd_rolling_pr_SPY_weighted, risk_func=mutual_dd_rolling_pr_SPY_unweighted, same_ratio=True)
# show_rr(*all, risk_func=mutual_dd_pr_SPY, ret_func=ulcer,)
# show_rr(*all, risk_func=mutual_dd_pr_SPY, ret_func=lrretm_beta_SPY, same_ratio=True)
#######################
# Income
html_title("Income")
if few:
show_income_ulcer(*all)
show_income(*all, smooth=12)
show_income(*all, smooth=3)
show_income(*all, smooth=0)
if detailed:
show_income(*all, smooth=12, inf_adj=True)
show_income(*all, smooth=3, inf_adj=True)
show_income(*all, smooth=0, inf_adj=True)
show_cum_income(*all)
show_cum_income(*all_trim)
if has_base:
#show_cum_income_relative(*all_trim)
show_cum_income_relative(*all_trim, base=base)
# show(lmap(roi, all), ta=False, log=False, title="net ROI")
# lrret
html_title("Mutual lrret")
if len(all) < 30:
lrret_mutual(*all)
# PCA / MDS
html_title("MDS")
if len(all) < 30:
show_mds(*all)
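# Hypothetical usage sketch for the full report (commented out: it fetches data and
# renders many plots; argument values are illustrative only):
# analyze_assets("SCHD", base="SPY", mode="NTR", detailed=False)
# show_comp("SCHD", "SPY")   # thin wrapper around analyze_assets(target=..., base=...)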
def show_dd_chunks(s, min_days=10, min_depth=1, dd_func=dd, mode="PR"):
s = get(s, mode=mode)
ranges = get_dds(s, min_days=min_days, min_depth=min_depth, dd_func=dd_func)
chunks = [s[i:j] for i,j,_ in ranges]
show_dd(*chunks, mode=mode, dd_func=dd_func, legend=False, title_prefix=f"chunked {get_pretty_name_no_mode(s)}")
def show_dd_price_actions(target, base, min_days=0, min_depth=3, dd_func=dd_rolling):
# target = pr(target)
# base = pr(base)
# base_dd = dd_func(base)
# base_dd = trimBy(base_dd, target)
# # dd_target = dd_func(target)
# # base_dd, dd_target = doTrim([base_dd, dd_target], trim=True)
target, base = get([target, base], mode="PR", trim=True)
base_dd = dd_func(base)
ranges = get_dds(base, min_days=min_days, min_depth=min_depth, dd_func=dd_func)
if dd_func == dd:
# target_price_actions = get_price_actions(target, ranges)
        target_price_actions = get_price_actions_with_rolling_base(target, ranges, base, n=0) # this is less efficient, but it takes care of aligning the first draw-down
elif dd_func == dd_rolling:
target_price_actions = get_price_actions_with_rolling_base(target, ranges, base)
else:
raise Exception(f"unsupported dd_func: {get_func_name(dd_func)}")
#base_dd_actions = [base_dd[i:j] for i,j,_ in ranges]
show(base_dd, target_price_actions, -10, -20, -30, -40, -50, legend=False, ta=False, title=f"{get_func_name(dd_func)} price action {target.name} vs {base.name}")
print("mutual_dd_risk: ", mutual_dd(target, base, dd_func=dd_func, min_depth=min_depth))
def show_dd(*all, mode="PR", dd_func=dd, legend=True, title_prefix='', do_get=True, **args):
all = [s for s in all if not s is None]
if do_get:
all = get(all, mode=mode)
for s in all:
print(f"ulcer {get_name(s)}: {ulcer(s):.2f}")
show(lmap(dd_func, all), -10, -20, -30, -40, -50, ta=False, title=f"{title_prefix} {mode} {get_func_name(dd_func)} draw-down", legend=legend, **args)
##########################################
def adj_inf(s):
cpi = get(cpiUS)
s = get(s)
return name(s / cpi, s.name)
from functools import lru_cache
@lru_cache(maxsize=12)
def get_inflation(smooth=None, interpolate=True):
cpi = get(cpiUS, interpolate=False)
inf = (cpi / cpi.shift(12) - 1) * 100
if interpolate:
inf = inf.asfreq("D").interpolate()
if smooth:
inf = ma(inf, smooth)
return name(inf, "inflation")
def _inf(s):
inf = 100 * ret(s, 12).dropna().resample("M").sum()
return name(inf, f"inf_{s.name}")
def _get_cpi(type, core, alt):
if type == "cpi":
if core:
return get('FRED/CPILFESL@Q = cpiu_core', interpolate=False)
if alt == 0:
return get('RATEINF/CPI_USA@Q', interpolate=False)
return get('FRED/CPIAUCSL@Q = cpiu', interpolate=False)
if type == "pce":
if core:
return get('FRED/PCEPILFE@Q = pce_core', interpolate=False)
else:
return get('FRED/PCEPI@Q = pce', interpolate=False)
#get('FRED/PCE@Q', interpolate=False)
raise Exception(f"unknown cpi type: {type}")
def get_cpi(type='cpi', core=True, alt=0):
return name(_get_cpi(type, core, alt), f"CPI-{type} {'core' if core else 'all'} {alt if alt else ''}")
def _get_inf(type, core, alt):
if type == "cpi":
if core:
return _inf(get_cpi('cpi', core=True))
else:
if alt == 0:
return get('RATEINF/INFLATION_USA@Q = inf_us', interpolate=False) # 1914+
if alt == 1:
return _inf(get_cpi('cpi', core, alt=0)) # same, calculated from CPI
if alt == 2:
return get('FRBC/USINFL^2@Q = inf_all_cpiu', interpolate=False).resample("M").sum() # same, less history
elif type == "pce":
if not core and alt == 1:
return get('FRBC/USINFL^18@Q = inf_pce', interpolate=False).resample("M").sum() # the col name is wrong, this is the full, not core, index
return _inf(get_cpi('pce', core=core))
elif type == "ppi":
if core:
return get('FRBC/USINFL^14@Q = inf_ppi_core', interpolate=False).resample("M").sum()
else:
return get('FRBC/USINFL^10@Q = inf_ppi', interpolate=False).resample("M").sum()
elif type == "mean":
return mean_series([get_inf('cpi', core), get_inf('pce', core), get_inf('ppi', core)], align=False)
raise Exception(f"unknown inf type: {type}")
def get_inf(type='mean', core=True, alt=0):
return name(_get_inf(type, core, alt), f"Inflation-{type} {'core' if core else 'all'} {alt if alt else ''}")
##########################################
def get_real_yield(s, type=None):
yld = get_yield(s, type=type)
inf = get_inflation(365*7)
return name(yld - inf, f"{yld.name} real").dropna()
def roi(s,value=100000):
income = get_income(s, value=value, nis=False, per_month=True)
return income/value * 100 * 12
def cum_cagr(s):
s = get(s)
days = (s.index - s.index[0]).days
years = days/365
val = s / s[0]
return (np.power(val, 1/years)-1)*100
def modes(s, **get_args):
res = [get(s, mode="TR", **get_args), get(s, mode="NTR", **get_args), get(s, mode="PR", **get_args)]
    # we can't rename existing series, it messes up future gets
# res[0].name += "-TR"
# res[1].name += "-NTR"
# res[2].name += "-ITR"
# res[3].name += "-PR"
return res
# ## Generic Utils
# In[ ]:
# In[ ]:
# https://raw.githubusercontent.com/bsolomon1124/pyfinance/master/pyfinance/utils.py
# fixed to use 365 days for "D"
from pandas.tseries.frequencies import FreqGroup, get_freq_code
PERIODS_PER_YEAR = {
FreqGroup.FR_ANN: 1.,
FreqGroup.FR_QTR: 4.,
FreqGroup.FR_MTH: 12.,
FreqGroup.FR_WK: 52.,
FreqGroup.FR_BUS: 252.,
    FreqGroup.FR_DAY: 365.,  # calendar days (pyfinance uses 252 business days; changed here per the note above)
FreqGroup.FR_HR: 365. * 6.5,
FreqGroup.FR_MIN: 365. * 6.5 * 60,
FreqGroup.FR_SEC: 365. * 6.5 * 60 * 60,
FreqGroup.FR_MS: 365. * 6.5 * 60 * 60,
FreqGroup.FR_US: 365. * 6.5 * 60 * 60 * 1000,
FreqGroup.FR_NS: 365. * 6.5 * 60 * 60 * 1000 * 1000 # someday...
}
def get_anlz_factor(freq):
"""Find the number of periods per year given a frequency.
Parameters
----------
freq : str
Any frequency str or anchored offset str recognized by Pandas.
Returns
-------
float
Example
-------
    >>> get_anlz_factor('D')
    365.0
    >>> get_anlz_factor('5D')  # 5-day periods per year
    73.0
    >>> get_anlz_factor('Q')
    4.0
    >>> get_anlz_factor('Q-DEC')
    4.0
    >>> get_anlz_factor('BQS-APR')
    4.0
"""
# 'Q-NOV' would give us (2001, 1); we just want (2000, 1).
try:
base, mult = get_freq_code(freq)
except ValueError:
# The above will fail for a bunch of irregular frequencies, such
# as 'Q-NOV' or 'BQS-APR'
freq = freq.upper()
if freq.startswith(('A-', 'BA-', 'AS-', 'BAS-')):
freq = 'A'
elif freq.startswith(('Q-', 'BQ-', 'QS-', 'BQS-')):
freq = 'Q'
elif freq in {'MS', 'BMS'}:
freq = 'M'
else:
raise ValueError('Invalid frequency: %s' % freq)
base, mult = get_freq_code(freq)
return PERIODS_PER_YEAR[(base // 1000) * 1000] / mult
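# Quick sanity checks for the 365-day convention above (these assume the older
# pandas API imported via get_freq_code still behaves as documented):
assert get_anlz_factor('D') == 365.0
assert get_anlz_factor('Q-DEC') == 4.0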
#################### PCA
from sklearn.decomposition import PCA
def get_ols_beta_dist(*all):
df = get_ret_df(*all)
n = df.shape[1]
res = np.empty((n, n))
for c1 in range(n):
for c2 in range(n):
y = df.iloc[:, c1]
X = df.iloc[:, c2]
beta1 = sm.OLS(y, X).fit().params[0]
beta2 = sm.OLS(X, y).fit().params[0]
x1 = np.array([beta1, beta2])
x2 = np.abs(x1 - 1)
val = x1[np.argmin(x2)]
res[c1, c2] = val
return pd.DataFrame(res, columns=df.columns, index=df.columns)
def get_beta_dist(*all, type):
all = get(all)
names = lmap(get_name, all)
n = len(all)
data = np.empty((n, n))
for c1 in range(n):
for c2 in range(n):
if c1 == c2:
val = 1
else:
y = all[c1]
X = all[c2]
# print(y.name, X.name)
res = lrret(y, [X], return_res=True, show_res=False, sum1=(type=="R2"), pos_weights=(type=="R2"))
if type == 'R2':
val = res['R^2']
elif type == 'weight':
val = res['ser'][0]
data[c1, c2] = val
for c1 in range(n):
for c2 in range(n):
if type == "R2":
val = max(data[c1, c2], data[c2, c1])
elif type == "weight":
x1 = np.array([data[c1, c2], data[c2, c1]])
x2 = np.abs(x1 - 1)
val = x1[np.argmin(x2)]
data[c1, c2] = val
data[c2, c1] = val
df = pd.DataFrame(data, columns=names, index=names)
return df
def get_ret_df(*lst):
lst = get(lst, trim=True)
df = pd.DataFrame({x.name: logret(x) for x in lst}).dropna()
return df
def get_df(*lst):
lst = get(lst, trim=True)
df = pd.DataFrame({x.name: x for x in lst}).dropna()
return df
def _show_mds(*all, type='cor'):
if type == 'cor':
df = get_ret_df(*all)
sim = np.corrcoef(df.T)
dist = 1-sim
elif type == 'cov':
# df = get_df(*all)
df = get_ret_df(*all)
sim = np.cov(df.T)
np.fill_diagonal(sim, 1)
dist = np.abs(1-sim)
elif type == 'weight':
dist = get_beta_dist(*all, type='weight')
dist = np.abs(1 - dist)
elif type == 'R2':
dist = get_beta_dist(*all, type='R2')
dist = 1 - dist
elif type == 'beta':
dist = get_ols_beta_dist(*all)
dist = np.abs(1 - dist)
names = lmap(get_name, all)
#dist = dist - dist.mean(axis=1)
if not isinstance(dist, pd.DataFrame):
dist = pd.DataFrame(dist, columns=names, index=names)
display(dist)
pca = PCA(n_components=2)
tr = pca.fit_transform(dist)
plot_scatter_xy(tr[:, 0], tr[:, 1], names=names, title=f"{type} MDS")
def show_mds(*all, type=['cor', 'cov', 'beta', 'weight', 'R2']):
if isinstance(type, str):
type = [type]
for t in type:
_show_mds(*all, type=t)
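# Hypothetical usage sketch (commented out; get() would fetch the tickers):
# show_mds("SPY", "TLT", "GLD", type='cor')   # correlation-distance projection only
# show_mds("SPY", "TLT", "GLD")               # all five distance types, one plot each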
#################### PCA
#################### Func Tools #####################
# e.g.:
# compose(cagr, despike, get)(SPY)
# partial(get, mode="TR")(SPY)
# def func(*functions, **args):
# if len(functions) > 1:
# f = compose(*functions)
# else:
# f = functions[0]
# if len(args) > 0:
# f = wrapped_partial(f, **args)
# return f
#################### portfolio value and flow ############
def port_value(s, flow=None, cash=100000):
start = None
if is_series(s):
start = s.index[0]
pr = price(s)
dv = divs(s)
dv = dv * 0.75
if not start is None:
pr = pr[start:]
if not start is None:
dv = dv[start:]
purchace_price = pr[0]
units = cash / purchace_price
dv = dv.reindex(pr.index).fillna(0)
flow = flow.reindex(pr.index).fillna(0)
res = pd.Series(0.0, pr.index)
accum_cash = 0
for dt in pr.index:
cpr = pr[dt]
cdv = dv[dt]
cfl = flow[dt]
if cdv > 0:
accum_cash += units * cdv
if cfl < 0: # we assume only negatives for now
take_from_cash = min(accum_cash, abs(cfl))
accum_cash -= take_from_cash
cfl += take_from_cash
if cfl != 0:
diff_units = -cfl / cpr
units -= diff_units
if cpr > purchace_price:
gain = diff_units * (cpr - purchace_price)
tax = gain * 0.25
accum_cash -= tax
if accum_cash > 0:
new_units = accum_cash / cpr
units += new_units
accum_cash = 0
c_val = units * cpr
if c_val < 0:
c_val = 0
res[dt] = c_val
res.name = get_name(s) + " -flow"
# print(f"left with accum' cash {accum_cash}")
return res
def get_flow(s, amount=None, rate=None, freq="M", inf=0.03):
if amount is None and rate is None:
rate = 0.04
# raise Exception(f"amount or rate must be defined")
pr = price(s)
if amount is None and not rate is None:
amount = rate * 100000 / 12
flow = pd.Series(0.0, index=pr.index)
flow = flow.resample("M").sum()
flow -= amount
mult = np.full(len(flow), math.pow(1+inf, 1/12)).cumprod()
flow *= mult
flow.name = f"{pr.name} flow"
return flow
def get_port_with_flow(s, amount=None, rate=None, freq="M", inf=0.03):
flow = get_flow(s, amount=amount, rate=rate, freq=freq, inf=inf)
res = port_value(s, flow)
if not rate is None:
res.name = f"{s.name} {rate*100:.0f}%"
return res
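# Hypothetical usage sketch (commented out; price()/divs() would fetch data):
# simulate a 4%/year withdrawal, paid monthly and indexed at 3% inflation, from SPY:
# spy_4pct = get_port_with_flow("SPY", rate=0.04, inf=0.03)
# show(get("SPY", mode="NTR"), spy_4pct)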
def show_port_with_flow(s, amount=None, rate=None, freq="M", inf=0.03, income_smooth=0):
s_ntr = get(s, mode="NTR")
flow = get_flow(s, amount=amount, rate=rate, freq=freq, inf=inf)
s_flow = port_value(s, flow)
show(s_ntr, s_flow, price(s))
show(0, get_income(s, smooth=income_smooth), -flow, ta=False, log=False)
wrate = -flow / s_flow * 12 * 100
show(get_yield_true(s), wrate, 0, ta=False, log=False)
def show_port_flow_comp(target, base):
base, target = get([base, target], trim=True, mode="NTR")
flow = -get_income(target, smooth=0)
base_flow = port_value(base, flow)
target_pr = get(target, mode="PR")
#show(base, base_flow, target, target_pr, 0, 1)
show(base_flow, target_pr, 0, 1, title="base with flow vs target PR")
relative_value = target_pr / base_flow
relative_value.name = "target_pr / base_flow"
relative_ntr = ntr(target) / ntr(base)
relative_ntr.name = "relative NTR"
show(relative_value, relative_ntr, 0, 1, title="relative base with flow / target PR")
def get_flows(s, n=None, rng=None):
sers = []
if rng is None and n is None:
rng = range(5)
if rng is None:
rng = range(n)
for i in rng:
ser = get_port_with_flow(s, rate=i/100, inf=0)
ser.name = f"{s.name} {i}%"
sers.append(ser)
return sers
##########################################
def show_rr_flows(*all, n=None, rng=None):
if rng is None:
rng = [0, 5]
all = get(all, trim=True)
all = lmap(lambda x: get_flows(x, n=n, rng=rng), all)
show_rr(*all, title="net flows")
def show_rr__yield_min_cagrpr__mutual_dd_rolling_pr_SPY(*all):
show_rr2(2, 3, 4, 5, *all, y_func=[cagr_pr, lambda x: get_curr_yield_min2(ntr(x))], x_func=mutual_dd_rolling_pr_SPY, xlabel="mutual_dd_rolling_pr_SPY", ylabel="PR CAGR ➜ min net yield")
def show_rr__yield_prcagr__ulcerpr(*all, trim=True, title="PR CAGR ➜ 12m net yield vs PR ulcer"):
show_rr2(2, 3, 4, 5, *all, trim=trim, g_func=pr, y_func=[cagr, lambda x: get_curr_yield_min2(ntr(x))], title=title, xlabel="PR ulcer", ylabel="PR CAGR ➜ 12m net yield")
def show_rr__yield_cagrpr__ulcerpr_trim(*all):
all = get(all, trim=True)
show_rr__yield_prcagr__ulcerpr(*all, title="PR CAGR ➜ 12m net yield vs PR ulcer (trim)")
def show_rr__yield_cagrpr__ulcerpr_notrim(*all):
all = get(all, untrim=True)
show_rr__yield_prcagr__ulcerpr(*all, trim=False, title="PR CAGR ➜ 12m net yield vs PR ulcer (no trim)")
############ special risk-return ####################
# def show_risk_itr_pr(*lst, title=None):
# prs = get(lst, mode="PR", trim=True)
# itrs = get(lst, mode="ITR", trim=True)
# res = []
# for pr, itr in zip(prs, itrs):
# pr_ulcer = ulcer(pr)
# x = [pr_ulcer, pr_ulcer]
# y = [cagr(pr), cagr(itr)]
# ser = pd.Series(y, index=x)
# ser.name = pr.name
# ser.names = [pr.name, '']
# res.append(ser)
# title = title or f"PR Risk - ITR Return"
# plot_scatter(*res, title=title, xlabel="ulcer", ylabel="cagr", show_zero_point=True)
# def show_risk_itr_pr_diff(*lst, title=None):
# prs = get(lst, mode="PR", trim=True)
# itrs = get(lst, mode="ITR", trim=True)
# res = []
# for pr, itr in zip(prs, itrs):
# pr_ulcer = ulcer(pr)
# x = [pr_ulcer]
# y = [cagr(itr)-cagr(pr)]
# ser = pd.Series(y, index=x)
# ser.name = pr.name
# ser.names = [pr.name]
# res.append(ser)
# title = title or f"PR Risk - ITR Return"
# plot_scatter(*res, title=title, xlabel="ulcer", ylabel="cagr", show_zero_point=True)
def pr_cagr_full(s):
return cagr(get(s, untrim=True, mode="PR"))
def start_year_full(s):
s = get(s, untrim=True)
return str(s.index[0].year)
def start_year_full_with_name(s):
return f"{s.name} {start_year_full(s)}"
def show_rr_yield_ntr_pr_diff_pr_full_alt(*lst, trim=True):
alt_text = start_year_full if trim else start_year_full_with_name
show_rr__yield_ntr_pr_diff__pr(*lst, alt_risk_func=pr_cagr_full, alt_risk_text=alt_text, trim=trim)
def show_rr__yield_ntr_pr_diff__pr(*lst, risk_func=cagr, alt_risk_func=pr_lr_cagr, alt_risk_text=None, title=None, trim=True):
# date = getCommonDate(lst, 'start')
# prs = get(lst, mode="PR", trim=date)
# ntrs = get(lst, mode="NTR", trim=date)
prs = get(lst, mode="PR", trim=trim)
ntrs = get(lst, mode="NTR", trim=trim)
res = []
for pr, ntr in zip(prs, ntrs):
if pr.shape[0] == 0:
continue
pr_ulcer = ulcer(pr)
yld = get_curr_yield(get(pr, mode="NTR"), type='rolling')
risk1 = risk_func(pr)
x = [risk1, risk1]
y = [cagr(ntr)-cagr(pr), yld]
if not alt_risk_func is None:
x.insert(0, alt_risk_func(pr))
y.insert(0, y[0])
ser = | pd.Series(y, index=x) | pandas.Series |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import pickle
import sys
from distutils.version import LooseVersion
import pytest
import numpy as np
import pyarrow as pa
import pyarrow.tests.util as test_util
def test_schema_constructor_errors():
msg = ("Do not call Schema's constructor directly, use `pyarrow.schema` "
"instead")
with pytest.raises(TypeError, match=msg):
pa.Schema()
def test_type_integers():
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']
for name in dtypes:
factory = getattr(pa, name)
t = factory()
assert str(t) == name
def test_type_to_pandas_dtype():
M8_ns = np.dtype('datetime64[ns]')
cases = [
(pa.null(), np.float64),
(pa.bool_(), np.bool_),
(pa.int8(), np.int8),
(pa.int16(), np.int16),
(pa.int32(), np.int32),
(pa.int64(), np.int64),
(pa.uint8(), np.uint8),
(pa.uint16(), np.uint16),
(pa.uint32(), np.uint32),
(pa.uint64(), np.uint64),
(pa.float16(), np.float16),
(pa.float32(), np.float32),
(pa.float64(), np.float64),
(pa.date32(), M8_ns),
(pa.date64(), M8_ns),
(pa.timestamp('ms'), M8_ns),
(pa.binary(), np.object_),
(pa.binary(12), np.object_),
(pa.string(), np.object_),
(pa.list_(pa.int8()), np.object_),
# (pa.list_(pa.int8(), 2), np.object_), # TODO needs pandas conversion
]
for arrow_type, numpy_type in cases:
assert arrow_type.to_pandas_dtype() == numpy_type
@pytest.mark.pandas
def test_type_to_pandas_dtype_check_import():
# ARROW-7980
test_util.invoke_script('arrow_7980.py')
def test_type_list():
value_type = pa.int32()
list_type = pa.list_(value_type)
assert str(list_type) == 'list<item: int32>'
field = pa.field('my_item', pa.string())
l2 = pa.list_(field)
assert str(l2) == 'list<my_item: string>'
def test_type_comparisons():
val = pa.int32()
assert val == pa.int32()
assert val == 'int32'
assert val != 5
def test_type_for_alias():
cases = [
('i1', pa.int8()),
('int8', pa.int8()),
('i2', pa.int16()),
('int16', pa.int16()),
('i4', pa.int32()),
('int32', pa.int32()),
('i8', pa.int64()),
('int64', pa.int64()),
('u1', pa.uint8()),
('uint8', pa.uint8()),
('u2', pa.uint16()),
('uint16', pa.uint16()),
('u4', pa.uint32()),
('uint32', pa.uint32()),
('u8', pa.uint64()),
('uint64', pa.uint64()),
('f4', pa.float32()),
('float32', pa.float32()),
('f8', pa.float64()),
('float64', pa.float64()),
('date32', pa.date32()),
('date64', pa.date64()),
('string', pa.string()),
('str', pa.string()),
('binary', pa.binary()),
('time32[s]', pa.time32('s')),
('time32[ms]', pa.time32('ms')),
('time64[us]', pa.time64('us')),
('time64[ns]', pa.time64('ns')),
('timestamp[s]', pa.timestamp('s')),
('timestamp[ms]', pa.timestamp('ms')),
('timestamp[us]', pa.timestamp('us')),
('timestamp[ns]', pa.timestamp('ns')),
('duration[s]', pa.duration('s')),
('duration[ms]', pa.duration('ms')),
('duration[us]', pa.duration('us')),
('duration[ns]', pa.duration('ns')),
]
for val, expected in cases:
assert pa.type_for_alias(val) == expected
def test_type_string():
t = pa.string()
assert str(t) == 'string'
def test_type_timestamp_with_tz():
tz = 'America/Los_Angeles'
t = pa.timestamp('ns', tz=tz)
assert t.unit == 'ns'
assert t.tz == tz
def test_time_types():
t1 = pa.time32('s')
t2 = pa.time32('ms')
t3 = pa.time64('us')
t4 = pa.time64('ns')
assert t1.unit == 's'
assert t2.unit == 'ms'
assert t3.unit == 'us'
assert t4.unit == 'ns'
assert str(t1) == 'time32[s]'
assert str(t4) == 'time64[ns]'
with pytest.raises(ValueError):
pa.time32('us')
with pytest.raises(ValueError):
pa.time64('s')
def test_from_numpy_dtype():
cases = [
(np.dtype('bool'), pa.bool_()),
(np.dtype('int8'), pa.int8()),
(np.dtype('int16'), pa.int16()),
(np.dtype('int32'), pa.int32()),
(np.dtype('int64'), pa.int64()),
(np.dtype('uint8'), pa.uint8()),
(np.dtype('uint16'), pa.uint16()),
(np.dtype('uint32'), pa.uint32()),
(np.dtype('float16'), pa.float16()),
(np.dtype('float32'), pa.float32()),
(np.dtype('float64'), pa.float64()),
(np.dtype('U'), pa.string()),
(np.dtype('S'), pa.binary()),
(np.dtype('datetime64[s]'), pa.timestamp('s')),
(np.dtype('datetime64[ms]'), pa.timestamp('ms')),
(np.dtype('datetime64[us]'), pa.timestamp('us')),
(np.dtype('datetime64[ns]'), pa.timestamp('ns')),
(np.dtype('timedelta64[s]'), pa.duration('s')),
(np.dtype('timedelta64[ms]'), pa.duration('ms')),
(np.dtype('timedelta64[us]'), pa.duration('us')),
(np.dtype('timedelta64[ns]'), pa.duration('ns')),
]
for dt, pt in cases:
result = pa.from_numpy_dtype(dt)
assert result == pt
# Things convertible to numpy dtypes work
assert pa.from_numpy_dtype('U') == pa.string()
assert pa.from_numpy_dtype(np.unicode) == pa.string()
assert pa.from_numpy_dtype('int32') == pa.int32()
assert pa.from_numpy_dtype(bool) == pa.bool_()
with pytest.raises(NotImplementedError):
pa.from_numpy_dtype(np.dtype('O'))
with pytest.raises(TypeError):
pa.from_numpy_dtype('not_convertible_to_dtype')
def test_schema():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
assert sch.field('foo').name == 'foo'
assert sch.field('foo').type == fields[0].type
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
with pytest.raises(TypeError):
pa.schema([None])
def test_schema_to_string_with_metadata():
lorem = """\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla accumsan vel
turpis et mollis. Aliquam tincidunt arcu id tortor blandit blandit. Donec
eget leo quis lectus scelerisque varius. Class aptent taciti sociosqu ad
litora torquent per conubia nostra, per inceptos himenaeos. Praesent
faucibus, diam eu volutpat iaculis, tellus est porta ligula, a efficitur
turpis nulla facilisis quam. Aliquam vitae lorem erat. Proin a dolor ac libero
dignissim mollis vitae eu mauris. Quisque posuere tellus vitae massa
pellentesque sagittis. Aenean feugiat, diam ac dignissim fermentum, lorem
sapien commodo massa, vel volutpat orci nisi eu justo. Nulla non blandit
sapien. Quisque pretium vestibulum urna eu vehicula."""
# ARROW-7063
my_schema = pa.schema([pa.field("foo", "int32", False,
metadata={"key1": "value1"}),
pa.field("bar", "string", True,
metadata={"key3": "value3"})],
metadata={"lorem": lorem})
assert my_schema.to_string() == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'
-- schema metadata --
lorem: '""" + lorem[:65] + "' + " + str(len(lorem) - 65)
# Metadata that exactly fits
result = pa.schema([('f0', 'int32')],
metadata={'key': 'value' + 'x' * 62}).to_string()
assert result == """\
f0: int32
-- schema metadata --
key: 'valuexxxxxxxxxxxxxxxxxxxxxxxxxxxxx\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"""
assert my_schema.to_string(truncate_metadata=False) == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'
-- schema metadata --
lorem: '{}'""".format(lorem)
assert my_schema.to_string(truncate_metadata=False,
show_field_metadata=False) == """\
foo: int32 not null
bar: string
-- schema metadata --
lorem: '{}'""".format(lorem)
assert my_schema.to_string(truncate_metadata=False,
show_schema_metadata=False) == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'"""
assert my_schema.to_string(truncate_metadata=False,
show_field_metadata=False,
show_schema_metadata=False) == """\
foo: int32 not null
bar: string"""
def test_schema_from_tuples():
fields = [
('foo', pa.int32()),
('bar', pa.string()),
('baz', pa.list_(pa.int8())),
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
with pytest.raises(TypeError):
pa.schema([('foo', None)])
def test_schema_from_mapping():
fields = OrderedDict([
('foo', pa.int32()),
('bar', pa.string()),
('baz', pa.list_(pa.int8())),
])
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
fields = OrderedDict([('foo', None)])
with pytest.raises(TypeError):
pa.schema(fields)
def test_schema_duplicate_fields():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('foo', pa.list_(pa.int8())),
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'foo']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
foo: list<item: int8>
child 0, item: int8"""
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
with pytest.warns(FutureWarning):
assert sch.field_by_name('bar') == fields[1]
with pytest.warns(FutureWarning):
assert sch.field_by_name('xxx') is None
with pytest.warns((UserWarning, FutureWarning)):
assert sch.field_by_name('foo') is None
def test_field_flatten():
f0 = pa.field('foo', pa.int32()).with_metadata({b'foo': b'bar'})
assert f0.flatten() == [f0]
f1 = pa.field('bar', pa.float64(), nullable=False)
ff = pa.field('ff', pa.struct([f0, f1]), nullable=False)
assert ff.flatten() == [
pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
pa.field('ff.bar', pa.float64(), nullable=False)] # XXX
# Nullable parent makes flattened child nullable
ff = pa.field('ff', pa.struct([f0, f1]))
assert ff.flatten() == [
pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
pa.field('ff.bar', pa.float64())]
fff = pa.field('fff', pa.struct([ff]))
assert fff.flatten() == [pa.field('fff.ff', pa.struct([f0, f1]))]
def test_schema_add_remove_metadata():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
s1 = pa.schema(fields)
assert s1.metadata is None
metadata = {b'foo': b'bar', b'pandas': b'badger'}
s2 = s1.with_metadata(metadata)
assert s2.metadata == metadata
s3 = s2.remove_metadata()
assert s3.metadata is None
# idempotent
s4 = s3.remove_metadata()
assert s4.metadata is None
def test_schema_equals():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
metadata = {b'foo': b'bar', b'pandas': b'badger'}
sch1 = pa.schema(fields)
sch2 = pa.schema(fields)
sch3 = pa.schema(fields, metadata=metadata)
sch4 = pa.schema(fields, metadata=metadata)
assert sch1.equals(sch2, check_metadata=True)
assert sch3.equals(sch4, check_metadata=True)
assert sch1.equals(sch3)
assert not sch1.equals(sch3, check_metadata=True)
assert not sch1.equals(sch3, check_metadata=True)
del fields[-1]
sch3 = pa.schema(fields)
assert not sch1.equals(sch3)
def test_schema_equals_propagates_check_metadata():
# ARROW-4088
schema1 = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string())
])
schema2 = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string(), metadata={'a': 'alpha'}),
])
assert not schema1.equals(schema2, check_metadata=True)
assert schema1.equals(schema2)
def test_schema_equals_invalid_type():
# ARROW-5873
schema = pa.schema([pa.field("a", pa.int64())])
for val in [None, 'string', pa.array([1, 2])]:
with pytest.raises(TypeError):
schema.equals(val)
def test_schema_equality_operators():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
metadata = {b'foo': b'bar', b'pandas': b'badger'}
sch1 = pa.schema(fields)
sch2 = pa.schema(fields)
sch3 = pa.schema(fields, metadata=metadata)
sch4 = pa.schema(fields, metadata=metadata)
assert sch1 == sch2
assert sch3 == sch4
# __eq__ and __ne__ do not check metadata
assert sch1 == sch3
assert not sch1 != sch3
assert sch2 == sch4
# comparison with other types doesn't raise
assert sch1 != []
assert sch3 != 'foo'
def test_schema_get_fields():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
assert schema.field('foo').name == 'foo'
assert schema.field(0).name == 'foo'
assert schema.field(-1).name == 'baz'
with pytest.raises(KeyError):
schema.field('other')
with pytest.raises(TypeError):
schema.field(0.0)
with pytest.raises(IndexError):
schema.field(4)
def test_schema_negative_indexing():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
assert schema[-1].equals(schema[2])
assert schema[-2].equals(schema[1])
assert schema[-3].equals(schema[0])
with pytest.raises(IndexError):
schema[-4]
with pytest.raises(IndexError):
schema[3]
def test_schema_repr_with_dictionaries():
fields = [
pa.field('one', pa.dictionary(pa.int16(), pa.string())),
pa.field('two', pa.int32())
]
sch = pa.schema(fields)
expected = (
"""\
one: dictionary<values=string, indices=int16, ordered=0>
two: int32""")
assert repr(sch) == expected
def test_type_schema_pickling():
cases = [
pa.int8(),
pa.string(),
pa.binary(),
pa.binary(10),
pa.list_(pa.string()),
pa.map_(pa.string(), pa.int8()),
pa.struct([
pa.field('a', 'int8'),
pa.field('b', 'string')
]),
pa.union([
pa.field('a', pa.int8()),
pa.field('b', pa.int16())
], pa.lib.UnionMode_SPARSE),
pa.union([
pa.field('a', pa.int8()),
pa.field('b', pa.int16())
], pa.lib.UnionMode_DENSE),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.date64(),
pa.timestamp('ms'),
pa.timestamp('ns'),
pa.decimal128(12, 2),
pa.field('a', 'string', metadata={b'foo': b'bar'})
]
for val in cases:
roundtripped = pickle.loads(pickle.dumps(val))
assert val == roundtripped
fields = []
for i, f in enumerate(cases):
if isinstance(f, pa.Field):
fields.append(f)
else:
fields.append(pa.field('_f{}'.format(i), f))
schema = pa.schema(fields, metadata={b'foo': b'bar'})
roundtripped = pickle.loads(pickle.dumps(schema))
assert schema == roundtripped
def test_empty_table():
schema = pa.schema([
pa.field('f0', pa.int64()),
pa.field('f1', pa.dictionary(pa.int32(), pa.string())),
pa.field('f2', pa.list_(pa.list_(pa.int64()))),
])
table = schema.empty_table()
assert isinstance(table, pa.Table)
assert table.num_rows == 0
assert table.schema == schema
@pytest.mark.pandas
def test_schema_from_pandas():
import pandas as pd
inputs = [
list(range(10)),
pd.Categorical(list(range(10))),
['foo', 'bar', None, 'baz', 'qux'],
np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'
], dtype='datetime64[ns]'),
]
if LooseVersion(pd.__version__) >= '1.0.0':
inputs.append(pd.array([1, 2, None], dtype= | pd.Int32Dtype() | pandas.Int32Dtype |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
            # The `vals1 + vals2` below fails because one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = | pd.concat([dts1, dts3]) | pandas.concat |
# -*- coding: utf-8 -*-
import matplotlib
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import pickle
import numpy as np
import random
from itertools import groupby
import copy
import os
'''
delete samples with fewer than 200 active days
'''
#with open ('data/individual_ID_list', 'rb') as fp:
#individual_ID_list = pickle.load(fp)
##------------****filter out data****------
#individual_ID_list_copy=copy.deepcopy(individual_ID_list)
#for Card_ID in individual_ID_list:
#file_name='data/sample_'+str(Card_ID)+'_201407_201408.csv'
#sample_one=pd.read_csv(file_name)
#sample_one['date_time']=pd.to_datetime(sample_one['date_time'])
#sample_one["date"]=sample_one["date_time"].apply(lambda x: x.date())
#date_list=pd.unique(sample_one['date'])
##print (date_list)
#flag=0
#if len(date_list) < 200:
#individual_ID_list_copy.remove(Card_ID)
#continue
#for date in date_list:
#trip_per_day=sample_one['date'].loc[sample_one['date']==date].count()-1
#if trip_per_day>8: # unreasonable card
#flag=1
#break
#if flag==1:
#individual_ID_list_copy.remove(Card_ID)
#with open('data/individual_ID_list_new', 'wb') as fp:
#pickle.dump(individual_ID_list_copy, fp)
colors = ["#3366cc", "#dc3912", "#109618", "#990099", "#ff9900"]
labels = ["All", "First", "Remaining"]
#------------**------
def process_data(Card_ID):
#print (Card_ID)
file_name='data/sample_'+str(Card_ID)+'_201407_201408.csv'
    if not os.path.exists(file_name):
return 'nothing'
#print ('a')
sample_one=pd.read_csv(file_name)
sample_one['date_time']=pd.to_datetime(sample_one['date_time'])
sample_one["date"]=sample_one["date_time"].apply(lambda x: x.date())
date_list=list(pd.unique(sample_one['date']))
#print (date_list)
num_of_Activeday=len(date_list)
#print(num_of_Activeday)
location_list=list(pd.unique(sample_one['location']))
location_list.remove(-1)
num_of_location=len(location_list)
#print (num_of_location)
return (num_of_Activeday,num_of_location)
def process_data_trip_perday(samples):
samples = samples.loc[samples['if_last']== 0].copy()
samples['date_time']=pd.to_datetime(samples['date_time'])
samples["date"]=samples["date_time"].apply(lambda x: x.date())
samples['duration_hour'] = samples['duration'] / 3600
samples['trip_start_time'] = samples['date_time'] + pd.to_timedelta(samples['duration'],unit = 'sec')
samples['trip_start_time_hour'] = samples["trip_start_time"].dt.hour + 4
samples['trip_start_time_hour'] = samples['trip_start_time_hour'] % 24
samples_trip_per_day = samples.groupby(['Card_ID', 'date'])['seq_ID'].count().reset_index()
trip_per_day_list = list(samples_trip_per_day['seq_ID'])
samples_first = samples.loc[samples['act_ID'] == 0]
sample_not_first = samples.loc[samples['act_ID'] != 0]
first_duration_list = list(samples_first['duration_hour'])
duration_list = list(samples['duration_hour'])
all_except_duration_list = list(sample_not_first['duration_hour'])
trip_start_time = list(samples['trip_start_time_hour'])
num_active_day_list = samples.groupby(['Card_ID'])['date'].nunique().reset_index()
num_active_day_list = list(num_active_day_list['date'])
return trip_per_day_list,first_duration_list, duration_list, all_except_duration_list,trip_start_time, num_active_day_list
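# Illustrative usage (comments only, so the script's behaviour is unchanged):
# `samples_demo` stands for a hypothetical DataFrame with the columns used above
# (Card_ID, date_time, duration, seq_ID, act_ID, if_last).
#   (trips_per_day, first_dur, all_dur, rest_dur,
#    start_hours, active_days) = process_data_trip_perday(samples_demo)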
def plot_data_description(trip_per_day,active_day,df1,df2,df3,trip_start_time, save_fig):
import seaborn as sns
sns.set(font_scale=1.8)
sns.set_style("whitegrid", {'axes.grid': False, "legend.frameon": True})
plt.figure(figsize=(14, 10))
ax1 = plt.subplot(2, 2, 1)
hist, bins = np.histogram(active_day, range(300,850,50))
p = hist.astype(np.float32) / len(active_day)
w = bins[1] - bins[0]
plt.bar(bins[:-1], p, width=w, align='edge', color=colors[0], edgecolor='w', alpha=0.8)
# plt.hist(S, bins=range(0, 151, 10), normed=True, color=colors[0], edgecolor='w', alpha=0.8)
plt.xlim(300, 800)
# ax1.set_yticks([0.0, 0.005, 0.010, 0.015, 0.020])
plt.xlabel('Number of active days', fontsize=18)
plt.ylabel('Probability', fontsize=18)
plt.text(-0.1, 1.05, '(a)', fontdict={'size': 18, 'weight': 'bold'},
transform=ax1.transAxes)
ax2 = plt.subplot(2, 2, 2)
n = 8 + 1
plt.hist(np.array(trip_per_day), bins=range(n) , density = True, color=colors[0], edgecolor='w', alpha=0.8)
plt.xlim(1, n)
plt.xlabel('Number of trips per active day', fontsize=18)
plt.ylabel('Probability', fontsize=18)
plt.xticks([i + 0.5 for i in range(1, n)], range(1, n))
plt.text(-0.1, 1.05, '(b)', fontdict={'size': 18, 'weight': 'bold'},
transform=ax2.transAxes)
ax3 = plt.subplot(2, 2, 3)
sns.kdeplot(df1[0][:], ax=ax3, shade=True, color=colors[0], label=labels[0])
sns.kdeplot(df2[0][:], ax=ax3, shade=True, color=colors[1], label=labels[1])
sns.kdeplot(df3[0][:], ax=ax3, shade=True, color=colors[2], label=labels[2])
meda = df1[0][:].mean()
medb = df2[0][:].mean()
medc = df3[0][:].mean()
#plt.axvline(meda, color=colors[0], linestyle='dashed', linewidth=2)
#plt.axvline(medb, color=colors[1], linestyle='dashed', linewidth=2)
#plt.axvline(medc, color=colors[2], linestyle='dashed', linewidth=2)
#plt.text(meda + 0.2, 0.02, 'Mean = {}'.format(round(meda, 1)),
#horizontalalignment='left', verticalalignment='center',
#fontsize=16, color=colors[0])
#plt.text(medb - 0.2, 0.02, 'Mean = {}'.format(round(medb, 1)),
#horizontalalignment='right', verticalalignment='center',
#fontsize=16, color=colors[1])
#plt.text(medc - 0.2, 0.02, 'Mean = {}'.format(round(medb, 1)),
#horizontalalignment='right', verticalalignment='center',
#fontsize=16, color=colors[2])
plt.xlim(0, 24)
#plt.xlim(0, 3.5)
#ax.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xticks(range(0, 24 + 4, 4))
plt.xlabel('Activity duration (hours)', fontsize=18)
plt.ylabel('Probability', fontsize=18)
plt.legend(fontsize=18, loc='upper right')
plt.text(-0.1, 1.05, '(c)', fontdict={'size': 18, 'weight': 'bold'},
transform=ax3.transAxes)
#plt.show()
ax4 = plt.subplot(2, 2, 4)
#hist, bins = np.histogram(trip_start_time, range(0,24,1))
#p = hist.astype(np.float32) / len(trip_start_time)
#w = bins[1] - bins[0]
#print (bins)
#plt.bar(bins[:-1], p, width=w, align='edge', color=colors[0], edgecolor='w', alpha=0.8)
unique, counts = np.unique(trip_start_time, return_counts=True)
prob = counts / sum(counts)
w = unique[1] - unique[0]
plt.bar(unique, prob, width=w, align='edge', color=colors[0], edgecolor='w', alpha=0.8)
ax4.set_xlim(0, 23)
unit_label = 4
# ax1.set_yticks([0.0, 0.005, 0.010, 0.015, 0.020])
plt.xticks(np.arange(0, 23 + unit_label, unit_label) + 0.5, tuple([str(i) + '' for i in range(0, 23 + unit_label, unit_label)]))
#ax4.set_xticklabels([str(i) + ':00' for i in range(0, 30, 6)])
plt.xlabel('Trip start time', fontsize=18)
plt.ylabel('Probability', fontsize=18)
plt.text(-0.1, 1.05, '(d)', fontdict={'size': 18, 'weight': 'bold'},
transform=ax4.transAxes)
plt.tight_layout()
if save_fig == 1:
plt.savefig('img/travel_patterns.png', dpi=200)
else:
plt.show()
if __name__ == '__main__':
# with open('data/individual_ID_list_new', 'rb') as fp:
# individual_ID_list = pickle.load(fp)
file_name_all = '../data/samples/sample_500_all_201407_201408.csv'
if not os.path.exists(file_name_all):
data_path = '../data/'
num_ind = 1000
with open(data_path + 'individual_ID_list_test_' + str(num_ind) + '.pickle', 'rb') as fp:
individual_ID_list_test = pickle.load(fp)
individual_ID_list = individual_ID_list_test[0:500]
samples = []
count = 0
for Card_ID in individual_ID_list:
count+=1
if count % 100 == 0:
print('Current id', count, 'Total', len(individual_ID_list))
file_name = '../data/samples/sample_' + str(Card_ID) + '_201407_201408.csv'
if not os.path.exists(file_name):
print(Card_ID, 'not available')
continue
data_temp = pd.read_csv(file_name)
data_temp['Card_ID'] = Card_ID
samples.append(data_temp)
samples = pd.concat(samples)
samples.to_csv(file_name_all, index=False)
else:
print('Just load data')
samples = pd.read_csv(file_name_all)
trip_per_day_list,first_duration_list, duration_list, all_except_duration_list,trip_start_time, num_active_day_list = process_data_trip_perday(samples)
    plot_data_description(trip_per_day_list,num_active_day_list, pd.DataFrame(duration_list,columns=None),pd.DataFrame(first_duration_list,columns=None),
| pd.DataFrame(all_except_duration_list,columns=None) | pandas.DataFrame |
import glob as gb
import librosa
import librosa.display
import numpy as np
import time
import skimage.measure
import os
import scipy
from scipy.spatial import distance
import pandas as pd
import tensorflow.keras as k
import data_utils as du
from skimage.transform import resize
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
start_time = time.time()
# region DataPreparation
def compute_ssm(X, metric="cosine"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
for i in range(D.shape[0]):
for j in range(D.shape[1]):
if np.isnan(D[i, j]):
D[i, j] = 0
D /= D.max()
return 1 - D
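# Illustrative usage sketch (comments only; `X_demo` is a hypothetical feature
# matrix with one row per frame): the result is a square self-similarity matrix
# with values in [0, 1] and 1.0 on the diagonal.
#   X_demo = np.random.rand(100, 12)
#   ssm_demo = compute_ssm(X_demo, metric="cosine")  # shape (100, 100)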
def mel_spectrogram(sr_desired, filepath, window_size, hop_length):
"""This function calculates the mel spectrogram in dB with Librosa library"""
y, sr = librosa.load(filepath, sr=None)
if sr != sr_desired:
y = librosa.core.resample(y, sr, sr_desired)
sr = sr_desired
S = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=window_size, hop_length=hop_length, n_mels=80, fmin=80,
fmax=16000)
S_to_dB = librosa.power_to_db(S, ref=np.max) # convert S in dB
return S_to_dB # S_to_dB is the spectrogam in dB
def fourier_transform(sr_desired, name_song, window_size, hop_length):
"""This function calculates the mel spectrogram in dB with Librosa library"""
y, sr = librosa.load(name_song, sr=None)
if sr != sr_desired:
y = librosa.core.resample(y, sr, sr_desired)
sr = sr_desired
stft = np.abs(librosa.stft(y=y, n_fft=window_size, hop_length=hop_length))
return stft
def max_pooling(stft, pooling_factor):
x_prime = skimage.measure.block_reduce(stft, (1, pooling_factor), np.max)
return x_prime
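# Illustrative example (comments only, hypothetical shapes): pooling an (80, 3000)
# spectrogram with pooling_factor=6 keeps all 80 bands and shortens the time axis
# to roughly 3000 / 6 = 500 frames.
#   pooled_demo = max_pooling(np.random.rand(80, 3000), 6)  # -> shape (80, 500)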
def sslm_gen(spectrogram, pooling_factor, lag, mode, feature):
padding_factor = lag
"""This part pads a mel spectrogram gived the spectrogram a lag parameter
to compare the first rows with the last ones and make the matrix circular"""
pad = np.full((spectrogram.shape[0], padding_factor), -70) # 80x30 frame matrix of -70dB corresponding to padding
S_padded = np.concatenate((pad, spectrogram), axis=1) # padding 30 frames with noise at -70dB at the beginning
"""This part max-poolend the spectrogram in time axis by a factor of p"""
x_prime = max_pooling(S_padded, pooling_factor)
x = []
if feature == "mfcc":
"""This part calculates a circular Self Similarity Lag Matrix given
the mel spectrogram padded and max-pooled"""
# MFCCs calculation from DCT-Type II
MFCCs = scipy.fftpack.dct(x_prime, axis=0, type=2, norm='ortho')
        MFCCs = MFCCs[1:, :]  # 0th component omitted
        # Bagging frames
        m = 2  # bagging parameter in frames
x = [np.roll(MFCCs, n, axis=1) for n in range(m)]
elif feature == "chroma":
"""This part calculates a circular Self Similarity Lag Matrix given
the chromagram padded and max-pooled"""
PCPs = librosa.feature.chroma_stft(S=x_prime, sr=sr_desired, n_fft=window_size, hop_length=hop_length)
PCPs = PCPs[1:, :]
# Bagging frames
m = 2 # Bagging parameter in frames
x = [np.roll(PCPs, n, axis=1) for n in range(m)]
x_hat = np.concatenate(x, axis=0)
# Cosine distance calculation: D[N/p,L/p] matrix
    distances = np.zeros((x_hat.shape[1], padding_factor // p))  # D has dimensions N/p x L/p
for i in range(x_hat.shape[1]): # iteration in columns of x_hat
for l in range(padding_factor // p):
if i - (l + 1) < 0:
cur_dist = 1
elif i - (l + 1) < padding_factor // p:
cur_dist = 1
else:
cur_dist = 0
if mode == "cos":
cur_dist = distance.cosine(x_hat[:, i],
x_hat[:, i - (l + 1)]) # cosine distance between columns i and i-L
elif mode == "euc":
cur_dist = distance.euclidean(x_hat[:, i],
                                              x_hat[:, i - (l + 1)])  # Euclidean distance between columns i and i-L
            if np.isnan(cur_dist):  # NaN-safe check; comparing against float('nan') is always False
                cur_dist = 0
distances[i, l] = cur_dist
# Threshold epsilon[N/p,L/p] calculation
kappa = 0.1
    epsilon = np.zeros((distances.shape[0], padding_factor // p))  # epsilon has dimensions N/p x L/p
for i in range(padding_factor // p, distances.shape[0]): # iteration in columns of x_hat
for l in range(padding_factor // p):
epsilon[i, l] = np.quantile(np.concatenate((distances[i - l, :], distances[i, :])), kappa)
# We remove the padding done before
distances = distances[padding_factor // p:, :]
epsilon = epsilon[padding_factor // p:, :]
x_prime = x_prime[:, padding_factor // p:]
# Self Similarity Lag Matrix
    sslm = scipy.special.expit(1 - distances / epsilon)  # apply the sigmoid function
sslm = np.transpose(sslm)
sslm = skimage.measure.block_reduce(sslm, (1, 3), np.max)
    # Check if the SSLM has NaNs and, if so, substitute them with 0
for i in range(sslm.shape[0]):
for j in range(sslm.shape[1]):
if np.isnan(sslm[i, j]):
sslm[i, j] = 0
# if mode == "euc":
# return sslm, x_prime
# return sslm
return sslm, x_prime
def ssm_gen(spectrogram, pooling_factor):
"""This part max-poolend the spectrogram in time axis by a factor of p"""
x_prime = max_pooling(spectrogram, pooling_factor)
"""This part calculates a circular Self Similarity Matrix given
the mel spectrogram padded and max-pooled"""
# MFCCs calculation from DCT-Type II
MFCCs = scipy.fftpack.dct(x_prime, axis=0, type=2, norm='ortho')
MFCCs = MFCCs[1:, :] # 0 componen ommited
# Bagging frames
m = 2 # baggin parameter in frames
x = [np.roll(MFCCs, n, axis=1) for n in range(m)]
x_hat = np.concatenate(x, axis=0)
x_hat = np.transpose(x_hat)
ssm = compute_ssm(x_hat)
    # Check if the SSM has NaNs and, if so, substitute them with 0
for i in range(ssm.shape[0]):
for j in range(ssm.shape[1]):
if np.isnan(ssm[i, j]):
ssm[i, j] = 0
return ssm
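# Illustrative end-to-end sketch (comments only; "some_audio.wav" is a placeholder
# path, not a file from this project), using the window/hop constants defined below:
#   mel_demo = mel_spectrogram(sr_desired, "some_audio.wav", window_size, hop_length)
#   ssm_demo = ssm_gen(mel_demo, pooling_factor=6)  # square time-by-time matrix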
# endregion
window_size = 2048 # (samples/frame)
hop_length = 1024 # overlap 50% (samples/frame)
sr_desired = 44100
p = 2 # pooling factor
p2 = 3 # 2pool3
L_sec_near = 14 # lag near context in seconds
L_near = round(L_sec_near * sr_desired / hop_length) # conversion of lag L seconds to frames
MASTER_DIR = 'D:/Google Drive/Resources/Dev Stuff/Python/Machine Learning/Master Thesis/'
DEFAULT_LABELPATH = os.path.join(MASTER_DIR, 'Labels/')
TRAIN_DIR = 'F:/Master Thesis Input/NewTrain/'
MIDI_DIR = os.path.join(MASTER_DIR, 'Data/MIDIs/')
def util_main_helper(feature, filepath, mode="cos", predict=False, savename=""):
sslm_near = None
if feature == "mfcc":
mel = mel_spectrogram(sr_desired, filepath, window_size, hop_length)
if mode == "cos":
sslm_near = sslm_gen(mel, p, L_near, mode=mode, feature="mfcc")[0]
# mls = max_pooling(mel, p2)
# Save mels matrices and sslms as numpy arrays in separate paths
# np.save(im_path_mel_near + song_id, mls)
elif mode == "euc":
sslm_near = sslm_gen(mel, p, L_near, mode=mode, feature="mfcc")[0]
if sslm_near.shape[1] < max_pooling(mel, 6).shape[1]:
sslm_near = np.hstack((np.ones((301, 1)), sslm_near))
elif sslm_near.shape[1] > max_pooling(mel, 6).shape[1]:
sslm_near = sslm_near[:, 1:]
elif feature == "chroma":
stft = fourier_transform(sr_desired, filepath, window_size, hop_length)
sslm_near = sslm_gen(stft, p, L_near, mode=mode, feature="chroma")[0]
if mode == "euc":
if sslm_near.shape[1] < max_pooling(stft, 6).shape[1]:
sslm_near = np.hstack((np.ones((301, 1)), sslm_near))
elif sslm_near.shape[1] > max_pooling(stft, 6).shape[1]:
sslm_near = sslm_near[:, 1:]
elif feature == "mls":
mel = mel_spectrogram(sr_desired, filepath, window_size, hop_length)
sslm_near = ssm_gen(mel, pooling_factor=6)
"""
# UNCOMMENT TO DISPLAY FEATURE GRAPHS
# recurrence = librosa.segment.recurrence_matrix(sslm_near, mode='affinity', k=sslm_near.shape[1])
plt.figure(figsize=(15, 10))
if feature == "mls":
plt.title("Mel Log-scaled Spectrogram - Self-Similarity matrix (MLS SSM)")
plt.imshow(sslm_near, origin='lower', cmap='plasma', aspect=0.8) # switch to recurrence if desired
else:
plt_title = "Self-Similarity Lag Matrix (SSLM): "
if feature == "chroma":
plt_title += "Chromas, "
else:
plt_title += "MFCCs, "
if mode == "cos":
plt_title += "Cosine Distance"
else:
plt_title += "Euclidian Distance"
plt.title(plt_title)
plt.imshow(sslm_near.astype(np.float32), origin='lower', cmap='viridis', aspect=0.8)
# switch to recurrence if desired
plt.show()
"""
if not predict:
        # Save matrices and sslms as numpy arrays under the target path (savename)
        np.save(savename, sslm_near)
else:
return sslm_near
def util_main(feature, mode="cos", predict=False, inpath=TRAIN_DIR, midpath=MIDI_DIR):
img_path = ""
if feature == "mfcc":
if mode == "cos":
img_path = os.path.join(inpath, 'SSLM_MFCC_COS/')
elif mode == "euc":
img_path = os.path.join(inpath, 'SSLM_MFCC_EUC/')
elif feature == "chroma":
if mode == "cos":
img_path = os.path.join(inpath, 'SSLM_CRM_COS/')
elif mode == "euc":
img_path = os.path.join(inpath, 'SSLM_CRM_EUC/')
elif feature == "mls":
img_path = os.path.join(inpath, 'MLS/')
if not os.path.exists(img_path):
os.makedirs(img_path)
num_songs = sum([len(files) for r, d, files in os.walk(midpath)])
i = 0
for folder in gb.glob(midpath + "*"):
for file in os.listdir(folder):
# foldername = folder.split('\\')[-1]
name_song, name = file, file.split('/')[-1].split('.')[0]
start_time_song = time.time()
i += 1
song_id = name_song[:-4] # delete .ext characters from the string
print("\tPreparing", song_id, "for processing...")
if str(song_id) + ".npy" not in os.listdir(img_path):
util_main_helper(feature, folder + '/' + name_song, mode, predict, savename=img_path + song_id)
print("\t\tFinished", i, "/", num_songs, "- Duration: {:.2f}s".format(time.time() - start_time_song))
else:
print("\t\tAlready completed. Skipping...\n\t\tFinished", i, "/", num_songs)
# return
print("All files have been converted. Duration: {:.2f}s".format(time.time() - start_time))
def validate_folder_contents(labels, midis, mlsdir, sslm1, sslm2, sslm3, sslm4):
"""Ensure all folders contain files of the same name"""
labelfiles = os.listdir(labels)
midifiles = os.listdir(midis)
mlsfiles = os.listdir(mlsdir)
sslm1files = os.listdir(sslm1)
sslm2files = os.listdir(sslm2)
sslm3files = os.listdir(sslm3)
sslm4files = os.listdir(sslm4)
for i in range(len(labelfiles)):
c_lbl = os.path.splitext(labelfiles[i])[0]
c_midi = os.path.splitext(midifiles[i])[0]
c_mls = os.path.splitext(mlsfiles[i])[0]
c_sslm1 = os.path.splitext(sslm1files[i])[0]
c_sslm2 = os.path.splitext(sslm2files[i])[0]
c_sslm3 = os.path.splitext(sslm3files[i])[0]
c_sslm4 = os.path.splitext(sslm4files[i])[0]
if c_lbl != c_midi or c_lbl != c_mls or\
c_lbl != c_sslm1 or c_lbl != c_sslm2 or c_lbl != c_sslm3 or c_lbl != c_sslm4:
err = FileNotFoundError("File discrepency at index " + str(i))
print("Current labels: ")
print(f"Label: {c_lbl}\nMIDI: {c_midi}\nMLS: {c_mls}\nSSLM-CRM-COS: {c_sslm1}"
f"\nSSLM-CRM-EUC: {c_sslm2}\nSSLM-MFCC-COS: {c_sslm3}\nSSLM-MFCC-EUC: {c_sslm4}")
raise err
if len(labelfiles) != len(midifiles) or len(labelfiles) != len(mlsfiles) or \
len(labelfiles) != len(sslm1files) or len(labelfiles) != len(sslm2files) or\
len(labelfiles) != len(sslm3files) or len(labelfiles) != len(sslm4files):
raise ValueError("Not all directories contain the same number of files")
# region Transformations
def gaussian(x, mu, sig):
"""Create array of labels"""
return np.exp(-np.power((x - mu) / sig, 2.) / 2)
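# Illustrative example (comments only): a Gaussian "bump" centred on a hypothetical
# boundary at mu = 10.0 s with sigma = 0.1, evaluated on a dense time axis.
#   t_demo = np.linspace(0, 20, 2001)
#   g_demo = gaussian(t_demo, mu=10.0, sig=0.1)  # peaks at 1.0 around t = 10 s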
def borders(image, label, labels_sec, label_form):
"""This function transforms labels in sc to gaussians in frames"""
pooling_factor = 6
num_frames = image.shape[2]
repeated_label = []
for i in range(len(labels_sec) - 1):
if labels_sec[i] == labels_sec[i + 1]:
repeated_label.append(i)
labels_sec = np.delete(labels_sec, repeated_label, 0) # labels in seconds
labels_sec = labels_sec / pooling_factor # labels in frames
# Pad frames we padded in images also in labels but in seconds
sr = sr_desired
padding_factor = 50
label_padded = [labels_sec[i] + padding_factor * hop_length / sr for i in range(labels_sec.shape[0])]
vector = np.arange(num_frames)
new_vector = (vector * hop_length + window_size / 2) / sr
sigma = 0.1
gauss_array = []
for mu in (label_padded[1:]): # Ignore first label (beginning of song) due to insignificance (0.000 Silence)
gauss_array = np.append(gauss_array, gaussian(new_vector, mu, sigma))
for i in range(len(gauss_array)):
if gauss_array[i] > 1:
gauss_array[i] = 1
return image, label[1:], gauss_array, label_form
def padding_MLS(image, label, labels_sec, label_form):
"""This function pads 30frames at the begining and end of an image"""
sr = sr_desired
padding_factor = 50
def voss(nrows, ncols=16):
"""Generates pink noise using the Voss-McCartney algorithm.
nrows: number of values to generate
        ncols: number of random sources to add
returns: NumPy array
"""
array = np.empty((nrows, ncols))
array.fill(np.nan)
array[0, :] = np.random.random(ncols)
array[:, 0] = np.random.random(nrows)
# the total number of changes is nrows
n = nrows
cols = np.random.geometric(0.5, n)
cols[cols >= ncols] = 0
rows = np.random.randint(nrows, size=n)
array[rows, cols] = np.random.random(n)
df = pd.DataFrame(array)
df.fillna(method='ffill', axis=0, inplace=True)
total = df.sum(axis=1)
return total.values
n_mels = image.shape[1] # Default(80) - fit padding to image height
y = voss(padding_factor * hop_length - 1)
S = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=window_size, hop_length=hop_length,
n_mels=n_mels, fmin=80, fmax=16000)
S_to_dB = librosa.power_to_db(S, ref=np.max)
pad_image = S_to_dB[np.newaxis, :, :]
# Pad MLS
S_padded = np.concatenate((pad_image, image), axis=-1)
S_padded = np.concatenate((S_padded, pad_image), axis=-1)
return S_padded, label, labels_sec, label_form
def padding_SSLM(image, label, labels_sec, label_form):
"""This function pads 30 frames at the begining and end of an image"""
padding_factor = 50
# Pad SSLM
pad_image = np.full((image.shape[1], padding_factor), 1)
pad_image = pad_image[np.newaxis, :, :]
S_padded = np.concatenate((pad_image, image), axis=-1)
S_padded = np.concatenate((S_padded, pad_image), axis=-1)
return S_padded, label, labels_sec, label_form
def normalize_image(image, label, labels_sec, label_form):
"""This function normalizes an image"""
    image = np.squeeze(image)  # remove the singleton channel dimension
def normalize(array):
"""This function normalizes a matrix along x axis (frequency)"""
normalized = np.zeros((array.shape[0], array.shape[1]))
for i in range(array.shape[0]):
normalized[i, :] = (array[i, :] - np.mean(array[i, :])) / np.std(array[i, :])
return normalized
image = normalize(image)
# image = (image-np.min(image))/(np.max(image)-np.min(image))
image = np.expand_dims(image, axis=0)
return image, label, labels_sec, label_form
# endregion
# Load MLS and SSLM Data
class BuildDataloader(k.utils.Sequence):
def __init__(self, images_path, label_path=DEFAULT_LABELPATH, transforms=None, batch_size=32, end=-1, reshape=True):
self.songs_list = []
self.images_path = images_path
self.images_list = []
self.labels_path = label_path
self.labels_list = []
self.labels_sec_list = []
self.labels_form_list = []
self.batch_size = batch_size
self.n = 0
self.reshape = reshape
print("Building dataloader for " + self.images_path)
cnt = 1
for (im_dirpath, im_dirnames, im_filenames) in os.walk(self.images_path):
for f in im_filenames:
if f.endswith('.npy'):
self.songs_list.append(os.path.splitext(f)[0])
# print("Reading file #" + str(cnt))
img_path = im_dirpath + f
image = np.load(img_path, allow_pickle=True)
if image.ndim == 1:
raise ValueError("Erroneous file:", img_path, "Shape:", image.shape, image.ndim)
else:
# image = resize(image, (300, 500))
# image = (image - image.mean()) / (image.std() + 1e-8)
if reshape:
image = np.mean(image, axis=0)
else:
image1 = np.mean(image, axis=0)
image2 = np.var(image, axis=0)
image = np.array([image1, image2])
self.images_list.append(image)
cnt += 1
if end != -1:
if cnt == end + 1:
break
lbls_seconds, lbls_phrases, lbl_forms = du.ReadLabelSecondsPhrasesFromFolder(lblpath=self.labels_path, stop=cnt)
self.labels_list = lbls_phrases
self.labels_sec_list = lbls_seconds
self.labels_form_list = lbl_forms
self.transforms = transforms
self.max = self.__len__()
def __len__(self):
return len(self.images_list)
def __getitem__(self, index):
# print("LEN: " + str(self.max) + " TRU LEN: " + str(len(self.images_list)) + " INDX: " + str(index))
image = self.images_list[index]
# print(image.shape, image.ndim)
# print(image)
# if image.ndim == 1:
# print(image)
if self.reshape:
image = image[np.newaxis, :, np.newaxis]
labels = self.labels_list[index]
# print("Labels: ", str(len(labels)), "Images: ", str(len(image)), image.shape)
labels_sec = self.labels_sec_list[index]
labels_form = self.labels_form_list[index]
song_name = self.songs_list[index]
if self.transforms is not None:
for t in self.transforms:
image, labels, labels_sec, labels_form = t(image, labels, labels_sec, labels_form)
return image, [labels, labels_sec, labels_form, song_name]
def __next__(self):
if self.n >= self.max:
self.n = 0
result = self.__getitem__(self.n)
self.n += 1
return result
def getNumClasses(self):
return len(self.labels_form_list[1])
def getLabels(self):
return self.labels_form_list
def getImages(self):
return self.images_list
def getCurrentIndex(self):
return self.n
def getSong(self, index):
return self.songs_list[index]
def getFormLabel(self, index):
return self.labels_form_list[index]
def getDuration(self, index):
return self.labels_sec_list[index][-1]
def get_midi_dataframe(building_df=False):
df = pd.DataFrame(columns=['spectral_contrast_mean', 'spectral_contrast_var'])
if building_df:
df2 = pd.DataFrame(columns=['chroma_stft_mean', 'chroma_stft_var',
'chroma_cqt_mean', 'chroma_cqt_var',
'chroma_cens_mean', 'chroma_cens_var',
'mel_mean', 'mel_var',
'mfcc_mean', 'mfcc_var',
'spectral_bandwidth_mean', 'spectral_bandwidth_var',
'spectral_centroid_mean', 'spectral_centroid_var',
'spectral_flatness_mean', 'spectral_flatness_var',
'spectral_rolloff_mean', 'spectral_rolloff_var',
'poly_features_mean', 'poly_features_var',
'tonnetz_mean', 'tonnetz_var',
'zero_crossing_mean', 'zero_crossing_var',
'tempogram_mean', 'tempogram_var',
'fourier_tempo_mean', 'fourier_tempo_var'])
df = pd.concat([df, df2], axis=1)
return df
def get_audio_features(df, cnt, mid_path, building_df=False):
X, sample_rate = librosa.load(mid_path, res_type='kaiser_fast', duration=3, sr=44100, offset=0.5)
contrast = librosa.feature.spectral_contrast(y=X, sr=sample_rate)
""" Plot spectral contrast
plt.figure(figsize=(10, 4))
librosa.display.specshow(contrast, cmap='plasma', x_axis='time')
plt.colorbar()
plt.ylabel('Frequency bands')
plt.title('Spectral contrast')
plt.tight_layout()
plt.show()
"""
    contrast2 = np.var(contrast, axis=0)  # per-frame variance, computed before `contrast` is overwritten below
    contrast = np.mean(contrast, axis=0)  # per-frame mean across frequency bands
if building_df:
chroma_cens = librosa.feature.chroma_cens(y=X, sr=sample_rate)
chroma_cqt = librosa.feature.chroma_cqt(y=X, sr=sample_rate)
chroma_stft = librosa.feature.chroma_stft(y=X, sr=sample_rate)
mel_spec = librosa.feature.melspectrogram(y=X, sr=sample_rate)
mfcc_spec = librosa.feature.mfcc(y=X, sr=sample_rate)
spec_bdwth = librosa.feature.spectral_bandwidth(y=X, sr=sample_rate)
spec_centrd = librosa.feature.spectral_centroid(y=X, sr=sample_rate)
spec_flatns = librosa.feature.spectral_flatness(y=X)
spec_rolloff = librosa.feature.spectral_rolloff(y=X, sr=sample_rate)
poly_feat = librosa.feature.poly_features(y=X, sr=sample_rate)
tonnetz = librosa.feature.tonnetz(y=X, sr=sample_rate)
zero_cross = librosa.feature.zero_crossing_rate(y=X)
tempogram = librosa.feature.tempogram(y=X, sr=sample_rate)
fouriertemp = librosa.feature.fourier_tempogram(y=X, sr=sample_rate) # Not used in model, repurpose for others?
df.loc[cnt] = [contrast, contrast2, # 0, 1
np.mean(chroma_cens, axis=0), np.var(chroma_cens, axis=0), # 2, 3
np.mean(chroma_cqt, axis=0), np.var(chroma_cqt, axis=0), # 4, 5
np.mean(chroma_stft, axis=0), np.var(chroma_stft, axis=0), # 6, 7
np.mean(mel_spec, axis=0), np.var(mel_spec, axis=0), # 8, 9
np.mean(mfcc_spec, axis=0), np.var(mfcc_spec, axis=0), # 10, 11
np.mean(spec_bdwth, axis=0), np.var(spec_bdwth, axis=0), # 12, 13
np.mean(spec_centrd, axis=0), np.var(spec_centrd, axis=0), # 14, 15
np.mean(spec_flatns, axis=0), np.var(spec_flatns, axis=0), # 16, 17
np.mean(spec_rolloff, axis=0), np.var(spec_rolloff, axis=0), # 18, 19
np.mean(poly_feat, axis=0), np.var(poly_feat, axis=0), # 20, 21
np.mean(tonnetz, axis=0), np.var(tonnetz, axis=0), # 22, 23
np.mean(zero_cross, axis=0), np.var(zero_cross, axis=0), # 24, 25
np.mean(tempogram, axis=0), np.var(tempogram, axis=0), # 26, 27
np.mean(fouriertemp, axis=0), np.var(fouriertemp, axis=0)] # 28, 29
else:
df.loc[cnt] = [contrast, contrast2]
return df
# Load MIDI Data
class BuildMIDIloader(k.utils.Sequence):
def __init__(self, midi_path, label_path=DEFAULT_LABELPATH,
transforms=None, batch_size=32, end=-1, reshape=True, building_df=False):
self.songs_list = []
self.midi_path = midi_path
self.midi_list = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
| testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
"""Standardize a list of addresses using the USPS API.
Multi-threaded, since the API response time is slow.
Get an API key at https://registration.shippingapis.com.
"""
from __future__ import print_function
import threading
import sys
import pandas as pd
from tqdm import tqdm
from collections import deque
from pyusps import address_information
NUM_THREADS = 100
def standardize_address(
df,
type='vf',
col1=None,
col2=None,
key=None,
usps_key=None,
new_col='standardized_address'):
"""Standardize a list of addresses using the USPS API.
Arguments:
df: a DataFrame of data
type: 'vf' (NY State Voter File)
or 'raw', two columns
col1: if using 'raw', column name for first line of address
col2: if using 'raw', column name for second line of address
key: if using 'raw', column name for the key to lookup on
usps_key: USPS API key
new_col: name of new column to add."""
threads = deque()
results = {}
for obj in tqdm(df.iterrows(), total=df.shape[0]):
row = obj[1]
        if len(threads) >= NUM_THREADS:
            # Wait for the oldest worker to finish before scheduling this row,
            # so no row is silently skipped when the pool is full.
            t = threads.popleft()
            t.join()
        if type == 'vf':
            t = threading.Thread(
                target=vf_standardize_address, args=(
                    row, results, usps_key))
        elif type == 'raw':
            t = threading.Thread(
                target=gen_standardize_address, args=(
                    row[col1], row[col2], row[key], results, usps_key))
        else:
            raise Exception("type not recognized")
        t.start()
        threads.append(t)
while threads:
t = threads.popleft()
t.join()
sys.stderr.flush()
sys.stdout.flush()
if type == 'vf':
df[new_col] = df['SBOEID'].map(results)
elif type == 'raw':
df[new_col] = df[key].map(results)
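# Illustrative call (comments only; the column names and key value are hypothetical):
#   standardize_address(df, type='raw', col1='address_line1', col2='city_state_zip',
#                       key='record_id', usps_key='YOUR_USPS_API_KEY')
# After it returns, df['standardized_address'] holds the USPS-standardized strings.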
def vf_standardize_address(row, results, usps_key):
"""Used for the NY State Voter File only."""
rhalfcode = '' if | pd.isnull(row['RHALFCODE']) | pandas.isnull |
import pandas as pd
import os
def get_options_data() -> pd.DataFrame:
"""Get FX option contracts (1m maturity) vol, in frac of 1 p.a.
Output contains
'base', 'counter' - str - 3-letter ISO
'date' - pd.Timestamp
'd10', 'd25' ... 'd90' - float - vol at that delta, in frac of 1 p.a.
"""
data = pd.read_feather(
os.path.join(os.environ.get("RESEARCH_DATA_PATH"),
"fx",
"fx-iv-by-delta-1m-blb-d.ftr")
)
data.loc[:, "vol"] /= 100
data.loc[:, "delta"] = data["delta"].map("d{}".format)
data.loc[:, "date"] = pd.to_datetime(data.loc[:, "date"])
data = data\
.pivot(index=["base", "counter", "date"],
columns="delta",
values="vol")\
.reset_index()
return data
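# Illustrative usage (comments only): assumes the RESEARCH_DATA_PATH environment
# variable points at the private data tree containing the feather file above.
#   opts_demo = get_options_data()
#   one_pair_demo = opts_demo[(opts_demo["base"] == "eur") & (opts_demo["counter"] == "usd")]
#   # ("eur"/"usd" casing is an assumption about how the ISO codes are stored)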
def get_interest_rates() -> pd.DataFrame:
"""Get interest rates, in fractions of 1 p.a.
Output contains
'date' - pd.Timestamp
'currency' - str - 3-letter ISO
'value' - float - interest rate, in frac of 1 p.a.
"""
res = pd.read_feather(
os.path.join(os.environ.get("RESEARCH_DATA_PATH"),
"fixed-income",
"ois_d.ftr")
)
res = res.query("maturity == '1m'").drop("maturity", axis=1)
res.loc[:, "value"] = res.loc[:, "value"] / 100
res.loc[:, "date"] = | pd.to_datetime(res.loc[:, "date"]) | pandas.to_datetime |
import nose
import os
import string
import unittest
from pandas import Series, DataFrame, MultiIndex, PeriodIndex
import pandas.util.testing as tm
import numpy as np
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
class TestSeriesPlots(unittest.TestCase):
@classmethod
def setUpClass(cls):
import sys
if 'IPython' in sys.modules:
raise nose.SkipTest
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
raise nose.SkipTest
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
Series(np.random.randn(10)).plot(kind='bar',color='black')
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
class TestDataFramePlots(unittest.TestCase):
@classmethod
def setUpClass(cls):
import sys
if 'IPython' in sys.modules:
raise nose.SkipTest
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
raise nose.SkipTest
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x':[1,2], 'y':[3,4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = zip(list(string.ascii_letters[:10]), range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
@slow
def test_plot_bar(self):
df = DataFrame(np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot, kind='bar')
_check_plot_works(df.plot, kind='bar', legend=False)
_check_plot_works(df.plot, kind='bar', subplots=True)
_check_plot_works(df.plot, kind='bar', stacked=True)
df = DataFrame(np.random.randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=range(15))
_check_plot_works(df.plot, kind='bar')
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
_check_plot_works(df.plot, kind='bar')
@slow
def test_boxplot(self):
df = DataFrame(np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot)
_check_plot_works(df.boxplot, column=['one', 'two'])
_check_plot_works(df.boxplot, column=['one', 'two'],
by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
@slow
def test_hist(self):
df = DataFrame(np.random.randn(100, 4))
_check_plot_works(df.hist)
_check_plot_works(df.hist, grid=False)
#make sure layout is handled
df = DataFrame(np.random.randn(100, 3))
_check_plot_works(df.hist)
#make sure layout is handled
df = DataFrame(np.random.randn(100, 6))
_check_plot_works(df.hist)
#make sure kwargs are handled
ser = df[0]
xf, yf = 20, 20
xrot, yrot = 30, 30
ax = ser.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
xf, yf = 20, 20
xrot, yrot = 30, 30
axes = df.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
for i, ax in enumerate(axes.ravel()):
if i < len(df.columns):
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
@slow
def test_scatter(self):
df = DataFrame(np.random.randn(100, 4))
import pandas.tools.plotting as plt
def scat(**kwds):
return | plt.scatter_matrix(df, **kwds) | pandas.tools.plotting.scatter_matrix |
import pandas as pd
import numpy as np
def get_rating_summary(df,num_users = None, num_items = None):
'''
print summary of user-item matrix
args:
df: data frame which contains userId & itemId columns
'''
if num_users == None:
num_users = len(df['userId'].unique())
if num_items == None:
num_items = len(df['itemId'].unique())
num_values = len(df)
sparsity = 1 - (num_values/(num_users * num_items))
print('# users: {0}, # items: {1}, # vals: {2}, sparsity: {3:.7f}'.
format(num_users, num_items, num_values, sparsity))
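# Illustrative usage (comments only): `ratings_demo` is a hypothetical DataFrame
# with at least 'userId' and 'itemId' columns.
#   ratings_demo = pd.DataFrame({'userId': [0, 0, 1], 'itemId': [10, 11, 10]})
#   get_rating_summary(ratings_demo)  # prints user/item counts and sparsity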
def get_tag_summary(df, num_users = None, num_items = None, tagcol = 'tagId'):
'''
print summary of user-item-tag matrix
args:
df: data frame which contains userId & itemId & tagId columns
'''
if num_users == None:
num_users = len(df['userId'].unique())
if num_items == None:
num_items = len(df['itemId'].unique())
num_tags = len(df[tagcol].unique())
tagnum_per_interaction = df.groupby(['userId','itemId'])[tagcol].apply(lambda x:len(set(x))).reset_index()[tagcol]
num_interaction = len(tagnum_per_interaction)
sparsity = 1 - (num_interaction/(num_users * num_items))
tagged_items_per_user = df.groupby('userId')['itemId'].apply(lambda x:len(set(x))).reset_index()['itemId']
tag_count = df.groupby(tagcol)['itemId'].apply(len).reset_index()['itemId']
print('# users: {0}, # items: {1}, # tags: {2}, #interaction: {3}, sparsity: {4:.7f}'.
format(num_users, num_items, num_tags, num_interaction, sparsity))
print("summary for the number of tags per interation")
print(tagnum_per_interaction.describe())
print("summary for the number of tagged items per users")
print(tagged_items_per_user.describe())
print("summary for the occurence per tag")
print(tag_count.describe())
def preprocess_ratings(ratings, min_rating):
if min_rating > 1:
ratings = ratings[ratings['rating'] >= min_rating]
return ratings[['userId','itemId']]
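# Illustrative usage (comments only): keep only ratings of at least 4 before
# building the implicit user-item interaction list; `ratings_demo` is hypothetical
# and would need a 'rating' column in addition to 'userId' and 'itemId'.
#   interactions_demo = preprocess_ratings(ratings_demo, min_rating=4)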
def preprocess_tags(tags, tag_user_threshold, tag_item_threshold):
'''
    Normalize tags (lowercase, strip periods) and remove rare tags.
'''
tags = tags[['userId','itemId','tag']]
tags['tag'] = tags['tag'].apply(lambda x: x.lower().replace('.', ''))
#tags.loc[:,'tag'] = tt
if tag_item_threshold > 1:
#limit the vocabulary of tags to those that have been applied by at least "tag_item_threshold" items
counter = tags.groupby('tag')['itemId'].apply(lambda x: len(set(x))).to_frame('count').reset_index()
counter = counter[counter['count']>=tag_item_threshold]
tags = pd.merge(tags,counter,on='tag')[['userId','itemId','tag']]
if tag_user_threshold > 1:
#limit the vocabulary of tags to those that have been applied by at least "tag_user_threshold" users
counter = tags.groupby('tag')['userId'].apply(lambda x: len(set(x))).to_frame('count').reset_index()
counter = counter[counter['count']>=tag_user_threshold]
tags = pd.merge(tags,counter,on='tag')[['userId','itemId','tag']]
return tags
def set_tagId(tags):
'''
    set a unique tag id for each tag.
'''
tag_list = list(tags['tag'].unique())
tagId_list = list(range(len(tag_list)))
tag_tagId = pd.DataFrame({'tag':tag_list,'tagId':tagId_list})
tags = pd.merge(tags,tag_tagId, on='tag')[['userId','itemId','tagId']]
return tags, tag_tagId
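# Illustrative usage (comments only): `tags_demo` is a hypothetical DataFrame with
# 'userId', 'itemId' and 'tag' columns, e.g. the output of preprocess_tags().
#   tagged_demo, tag_lookup_demo = set_tagId(tags_demo)
#   # tag_lookup_demo maps every distinct tag string to its integer tagId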
def _update_id(ratings, tags):
old_itemId = ratings['itemId'].unique()
new_itemId = np.arange(len(old_itemId))
updated_itemId = pd.DataFrame({'itemId':old_itemId,'new_itemId':new_itemId})
old_userId = ratings['userId'].unique()
new_userId = np.arange(len(old_userId))
updated_userId = pd.DataFrame({'userId':old_userId,'new_userId':new_userId})
ratings = pd.merge(ratings,updated_itemId,on='itemId')[['userId','new_itemId','rating']].rename(columns={'new_itemId':'itemId'})
ratings = pd.merge(ratings,updated_userId,on='userId')[['new_userId','itemId','rating']].rename(columns={'new_userId':'userId'})
    # remove items only in tag interactions and users only in tag interactions: before: 310,041 after: 305,437
tags = pd.merge(tags,updated_itemId,on='itemId')[['userId','new_itemId','tagId']].rename(columns={'new_itemId':'itemId'})
tags = pd.merge(tags,updated_userId,on='userId')[['new_userId','itemId','tagId']].rename(columns={'new_userId':'userId'})
return ratings, tags, updated_itemId
def update_id(ratings, tags):
    # consider only items that both have tags and appear in ratings (this removes items with too few interactions)
old_itemId = list(set(tags['itemId'].unique()) & set(ratings['itemId'].unique()))
new_itemId = np.arange(len(old_itemId))
updated_itemId = pd.DataFrame({'itemId':old_itemId,'new_itemId':new_itemId})
old_userId = ratings['userId'].unique()
new_userId = np.arange(len(old_userId))
print(new_userId[-1])
updated_userId = pd.DataFrame({'userId':old_userId,'new_userId':new_userId})
ratings = pd.merge(ratings,updated_itemId,on='itemId')[['userId','new_itemId','rating']].rename(columns={'new_itemId':'itemId'})
ratings = | pd.merge(ratings,updated_userId,on='userId') | pandas.merge |
import logging
from typing import Optional
import numpy as np
import pandas as pd
from sklearn import utils
from lob_data_utils import lob, model
from sklearn.decomposition import PCA
from sklearn.svm import SVC
logger = logging.getLogger(__name__)
class SvmGdfResults(object):
def __init__(self, stock, r=1.0, s=1.0, data_length=10000, gdf_filename_pattern='',
data_dir='../data/data_gdf', reg_data_dir='../data/prepared'):
self.stock = stock
self.r = r
self.s = s
self.data_length = data_length
self.gdf_filename_pattern = gdf_filename_pattern
self.data_dir = data_dir
self.reg_data_dir = reg_data_dir
self.df, self.df_test = self._read_stock()
all_gdf = ['gdf_{}'.format(i) for i in range(0, 50)]
all_gdf_que = ['gdf_{}'.format(i) for i in range(0, 50)] + ['queue_imbalance']
all_gdf_que_prev = ['gdf_{}'.format(i) for i in range(0, 50)] + ['queue_imbalance', 'prev_queue_imbalance']
feature_columns_dict = {
'que': ['queue_imbalance'],
'que_prev': ['queue_imbalance', 'prev_queue_imbalance'],
'gdf_24_26': ['gdf_24', 'gdf_25'],
'gdf_24-26_que': ['gdf_24', 'gdf_25', 'queue_imbalance'],
'gdf_24-26_que_prev': ['gdf_24', 'gdf_25', 'queue_imbalance', 'prev_queue_imbalance'],
'gdf_23-27': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26'],
'gdf_23-27_que': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26', 'queue_imbalance'],
'gdf_23-27_que_prev': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26', 'queue_imbalance', 'prev_queue_imbalance'],
'gdf_20_30': ['gdf_{}'.format(i) for i in range(20, 30)],
'gdf_20_30_que': ['gdf_{}'.format(i) for i in range(20, 30)] + ['queue_imbalance'],
'gdf_20_30_que_prev': ['gdf_{}'.format(i) for i in range(20, 30)] + ['queue_imbalance', 'prev_queue_imbalance'],
'gdf_0_50': all_gdf,
'gdf_0-50_que': all_gdf_que,
'gdf_0-50_que_prev': all_gdf_que_prev,
'pca_gdf1': all_gdf,
'pca_gdf2': all_gdf,
'pca_gdf3': all_gdf,
'pca_gdf4': all_gdf,
'pca_gdf5': all_gdf,
'pca_gdf6': all_gdf,
'pca_gdf7': all_gdf,
'pca_gdf8': all_gdf,
'pca_gdf9': all_gdf,
'pca_gdf10': all_gdf,
'pca_n_gdf': all_gdf,
'pca_gdf_que1': all_gdf_que,
'pca_gdf_que2': all_gdf_que,
'pca_gdf_que3': all_gdf_que,
'pca_gdf_que4': all_gdf_que,
'pca_gdf_que5': all_gdf_que,
'pca_gdf_que6': all_gdf_que,
'pca_gdf_que7': all_gdf_que,
'pca_gdf_que8': all_gdf_que,
'pca_gdf_que9': all_gdf_que,
'pca_gdf_que10': all_gdf_que,
'pca_n_gdf_que': all_gdf_que,
'pca_gdf_que_prev1': all_gdf_que_prev,
'pca_gdf_que_prev2': all_gdf_que_prev,
'pca_gdf_que_prev3': all_gdf_que_prev,
'pca_gdf_que_prev4': all_gdf_que_prev,
'pca_gdf_que_prev5': all_gdf_que_prev,
'pca_gdf_que_prev6': all_gdf_que_prev,
'pca_gdf_que_prev7': all_gdf_que_prev,
'pca_gdf_que_prev8': all_gdf_que_prev,
'pca_gdf_que_prev9': all_gdf_que_prev,
'pca_gdf_que_prev10': all_gdf_que_prev,
'pca_n_gdf_que_prev': all_gdf_que_prev,
'pca_gdf_que_prev_split10': all_gdf_que_prev
}
def get_score_for_clf(self, clf, df_test, feature_name, pca=None):
x_test = df_test[self.feature_columns_dict[feature_name]]
if pca:
x_test = pca.transform(x_test)
y_test = df_test['mid_price_indicator'].values
return model.test_model(clf, x_test, y_test)
@staticmethod
def get_number_of_pca_components(feature_name: str) -> Optional[int]:
if 'pca_gdf_que_prev_split' in feature_name:
return int(feature_name.replace('pca_gdf_que_prev_split', ''))
if 'pca_gdf_que_prev' in feature_name:
return int(feature_name.replace('pca_gdf_que_prev', ''))
if 'pca_gdf_que' in feature_name:
return int(feature_name.replace('pca_gdf_que', ''))
if 'pca_gdf' in feature_name:
return int(feature_name.replace('pca_gdf', ''))
return None
@classmethod
def split_sequences(cls, sequences, labels, n_steps):
X, y = list(), list()
for i in range(len(sequences)):
end_ix = i + n_steps
if end_ix > len(sequences):
break
seq_x = sequences[i:end_ix]
lab = labels[end_ix - 1]
X.append(seq_x)
y.append(lab)
return np.array(X), np.array(y)
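    # Example (illustrative only): with n_steps=2 the rows are grouped into
    # overlapping windows and each window takes the label of its last element:
    #   X, y = SvmGdfResults.split_sequences(np.arange(10).reshape(5, 2),
    #                                        np.array([0, 1, 0, 1, 1]), n_steps=2)
    #   # X.shape == (4, 2, 2), y == [1, 0, 1, 1]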
@staticmethod
def get_mean_scores(scores: dict) -> dict:
mean_scores = {}
for k, v in scores.items():
mean_scores[k] = np.mean(v)
return mean_scores
def get_score_for_clf_split_pca(self, clf, df_test, feature_name, pca=None) -> dict:
x_test = df_test[self.feature_columns_dict[feature_name]]
x_test_pca = x_test[[col for col in x_test.columns if 'gdf' in col]]
x_test = x_test[[col for col in x_test.columns if 'gdf' not in col]]
if pca:
x_test_pca = pca.transform(x_test_pca)
for n in range(pca.n_components):
x_test['pca_{}'.format(n)] = x_test_pca[:, n]
y_test = df_test['mid_price_indicator'].values
return model.test_model(clf, x_test, y_test)
def get_pca(self, feature_name) -> Optional[PCA]:
train_x = self.df[self.feature_columns_dict[feature_name]].values
if feature_name in ['pca_n_gdf_que', 'pca_n_gdf_que_prev', 'pca_n_gdf']:
n_components = self.calculate_number_of_components(train_x, threshold=0.99)
else:
n_components = self.get_number_of_pca_components(feature_name)
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_x)
return pca
return None
@classmethod
def calculate_number_of_components(cls, train_x, threshold=0.99) -> int:
pca = PCA(n_components=10)
pca.fit(train_x)
for i in range(1, len(pca.explained_variance_ratio_)):
sum_of_ratio = np.sum(pca.explained_variance_ratio_[0:i])
if sum_of_ratio > threshold:
return i
return 10
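    # Example (illustrative numbers only): if pca.explained_variance_ratio_ starts
    # [0.70, 0.25, 0.049, ...], the cumulative sum first exceeds threshold=0.99 at
    # i=3 (0.70 + 0.25 + 0.049 = 0.999), so 3 components are kept; 10 is the
    # fallback when the threshold is never reached.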
def get_classes_weights(self):
y_train = self.df['mid_price_indicator'].values
classes = np.unique(y_train)
class_weight_list = utils.class_weight.compute_class_weight('balanced', classes, y_train)
class_weights = {classes[0]: class_weight_list[0], classes[1]: class_weight_list[1]}
return class_weights
def train_clf_with_split_pca(self, clf, feature_name, method=None):
"""
Deprecated
"""
logger.info('Training %s r=%s s=%s:',
self.stock, self.r, self.s)
train_x = self.df[self.feature_columns_dict[feature_name]]
train_pca = train_x[[col for col in train_x.columns if 'gdf' in col]]
train_x = train_x[[col for col in train_x.columns if 'gdf' not in col]]
n_components = self.get_number_of_pca_components(feature_name)
pca = None
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_pca)
train_pca = pca.transform(train_pca)
for n in range(n_components):
train_x['pca_{}'.format(n)] = train_pca[:, n]
scores = model.validate_model(clf, train_x, self.df['mid_price_indicator'])
res = {
**self.get_mean_scores(scores),
'stock': self.stock,
'method': method,
'features': feature_name
}
test_scores = self.get_score_for_clf_split_pca(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def get_train_set(self, feature_name='', n_steps=None):
train_x = self.df[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if n_steps:
train_x, train_y = self.split_sequences(train_x, train_y, n_steps=n_steps)
else:
train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
return train_x, train_y
def get_test_set(self, feature_name='', n_steps=None):
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
test_x = pca.transform(test_x)
if n_steps:
test_x, test_y = self.split_sequences(test_x, test_y, n_steps=n_steps)
return test_x, test_y
def train_mlp(self, clf, feature_name='', should_validate=True, method=None,
fit_kwargs=None, compile_kwargs=None, plot_name=None, class_weight=None,
should_return_model=False):
logger.info('Training %s r=%s s=%s: clf=%s', self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]].values
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
if should_validate:
scores_arrays, m = model.validate_model_lstm(
clf, train_x, train_y, fit_kwargs=fit_kwargs, compile_kwargs=compile_kwargs,
plot_name=plot_name, class_weight=class_weight, print_debug=False)
scores = self.get_mean_scores(scores_arrays)
else:
m = clf()
scores = model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight)
if not method:
method = 'mlp'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
m = clf()
model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight) # to have a clean fitted model
test_scores = model.test_model(m, test_x, test_y, is_lstm=True)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
if should_return_model:
return {**res, **test_scores, 'arch': m.to_json()}, m
else:
return {**res, **test_scores, 'arch': m.to_json()}
def train_lstm(self, clf, feature_name='', should_validate=True, method=None,
fit_kwargs=None, compile_kwargs=None, n_steps=None,
plot_name=None, class_weight=None, should_return_model=False):
logger.info('Training %s r=%s s=%s: clf=%s', self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]].values
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
if n_steps:
train_x, train_y = self.split_sequences(train_x, train_y, n_steps=n_steps)
test_x, test_y = self.split_sequences(test_x, test_y, n_steps=n_steps)
else:
train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
test_x = np.reshape(test_x, (test_x.shape[0], 1, test_x.shape[1]))
if should_validate:
scores_arrays, m = model.validate_model_lstm(
clf, train_x, train_y, fit_kwargs=fit_kwargs, compile_kwargs=compile_kwargs,
plot_name=plot_name, class_weight=class_weight, print_debug=False)
scores = self.get_mean_scores(scores_arrays)
else:
m = clf()
scores = model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight)
if not method:
method = 'lstm'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
# m = clf()
# model.train_model(
# m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
# class_weight=class_weight)
test_scores = model.test_model(m, test_x, test_y, is_lstm=True)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
if should_return_model:
return {**res, **test_scores, 'arch': m.to_json()}, m
else:
return {**res, **test_scores, 'arch': m.to_json()}
def train_clf(self, clf, feature_name='', should_validate=True, method=None, class_weight=None):
logger.info('Training %s r=%s s=%s: clf=%s',
self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]]
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if should_validate:
scores_arrays = model.validate_model(clf, train_x, self.df['mid_price_indicator'],
class_weight=class_weight)
scores = self.get_mean_scores(scores_arrays)
else:
scores = model.train_model(clf, train_x, self.df['mid_price_indicator'], class_weight=class_weight)
if not method:
method = 'logistic'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
test_scores = self.get_score_for_clf(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def train_svm(self, C=np.nan, gamma=np.nan, feature_name='', kernel='rbf', coef0=np.nan, should_validate=True,
class_weight=None):
logger.info('Training %s r=%s s=%s: kernel=%s C=%s gamma=%s coef0=%s',
self.stock, self.r, self.s, kernel, C, gamma, coef0)
if C and gamma and coef0:
clf = SVC(kernel=kernel, C=C, gamma=gamma, coef0=coef0)
elif C and gamma:
clf = SVC(kernel=kernel, C=C, gamma=gamma)
else:
clf = SVC(kernel=kernel)
train_x = self.df[self.feature_columns_dict[feature_name]]
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if should_validate:
scores_arrays = model.validate_model(clf, train_x, self.df['mid_price_indicator'],
class_weight=class_weight)
scores = self.get_mean_scores(scores_arrays)
else:
scores = model.train_model(clf, train_x, self.df['mid_price_indicator'], class_weight=class_weight)
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'C': C,
'gamma': gamma,
'coef0': coef0,
'kernel': kernel,
'features': feature_name,
'pca_components': components_num
}
test_scores = self.get_score_for_clf(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def _read_stock(self):
gdf_filename = self.gdf_filename_pattern.format(self.stock, self.r, self.s)
reg_filename = '{}'.format(self.stock)
logger.debug('Will read %s and %s', gdf_filename, reg_filename)
d = lob.load_prepared_data(
gdf_filename, data_dir=self.data_dir, length=self.data_length)
if len(d) == 2:
df, df_test = d
else:
return pd.DataFrame(), pd.DataFrame()
df_reg, df_reg_test = lob.load_prepared_data(
reg_filename, data_dir=self.reg_data_dir, length=self.data_length)
df['datetime'] = df_reg['Unnamed: 0']
df['bid_price'] = df_reg['bid_price']
df['ask_price'] = df_reg['ask_price']
df['queue_imbalance'] = df_reg['queue_imbalance']
df['prev_queue_imbalance'] = df['queue_imbalance'].shift()
df.index = pd.to_datetime(df['datetime'])
df.dropna(inplace=True)
df_test['datetime'] = df_reg_test['Unnamed: 0']
df_test['bid_price'] = df_reg_test['bid_price']
df_test['ask_price'] = df_reg_test['ask_price']
df_test['queue_imbalance'] = df_reg_test['queue_imbalance']
df_test['prev_queue_imbalance'] = df_test['queue_imbalance'].shift()
df_test.index = | pd.to_datetime(df_test['datetime']) | pandas.to_datetime |
from datetime import datetime
import pandas as pd
from ..market_data_sources.base_data_connection import BaseDataConnection
from ..utils import logger, files_utils, config_utils
from ..market_data_sources.yahoo_connection import YahooConnection
class DataReader:
NAME = 'Data Reader'
def __init__(self):
        self._market_data_source: BaseDataConnection
        self._statements_data_source: BaseDataConnection
self._set_sources()
def __repr__(self):
return self.NAME
def _set_sources(self) -> None:
"""
Sets the data source from the config file
:return:
"""
prices_data_source = config_utils.fetch_data_sources('market_data')
statements_data_source = config_utils.fetch_data_sources('statements')
if prices_data_source == 'Yahoo':
self._market_data_source = YahooConnection()
else:
logger.logging.error(f'prices datasource: {prices_data_source} not valid')
return None
        if statements_data_source == 'Yahoo':
self._statements_data_source = YahooConnection()
else:
logger.logging.error(f'statements datasource: {statements_data_source} not valid')
return None
def read_prices(self, ticker: str) -> pd.Series:
"""
Read prices saved locally in client data folder.
If no .csv found, prices will be fetched with the data source
:param ticker: Stock ticker
:return:
"""
directory = self._market_data_source.prices_dir
filename = f"{self._market_data_source.file_prefix}_{ticker.replace('.TO', '_TO')}_prices.csv"
if files_utils.check_file(directory=directory,
file=filename):
df = pd.read_csv(f"{directory}/{filename}")
df = df.set_index('Date').dropna()
df.index = pd.to_datetime(df.index)
return df['Close']
else:
logger.logging.info(f'no price data to read for {ticker}, now fetching new data from api')
self.update_prices(ticker=ticker)
return self.read_prices(ticker)
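    # Illustrative usage sketch (the ticker is an arbitrary example, not from the
    # original code):
    #   reader = DataReader()
    #   close = reader.read_prices('AAPL')  # pandas Series of closing prices,
    #                                       # fetched from the API on a cache miss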
def read_fx(self, currency_pair: str) -> pd.Series:
"""
Read fx rates saved locally in client data folder.
If no .csv found, rates will be fetched
:param currency_pair: Fx pair ex. USDCAD or CADUSD
:return:
"""
directory = self._market_data_source.fx_dir
filename = f"{self._market_data_source.file_prefix}_{currency_pair}_fx.csv"
if files_utils.check_file(directory=directory,
file=filename):
df = | pd.read_csv(f"{directory}/{filename}") | pandas.read_csv |
import numpy as np
import pandas as pd
from staircase.constants import inf
from staircase.core.arrays import docstrings
from staircase.core.stairs import Stairs
from staircase.core.stats.statistic import corr as _corr
from staircase.core.stats.statistic import cov as _cov
from staircase.util._decorators import Appender
@Appender(docstrings.sample_example, join="\n", indents=1)
def sample(collection, x):
"""
Takes a dict-like collection of Stairs instances and evaluates their values across a common set of points.
Technically the results of this function should be considered as :math:`\\lim_{x \\to z^{-}} f(x)`
or :math:`\\lim_{x \\to z^{+}} f(x)`, when how = 'left' or how = 'right' respectively. See
:ref:`A note on interval endpoints<getting_started.interval_endpoints>` for an explanation.
Parameters
----------
collection : array-like, dictionary or pandas.Series
The Stairs instances at which to evaluate
x : scalar or vector data
The points at which to sample the Stairs instances. Must belong to the step function domain.
Returns
-------
:class:`pandas.DataFrame`
A dataframe, where rows correspond to the Stairs instances in *collection*,
        and columns correspond to the points in *x*. If *collection* is a dictionary then the
resulting dataframe will be indexed by the dictionary keys. If *collection* is a
:class:`pandas.Series` then the dataframe will have the same index as the series.
See Also
--------
Stairs.sample
"""
array = | pd.Series(collection) | pandas.Series |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns  # a high-level wrapper around matplotlib
import os
os.chdir('/Users/zhengzhiheng/PycharmProjects/untitled3')
pd.set_option('display.max_columns', 20)
| pd.set_option('display.max_rows', 100) | pandas.set_option |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import precision_recall_fscore_support, mean_squared_error
from collections import Counter
import math
import xgboost as xgb
import pickle
import sys, os
from acheron.helpers import model_evaluators
def is_valid_type(val):
# covers core floats, numpy floating and complex floating
if isinstance(val, (float, np.inexact)):
if math.isnan(val) or np.isnan(val):
return False
else:
return True
# covers core ints, numpy ints (signed unsigned), longs, shorts, bytes
# TODO check for bytes later, as they do NOT like math eqns when exponential
elif isinstance(val, (int, np.integer)):
return True
# covers core and np strings, excludes unicode
elif isinstance(val, (str, np.str_)):
if val.upper() in ['INVALID', 'NAN']:
return False
else:
return True
else:
return False
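# Example (illustrative only) of what is_valid_type accepts and rejects:
#   is_valid_type(1.5)    -> True      is_valid_type(np.nan)     -> False
#   is_valid_type(8)      -> True      is_valid_type('invalid')  -> False
#   is_valid_type('16.0') -> True      is_valid_type([1, 2])     -> False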
def make_mask(label_matrix, k):
"""
    Takes in a label matrix and returns a boolean mask of which labels are valid
    for each index; this allows the model to keep a sample when only a single
    column is invalid, instead of discarding the sample.
If doing kfold cross validation (k != 1), there must be at least k samples
in each class for that class to be considered valid
"""
mask = np.zeros((label_matrix.values.shape), dtype='bool')
invalid_classes = {}
# if any class has fewer than k samples, it is marked as invalid
for attribute in label_matrix.columns:
class_counter = Counter(label_matrix[attribute])
invalid_classes[attribute] = []
for key in class_counter:
if class_counter[key] < k:
invalid_classes[attribute].append(key)
for i, row in enumerate(label_matrix.values):
for j, col in enumerate(row):
if not is_valid_type(col):
continue
if col in invalid_classes[label_matrix.columns[j]]:
continue
mask[i,j] = True
return pd.DataFrame(data=mask, columns = label_matrix.columns,
index = label_matrix.index)
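# Illustrative sketch of make_mask (the toy labels below are assumptions for
# demonstration, not data from the project):
def _example_make_mask():
    labels = pd.DataFrame({'AMC': ['S', 'R', 'S', np.nan],
                           'TET': ['R', 'R', 'R', 'R']},
                          index=['g1', 'g2', 'g3', 'g4'])
    # with k=2 the lone 'R' in AMC is an invalid class (fewer than k samples) and
    # the NaN entry is masked too, so AMC -> [True, False, True, False]
    return make_mask(labels, k=2)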
def make_split(label_matrix, mask, k, samples):
"""
Takes a label matrix, splits it according to k fold cross validation
but only for valid samples. Produces a random split each time (stratified)
"""
assert(2<=k<=255) # if exceeding 255, change dtype below to uint16
split_df = pd.DataFrame(data=np.zeros(label_matrix.shape),
columns=label_matrix.columns, index = label_matrix.index, dtype='uint8')
split_df = split_df[~split_df.index.duplicated(keep='first')]
for col in label_matrix.columns:
# which labels are valid in this specific column
valid_labels = label_matrix[col].values[mask[col].values]
# matching sample name for each i in valid_labels
valid_samples = label_matrix.index.values[mask[col].values]
if len(valid_samples) == 0:
print("All samples in column "+col+" are invalid, skipping split")
continue
# in the event of duplicates, keep only the first seen instance
processed = []
# we also need to factor in that we only have the samples in /samples,
# where a datasheet might have thousands of valid, but extra datapoints
#seen_bool_mask = np.array([i in samples and i not in duplicates for i in valid_samples])
seen_bool_mask = []
for i in valid_samples:
if i in processed:
seen_bool_mask.append(False)
else:
processed.append(i)
if i in samples:
seen_bool_mask.append(True)
else:
seen_bool_mask.append(False)
seen_bool_mask = np.array(seen_bool_mask)
final_labels = valid_labels[seen_bool_mask]
final_samples = valid_samples[seen_bool_mask]
# at this point, we only have labels and samples that are eligible
# for machine learning
skf = StratifiedKFold(n_splits=k, shuffle=True)
num_samples = len(final_samples)
splits = enumerate(skf.split(np.zeros((num_samples,k)),final_labels))
for i, split in splits:
# pull the array of values assigned to the testing set,
# label these genomes as per the fold they belong to
for sample in final_samples[split[1]]:
split_df.at[sample,col] = i
return split_df
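# Illustrative sketch (assumes 'labels' and 'mask' come from make_mask above):
def _example_make_split(labels, mask):
    # assign each valid sample to one of k stratified folds; the returned frame
    # holds the fold index (0..k-1) per sample and per attribute
    return make_split(labels, mask, k=5, samples=list(labels.index))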
def load_data(dataset_name, label_name, trial, type, num_feats, k, attribute):
"""
load requested dataset, mask, and split
"""
features = pd.read_pickle("data/{}/features/{}_matrix.df".format(dataset_name, type))
labels = pd.read_pickle("data/{}/labels/{}.df".format(dataset_name,label_name))[attribute]
mask = pd.read_pickle("data/{}/features/masks/{}_{}.df".format(dataset_name,type,label_name))[attribute]
if k!=1:
split = pd.read_pickle("data/{}/splits/split{}_{}_{}_{}xCV.df".format(dataset_name,trial,type,label_name,k))
else:
split = []
features, labels = apply_mask(features, labels, mask)
return features, labels, mask, split
def train_model(features, label, model_type, num_classes):
"""
    Converts feature and label matrices into a trained model
    scikit-learn models are at the end, as they share a fit method
"""
# XGBoost
if model_type.upper() in ['XGB', 'XGBOOST']:
if num_classes == 2:
objective = 'binary:logistic'
else:
objective = 'multi:softmax'
        # this is probably going to be heavy on memory, so let's revisit XGBClassifier
# if we explode past our ram usage on this step
xgb_matrix = xgb.DMatrix(features.values, label, feature_names=features.columns)
params = {'objective':objective, 'num_class': num_classes}
#params = {'objective':objective}
booster = xgb.train(params, xgb_matrix)
return booster
# Artificial Neural Network
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
from keras.utils import np_utils, to_categorical
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils, to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
cat_labels = to_categorical(label, num_classes)
patience = 16
early_stop = EarlyStopping(monitor='loss', patience=patience, verbose=1, min_delta=0.005, mode='auto')
reduce_LR = ReduceLROnPlateau(monitor='loss', factor= 0.1, patience=(patience/2), verbose = 1, min_delta=0.005,mode = 'auto', cooldown=0, min_lr=0)
num_feats = len(features.columns)
model = Sequential()
model.add(Dense(int(((num_feats+num_classes)/2)),activation='relu',input_dim=(num_feats)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, kernel_initializer='uniform', activation='softmax'))
if num_classes == 2:
loss = "binary_crossentropy"
else:
loss = "poisson"
model.compile(loss=loss, metrics=['accuracy'], optimizer='adam')
model.fit(features.values, cat_labels, epochs=100, verbose=1, callbacks=[early_stop, reduce_LR])
return model
# Support Vector Classifier
# https://scikit-learn.org/stable/modules/svm.html#classification
if model_type.upper() in ['SVC', 'SVM']:
from sklearn import svm
model = svm.SVC()
# Support Vector Regressor
# https://scikit-learn.org/stable/modules/svm.html#regression
elif model_type.upper() in ['SVR']:
from sklearn import svm
model = svm.SVR()
# Stochastic Gradient Descent Classifier
# https://scikit-learn.org/stable/modules/sgd.html#classification
elif model_type.upper() in ['SGDC']:
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(loss="hinge", penalty="l2", max_iter=25)
# Perceptron
# https://scikit-learn.org/stable/modules/linear_model.html#perceptron
elif model_type.upper() in ['PERC']:
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)
# Passive Aggressive Algorithms
# https://scikit-learn.org/stable/modules/linear_model.html#passive-aggressive-algorithms
# https://www.geeksforgeeks.org/passive-aggressive-classifiers
elif model_type.upper() in ['PAC']:
from sklearn.linear_model import PassiveAggressiveClassifier
model = PassiveAggressiveClassifier(max_iter=100)
# Nearest Neighbours Classifier
# https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-classification
elif model_type.upper() in ['NNC']:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
# Nearest Neighbours Regressor
# https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-regression
elif model_type.upper() in ['NNR']:
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor(n_neighbors=3)
# Gaussian Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#gaussian-naive-bayes
elif model_type.upper() in ['GNB']:
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
# Multinomial Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#multinomial-naive-bayes
elif model_type.upper() in ['MNB']:
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
# Categorical Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#categorical-naive-bayes
elif model_type.upper() in ['CNB']:
from sklearn.naive_bayes import CategoricalNB
model = CategoricalNB()
# Decision Tree Classifier
# https://scikit-learn.org/stable/modules/tree.html#classification
elif model_type.upper() in ['DTC']:
from sklearn import tree
model = tree.DecisionTreeClassifier()
# Decision Tree Regressor
# https://scikit-learn.org/stable/modules/tree.html#regression
elif model_type.upper() in ['DTR']:
from sklearn import tree
model = tree.DecisionTreeRegressor()
# AdaBoost
# https://scikit-learn.org/stable/modules/ensemble.html#adaboost
elif model_type.upper() in ['ADA']:
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier()
# Gradient Boosted Decision Trees
# https://scikit-learn.org/stable/modules/ensemble.html#gradient-tree-boosting
elif model_type.upper() in ['GBDT']:
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
# Multi-layer Perceptron Classifier
# https://scikit-learn.org/stable/modules/neural_networks_supervised.html#multi-layer-perceptron
elif model_type.upper() in ['MLPC']:
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
else:
raise Exception("model type {} not defined".format(model_type))
model.fit(features.values, label)
return model
def train_hyper_model(x_train, y_train, x_val, y_val, model_type, num_classes):
"""
Trains a hyperparameter optimized model
"""
from acheron.workflows import hyp
best_model, best_params = hyp.get_best(x_train, y_train, x_val, y_val, model_type, num_classes)
return best_model, best_params
"""
Unsure if saving required, to do without
import time
from random import seed
from random import random
seed(time.time())
test_id = random()
save_path = "data/hyp_data/{}/".format(test_id)
os.makedirs(save_path, exist_ok=False)
for i, data_chunk in enumerate([x_train, y_train, x_val, y_val]):
if i%2 == 0:
data_chunk.to_pickle("{}{}.pkl".format(save_path,i))
else:
np.save("{}{}.npy".format(save_path,i),data_chunk)
trials = Trials()
# https://towardsdatascience.com/an-example-of-hyperparameter-optimization-on-xgboost-lightgbm-and-catboost-using-hyperopt-12bc41a271e
# Search Space Subject to Change!!
if model_type.upper() in ['XGB','XGBOOST']:
search_params = {
'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
'max_depth': hp.choice('max_depth', np.arange(1, 8, 1, dtype=int)),
'min_child_weight': hp.choice('min_child_weight', np.arange(1, 8, 1, dtype=int)),
'colsample_bytree': hp.choice('colsample_bytree', np.arange(0.3, 0.8, 0.1)),
'subsample': hp.uniform('subsample', 0.8, 1),
'num_class': num_classes,
'test_id': test_id
}
best_index = fmin(
fn=hyp.xgboost_objective, space=search_params,
algo=tpe.suggest, max_evals=100, trials=trials)
best_params = space_eval(search_params, best_index)
if num_classes == 2:
best_params['objective'] = 'binary:logistic'
else:
best_params['objective'] = 'multi:softmax'
best_params['n_estimators'] = 10
best_params['num_class'] = num_classes
xgb_matrix = xgb.DMatrix(x_train.values, y_train, feature_names=x_train.columns)
booster = xgb.train(best_params, xgb_matrix)
return booster, best_params
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
from acheron.workflows import hyp
best_run, best_model = optim.minimize(
model=hyp.create_model,
data=load_hyp_data,
algo=tpe.suggest,
max_evals=10,
trials=Trials(),
keep_temp=True)
return best_model, best_run
"""
#else:
#raise Exception("model type {} not defined".format(model_type))
"""
# Minimal Cost-Complexity Pruning
# https://scikit-learn.org/stable/modules/tree.html#minimal-cost-complexity-pruning
elif model_type.upper() in ['MCCP']:
from sklearn import tree
model = tree.DecisionTreeClassifier()
path = model.cost_complexity_pruning_path(features.values, labels)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
models = []
for ccp_alpha in ccp_alphas:
clf = DecisionTreeClassifier(ccp_alpha=ccp_alpha)
clf.fit(features.values, labels)
clfs.append(clf)
# now use validation set to see which model did best, use that alpha to train final model
"""
def predict(model, features, model_type):
"""
Takes a model and a feature set, returns an label like array of predictions
"""
if model_type.upper() in ['XGB', 'XGBOOST']:
xgb_matrix = xgb.DMatrix(features.values, feature_names = features.columns)
return [round(i) for i in model.predict(xgb_matrix, validate_features=True)]
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
# This will be in categorical form, need to decode it
prediction = model.predict_classes(features)
#return np.argmax(prediction, axis=1)
return prediction
else:
try:
return [round(i) for i in model.predict(features)]
except:
raise Exception("model type {} not defined".format(model_type))
def evaluate_model(predicted, actual, model_type, dilutions, attribute, encoder):
"""
Evaluates how well a model did (accuracy)
    For MIC models, also reports off-by-one (within-dilution) accuracy and error rates
Takes encoded class labels (0,1,2) not decoded values (2,4,8,16)
"""
# this df will eventually get all info about the test
direct_accuracy = np.sum([predicted[i]==actual[i] for i in range(len(predicted))])/len(predicted)
dilutional_accuracies = {}
find_errors = False
if len(dilutions) > 0:
find_errors = True
for dilution in dilutions:
total = 0
correct = 0
for i in range(len(predicted)):
total +=1
if abs(predicted[i]-actual[i]) <= dilution:
correct +=1
dilutional_accuracies[dilution] = correct/total
data = [len(predicted),direct_accuracy]
columns = ["Supports", "Accuracy"]
for dilution in dilutions:
if str(dilution) == '0':
continue
else:
data.append(dilutional_accuracies[dilution])
columns.append("Within {} Dilution".format(dilution))
if find_errors:
decoder = {v:k for k,v in encoder.items()}
pred_decoded = [decoder[i] for i in predicted]
act_decoded = [decoder[i] for i in actual]
errors = [model_evaluators.find_error_type(i[0],i[1], attribute) for i in zip(pred_decoded, act_decoded)]
error_counts = Counter(errors)
error_types = ["Very Major Error", "Major Error", "Non Major Error", "Correct"]
total_errors = 0
for error_type in error_types:
total_errors += error_counts[error_type]
percent = error_counts[error_type]/len(predicted)
data.append(percent)
columns.append(error_type)
try:
assert len(predicted) == total_errors
except:
print('Number of Errors+Correct does not equal number of predictions')
raise
results_df = pd.DataFrame(data=[data], columns=columns)
return results_df
def mean_summaries(summaries):
"""
Takes a list of model summaries
averages them appropriately, relevant to number of supports
"""
try:
indx = summaries[0].index[0]
except:
indx = 0
mean_df = pd.DataFrame(columns=summaries[0].columns, index=[indx])
total_supports = 0
proportion = {}
for summary in summaries:
num_sups = summary['Supports'][0]
total_supports += num_sups
for col in summary.columns:
if col != "Supports":
if col in proportion.keys():
proportion[col] += num_sups*summary[col][0]
else:
proportion[col] = num_sups*summary[col][0]
mean_df.loc[indx,'Supports'] = total_supports
for k,v in proportion.items():
mean_df.loc[indx, k] = v/total_supports
return mean_df
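# Example (illustrative numbers only): two summaries with 10 and 30 supports and
# accuracies 0.9 and 0.7 average to (10*0.9 + 30*0.7) / 40 = 0.75, i.e. the mean
# is weighted by each summary's 'Supports' column rather than taken per fold.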
def mean_prec_recall(prec_recall_dfs):
"""
Takes a list of precision_recall_fscore_support dataframes
Returns the mean df based on proportion of supports
"""
indeces = prec_recall_dfs[0].index
done_rows = []
for indx in indeces:
rows = [i[i.index == indx] for i in prec_recall_dfs]
done_rows.append(mean_summaries(rows))
return pd.concat(done_rows)
def apply_mask(features, labels, mask):
"""
Takes in a pandas dataframe or series with a mask
and returns only valid samples
Mask series looks like:
AMC
BioSample
SAMN00000001 False
SAMN00000002 True
"""
    # it's important to note that the mask covers everything in the label df, but
    # the feature df might not contain every one of those samples,
    # therefore we reduce the mask to samples that are actually seen
if isinstance(features, pd.DataFrame):
seen = list(features.index)
skip_nan_check = False
elif isinstance(features, list):
seen = features
skip_nan_check = True
else:
raise exception("Needs list of features or dataframe with declared index, not {}".format(type(features)))
mask = mask[[i in seen for i in mask.index]]
labels = labels[[i in seen for i in labels.index]]
# prior to reindexing, we need to make sure there are no duplicates
mask = mask[~mask.index.duplicated(keep='first')]
labels = labels[~labels.index.duplicated(keep='first')]
# reorder dataframes to make sure they are in the same order as the features
mask = mask.reindex(seen)
labels = labels.reindex(seen)
# remove samples that we dont have data for
# (these appear as nans in the mask)
if not skip_nan_check:
try:
mask = pd.Series(index = mask.index,
data = [False if np.isnan(i) else i for i in mask])
except:
for i in mask:
try:
np.isnan(i)
except:
print("{} cannot be checked if isnan".format(i))
raise
# double check that the mask biosample order matches the feature biosample order
for i, biosample in enumerate(mask.index):
assert biosample == seen[i]
if isinstance(features, pd.Series) or isinstance(features, pd.DataFrame):
labels = labels[list(mask)]
features = features[list(mask)]
elif isinstance(features, list):
labels = labels[list(mask)]
features = | pd.Series(features) | pandas.Series |
import time
import os
import os.path as osp
import numpy as np
import sys
import json
import pandas as pd
import pickle as pkl
import torch
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from skorch import NeuralNetRegressor
from torch.optim.lr_scheduler import StepLR
from skorch.callbacks.lr_scheduler import LRScheduler
from classicalgsg.nn_models.dataloader import DataLoader
from classicalgsg.nn_models.models import GSGNN
from classicalgsg.molreps_models.utils import scop_to_str
# Run training on the guowei dataset with different feature sets
# The Gaff forcefield without atom types is going to be used
DATASET = 'OpenChem'
MODELS_SAVE_PATH = f'models/{DATASET}'
GSG_PARAMS = {'wavelet_scale': [4],
'scattering_operators': ['(z,f,s)']}
SEED = 1702
def report(results, n_top):
num_tests = results['rank_test_score'].shape[0]
for i in range(min(n_top, num_tests)):
candidate = np.flatnonzero(results['rank_test_score'] == i+1)[0]
print(f'Model with rank: {i:0}')
print('Mean validation score: '
f'{results["mean_test_score"][candidate]:.3f}'
f' (std: {results["std_test_score"][candidate]:.3f})')
print(f'Parameters: {results["params"][candidate]}')
def split_dataset(wavelet_step, scattering_operators, ratio=0.2):
dataset_save_path = osp.join(f'data_{wavelet_step}_'
f'{scop_to_str(scattering_operators)}',
f'{DATASET}')
save_file_path = osp.join(dataset_save_path, f'{DATASET}.pkl')
# Split the data
with open(save_file_path, 'rb') as rfile:
data = pkl.load(rfile)
data = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 10:39:56 2018
normalization code for data matrix
ver 200319
@author: tadahaya
"""
import pandas as pd
import numpy as np
from scipy.stats import rankdata
import time
def z_array(x):
"""
to calculate z scores
Parameters
----------
x: a numpy array
a numpy array to be analyzed
"""
myu = np.mean(x,axis=0)
sigma = np.std(x,axis=0,ddof=1)
return (x - myu)/sigma
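# Example (illustrative only): column-wise z scores with ddof=1,
#   z_array(np.array([[1., 2.], [3., 4.], [5., 6.]]))
# has myu = [3., 4.] and sigma = [2., 2.], giving [[-1, -1], [0, 0], [1, 1]].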
def z_pop(x,axis=0,drop=True):
"""
to calculate z scores from dataframe
the scores employ population control
Parameters
----------
x: a dataframe
a dataframe to be analyzed
axis: 0 or 1
        whether z scores are calculated by column (0) or by row (1)
    drop: boolean
        whether to drop inf and nan
"""
if axis==0:
myu = np.mean(x.values,axis=0)
sigma = np.std(x.values,axis=0,ddof=1)
else:
myu = np.c_[np.mean(x.values,axis=1)]
sigma = np.c_[np.std(x.values,axis=1,ddof=1)]
df = pd.DataFrame((x.values - myu)/sigma)
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
def z(x,control="",drop=True):
"""
to calculate z scores based on control data from dataframe
Parameters
----------
x: a dataframe
a dataframe to be analyzed
control: string, default ""
indicates the control column name
drop: boolean
        whether to drop inf and nan
"""
if len(control) > 0:
print("control column name: {0}".format(control))
con = x.loc[:,x.columns.str.contains(control)]
n = len(con.columns)
print("control column No.: {0}".format(n))
if n < 3:
print("<< CAUTION >> control columns are too few: population control was employed")
return z_pop(x,axis=1,drop=drop)
else:
myu = np.c_[np.mean(con.values,axis=1)]
sigma = np.c_[np.std(con.values,axis=1,ddof=1)]
x = x.loc[:,~x.columns.str.contains(control)]
df = pd.DataFrame((x.values - myu)/sigma)
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
else:
print("<< CAUTION >> no control columns: population control was employed")
return z_pop(x,axis=1,drop=drop)
def madz_array(x):
"""
to calculate MAD Z
Parameters
----------
x: a numpy array
a numpy array to be analyzed
"""
med = np.median(x,axis=0)
mad = np.median(np.abs(x - med),axis=0)
return (x - med)/(1.4826*mad)
def madz_pop(x,axis=0,drop=True):
"""
to calculate MAD Z from dataframe
the scores employ population control
Parameters
----------
x: a dataframe
a dataframe to be analyzed
axis: 0 or 1
        whether MAD Z scores are calculated by column (0) or by row (1)
    drop: boolean
        whether to drop inf and nan
"""
if axis==0:
med = np.median(x.values,axis=0)
mad = np.median(np.abs(x.values - med),axis=0)
else:
med = np.c_[np.median(x.values,axis=1)]
mad = np.c_[np.median(np.abs(x.values - med),axis=1)]
df = pd.DataFrame((x.values - med)/(1.4826*mad))
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
def madz(x,control="",drop=True):
"""
to calculate MAD Z based on control data from dataframe
Parameters
----------
x: a dataframe
a dataframe to be analyzed
control: string, default ""
indicates the control column name
drop: boolean
        whether to drop inf and nan
"""
if len(control) > 0:
print("control column name: {0}".format(control))
con = x.loc[:,x.columns.str.contains(control)]
n = len(con.columns)
print("control column No.: {0}".format(n))
if n < 3:
print("<< CAUTION >> control columns are too few: population control was employed")
return madz_pop(x,axis=1,drop=drop)
else:
med = np.c_[np.median(con.values,axis=1)]
mad = np.c_[np.median(np.abs(con.values - med),axis=1)]
x = x.loc[:,~x.columns.str.contains(control)]
df = pd.DataFrame((x.values - med)/(1.4826*mad))
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
else:
print("<< CAUTION >> no control columns: population control was employed")
return madz_pop(x,axis=1,drop=drop)
def robz_array(x):
"""
to calculate robust z scores
Parameters
----------
x: a numpy array
a numpy array to be analyzed
"""
med = np.median(x,axis=0)
q1,q3 = np.percentile(x,[25,75],axis=0)
niqr = (q3-q1)*0.7413
return (x - med)/niqr
def robz_pop(x,axis=0,drop=True):
"""
to calculate robust z scores from dataframe
the scores employ population control
Parameters
----------
x: a dataframe
a dataframe to be analyzed
axis: 0 or 1
        whether robust z scores are calculated by column (0) or by row (1)
    drop: boolean
        whether to drop inf and nan
"""
if axis==0:
med = np.median(x.values,axis=0)
q1,q3 = np.percentile(x.values,[25,75],axis=0)
else:
med = np.c_[np.median(x.values,axis=1)]
q1 = np.c_[np.percentile(x.values,25,axis=1)]
q3 = np.c_[np.percentile(x.values,75,axis=1)]
niqr = (q3-q1)*0.7413
df = | pd.DataFrame((x.values - med)/niqr) | pandas.DataFrame |
import pandas as pd
from pyfiglet import Figlet
import yaml
import yfinance as yf
def print_overview(config):
keep = ['EBITDA EBITDA', 'EPS (Basic) EPS (Basic)', 'Sales/Revenue Sales/Revenue']
res = []
for ticker in config.keys():
country = config[ticker]
if country == 'USA':
url = f'https://www.marketwatch.com/investing/stock/{ticker}/financials/income/quarter'
else:
url = f'https://www.marketwatch.com/investing/stock/{ticker}/financials/income/quarter?countrycode={country}'
#print(pd.read_html(url))
df = | pd.read_html(url) | pandas.read_html |
import io
import itertools
import pytest
from pandas.util.testing import (
assert_series_equal, assert_frame_equal, assert_index_equal)
from numpy.testing import assert_array_equal
import pandas as pd
import numpy as np
import matplotlib.figure
import matplotlib.pyplot as plt
from upsetplot import plot
from upsetplot import UpSet
from upsetplot import generate_counts, generate_samples
from upsetplot.plotting import _process_data
# TODO: warnings should raise errors
def is_ascending(seq):
# return np.all(np.diff(seq) >= 0)
return sorted(seq) == list(seq)
@pytest.mark.parametrize('x', [
generate_counts(),
generate_counts().iloc[1:-2],
])
@pytest.mark.parametrize('sort_by', ['cardinality', 'degree'])
@pytest.mark.parametrize('sort_categories_by', [None, 'cardinality'])
def test_process_data_series(x, sort_by, sort_categories_by):
assert x.name == 'value'
for subset_size in ['auto', 'legacy', 'sum', 'count']:
for sum_over in ['abc', False]:
with pytest.raises(ValueError, match='sum_over is not applicable'):
_process_data(x, sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size=subset_size, sum_over=sum_over)
df, intersections, totals = _process_data(
x, subset_size='auto', sort_by=sort_by,
sort_categories_by=sort_categories_by, sum_over=None)
assert intersections.name == 'value'
x_reordered = (x
.reorder_levels(intersections.index.names)
.reindex(index=intersections.index))
assert len(x) == len(x_reordered)
assert x_reordered.index.is_unique
assert_series_equal(x_reordered, intersections,
check_dtype=False)
if sort_by == 'cardinality':
assert is_ascending(intersections.values[::-1])
else:
# check degree order
assert is_ascending(intersections.index.to_frame().sum(axis=1))
# TODO: within a same-degree group, the tuple of active names should
# be in sort-order
if sort_categories_by:
assert is_ascending(totals.values[::-1])
assert np.all(totals.index.values == intersections.index.names)
assert np.all(df.index.names == intersections.index.names)
assert set(df.columns) == {'_value', '_bin'}
assert_index_equal(df['_value'].reorder_levels(x.index.names).index,
x.index)
assert_array_equal(df['_value'], x)
assert_index_equal(intersections.iloc[df['_bin']].index,
df.index)
assert len(df) == len(x)
@pytest.mark.parametrize('x', [
generate_samples()['value'],
generate_counts(),
])
def test_subset_size_series(x):
kw = {'sort_by': 'cardinality',
'sort_categories_by': 'cardinality',
'sum_over': None}
df_sum, intersections_sum, totals_sum = _process_data(
x, subset_size='sum', **kw)
if x.index.is_unique:
expected_warning = None
else:
expected_warning = FutureWarning
with pytest.warns(expected_warning):
df, intersections, totals = _process_data(
x, subset_size='legacy', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
| assert_series_equal(totals, totals_sum) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import re
import os
import pymorphy2
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from gensim import models
from datetime import datetime as dt
def get_similarity_KE(lemmas1, lemmas2):
big = lemmas2
small = lemmas1
if len(lemmas1) > len(lemmas2):
big = lemmas1
small = lemmas2
inters = [i for i in small if i in big] # TODO: or conversely
# no intersection
if len(inters) == 0:
return 0
ratio = (len(inters)/len(lemmas1) + len(inters)/len(lemmas2)) / 2.0
return ratio
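# Example (illustrative only): for lemmas1 = ['red', 'car'] and
# lemmas2 = ['red', 'car', 'cheap'] the intersection has 2 elements, so the
# ratio is (2/2 + 2/3) / 2 ~= 0.83; fully disjoint lists return 0.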
def text_similarity(text1, text2, stemmer, stop_words, model):
text1 = str(text1)
text2 = str(text2)
text1 = text1.replace('\n', '')
text2 = text2.replace('\n', '')
lemmas1 = []
lemmas2 = []
    digits1 = []  # for symbols with digits
    digits2 = []  # for symbols with digits
# english1 = [] # for brands or english words
# english2 = [] # for brands or english words
tokenizer = RegexpTokenizer(r'\w+')
tk1 = tokenizer.tokenize(text1)
tk2 = tokenizer.tokenize(text2)
for word in tk1:
normal = stemmer.parse(word)[0].normal_form
# normal = re.search('[а-я]+', normal)
if not word.isalpha():
digits1.append(word)
continue
# if re.match("^[A-Za-z_-]*$", word):
# english1.append(word)
# continue
if word not in stop_words:
lemmas1.append(normal)
for word in tk2:
normal = stemmer.parse(word)[0].normal_form
if not word.isalpha():
digits2.append(word)
continue
# if re.match("^[A-Za-z_-]*$", word):
# english1.append(word)
# continue
if word not in stop_words:
lemmas2.append(normal)
try:
score = model.n_similarity(lemmas1, lemmas2)
except KeyError as e:
# print('KEY ERROR', e)
score = get_similarity_KE(lemmas1, lemmas2)
# dscore = get_similarity_KE(digits1, digits2)
# total_score = (score+dscore)/2.0
return float(score)
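# Illustrative usage sketch (the model path and texts are assumptions, not part of
# the original pipeline):
#   stemmer = pymorphy2.MorphAnalyzer()
#   stop_words = stopwords.words('russian')
#   w2v = models.KeyedVectors.load_word2vec_format('w2v.bin', binary=True)
#   score = text_similarity('красный автомобиль', 'красная машина',
#                           stemmer, stop_words, w2v)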
def split_train():
oidx = 1
for i in range(1, 5):
fname = 'data/texts/Ptext_' + str(i) + '.csv'
df = pd.read_csv(fname, compression='gzip')
splitrow = int(df.shape[0]/2)
print(df.shape[0])
set1 = df[:splitrow]
set1.to_csv('data/texts/splits/train_'+str(oidx)+'.csv', compression='gzip')
oidx += 1
print(set1.shape[0])
set2 = df[splitrow:]
set2.to_csv('data/texts/splits/train_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set2.shape[0])
def split_test():
oidx = 1
df = pd.read_csv('data/texts/FPairs_text_test.csv', compression='gzip')
splitrow = int(df.shape[0]/4)
print(df.shape[0])
set1 = df[:splitrow]
set1.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set1.shape[0])
set2 = df[splitrow:splitrow*2]
set2.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set2.shape[0])
set3 = df[splitrow*2:splitrow*3]
set3.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set3.shape[0])
set4 = df[splitrow*3:]
set4.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set4.shape[0])
#
#
#
if __name__ == '__main__':
set1 = pd.read_csv('data/texts/splits/OUTtrain_1.csv', compression='gzip')
set2 = pd.read_csv('data/texts/splits/OUTtrain_2.csv', compression='gzip')
set3 = pd.read_csv('data/texts/splits/OUTtrain_3.csv', compression='gzip')
set4 = pd.read_csv('data/texts/splits/OUTtrain_4.csv', compression='gzip')
set5 = pd.read_csv('data/texts/splits/OUTtrain_5.csv', compression='gzip')
set6 = pd.read_csv('data/texts/splits/OUTtrain_6.csv', compression='gzip')
set7 = pd.read_csv('data/texts/splits/OUTtrain_7.csv', compression='gzip')
set8 = | pd.read_csv('data/texts/splits/OUTtrain_8.csv', compression='gzip') | pandas.read_csv |
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import os
import tempfile
import unittest
# noinspection PyPackageRequirements
import pytest
from pandas.tests.extension import base
from text_extensions_for_pandas.array.test_span import ArrayTestBase
from text_extensions_for_pandas.array.span import *
from text_extensions_for_pandas.array.token_span import *
class TokenSpanTest(ArrayTestBase):
def test_create(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 1)
self.assertEqual(s1.covered_text, "This")
# Begin too small
with self.assertRaises(ValueError):
TokenSpan(toks, -2, 4)
# End too small
with self.assertRaises(ValueError):
TokenSpan(toks, 1, -1)
# End too big
with self.assertRaises(ValueError):
TokenSpan(toks, 1, 10)
# Begin null, end not null
with self.assertRaises(ValueError):
TokenSpan(toks, TokenSpan.NULL_OFFSET_VALUE, 0)
def test_repr(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 2)
self.assertEqual(repr(s1), "[0, 7): 'This is'")
toks2 = SpanArray(
"This is a really really really really really really really really "
"really long string.",
np.array([0, 5, 8, 10, 17, 24, 31, 38, 45, 52, 59, 66, 73, 78, 84]),
np.array([4, 7, 9, 16, 23, 30, 37, 44, 51, 58, 65, 72, 77, 84, 85]),
)
self._assertArrayEquals(
toks2.covered_text,
[
"This",
"is",
"a",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"long",
"string",
".",
],
)
s2 = TokenSpan(toks2, 0, 4)
self.assertEqual(repr(s2), "[0, 16): 'This is a really'")
s2 = TokenSpan(toks2, 0, 15)
self.assertEqual(
repr(s2),
"[0, 85): 'This is a really really really really really really "
"really really really [...]'"
)
def test_equals(self):
toks = self._make_spans_of_tokens()
other_toks = toks[:-1].copy()
s1 = TokenSpan(toks, 0, 2)
s2 = TokenSpan(toks, 0, 2)
s3 = TokenSpan(toks, 0, 3)
s4 = TokenSpan(other_toks, 0, 2)
s5 = Span(toks.target_text, s4.begin, s4.end)
s6 = Span(toks.target_text, s4.begin, s4.end + 1)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
self.assertEqual(s1, s4)
self.assertEqual(s1, s5)
self.assertEqual(s5, s1)
self.assertNotEqual(s1, s6)
def test_less_than(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
self.assertLess(s1, s3)
self.assertLessEqual(s1, s3)
self.assertFalse(s1 < s2)
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
self.assertEqual(s1 + s2, s1)
self.assertEqual(char_s1 + s2, char_s1)
self.assertEqual(s2 + char_s1, char_s1)
self.assertEqual(char_s2 + char_s1, char_s1)
self.assertEqual(s2 + s3, TokenSpan(toks, 2, 4))
def test_hash(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 0, 3)
s3 = TokenSpan(toks, 3, 4)
d = {s1: "foo"}
self.assertEqual(d[s1], "foo")
self.assertEqual(d[s2], "foo")
d[s2] = "bar"
d[s3] = "fab"
self.assertEqual(d[s1], "bar")
self.assertEqual(d[s2], "bar")
self.assertEqual(d[s3], "fab")
class TokenSpanArrayTest(ArrayTestBase):
def _make_spans(self):
toks = self._make_spans_of_tokens()
return TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
def test_create(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
with self.assertRaises(TypeError):
TokenSpanArray(self._make_spans_of_tokens(), "Not a valid begins list", [42])
def test_dtype(self):
arr = self._make_spans()
self.assertTrue(isinstance(arr.dtype, TokenSpanDtype))
def test_len(self):
self.assertEqual(len(self._make_spans()), 7)
def test_getitem(self):
arr = self._make_spans()
self.assertEqual(arr[2].covered_text, "a")
self._assertArrayEquals(arr[2:4].covered_text, ["a", "test"])
def test_setitem(self):
arr = self._make_spans()
arr[1] = arr[2]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", "test"])
arr[3] = None
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", None])
with self.assertRaises(ValueError):
arr[0] = "Invalid argument for __setitem__()"
arr[0:2] = arr[0]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "This", "a", None])
arr[[0, 1, 3]] = None
self._assertArrayEquals(arr.covered_text[0:4], [None, None, "a", None])
arr[[2, 1, 3]] = arr[[4, 5, 6]]
self._assertArrayEquals(
arr.covered_text[0:4], [None, "a test", "This is", "This is a test"]
)
def test_equals(self):
arr = self._make_spans()
self._assertArrayEquals(arr[0:4] == arr[1], [False, True, False, False])
arr2 = self._make_spans()
self._assertArrayEquals(arr == arr, [True] * 7)
self._assertArrayEquals(arr == arr2, [True] * 7)
self._assertArrayEquals(arr[0:3] == arr[3:6], [False, False, False])
arr3 = SpanArray(arr.target_text, arr.begin, arr.end)
self._assertArrayEquals(arr == arr3, [True] * 7)
self._assertArrayEquals(arr3 == arr, [True] * 7)
def test_not_equals(self):
arr = self._make_spans()
arr2 = self._make_spans()
self._assertArrayEquals(arr[0:4] != arr[1], [True, False, True, True])
self._assertArrayEquals(arr != arr2, [False] * 7)
self._assertArrayEquals(arr[0:3] != arr[3:6], [True, True, True])
def test_concat_same_type(self):
arr = self._make_spans()
arr2 = self._make_spans()
# Type: TokenSpanArray
arr3 = TokenSpanArray._concat_same_type((arr, arr2))
self._assertArrayEquals(arr3.covered_text, np.tile(arr2.covered_text, 2))
def test_from_factorized(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_factorized(spans_list, arr)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_from_sequence(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_sequence(spans_list)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_nulls(self):
arr = self._make_spans()
self._assertArrayEquals(arr.isna(), [False] * 7)
self.assertFalse(arr.have_nulls)
arr[2] = TokenSpan.make_null(arr.tokens)
self.assertIsNone(arr.covered_text[2])
self._assertArrayEquals(arr[0:4].covered_text, ["This", "is", None, "test"])
self._assertArrayEquals(arr[0:4].isna(), [False, False, True, False])
self.assertTrue(arr.have_nulls)
def test_copy(self):
arr = self._make_spans()
arr2 = arr.copy()
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
self.assertEqual(arr[1], arr2[1])
arr[1] = TokenSpan.make_null(arr.tokens)
self.assertNotEqual(arr[1], arr2[1])
# Double underscore because you can't call a test case "test_take"
def test_take(self):
arr = self._make_spans()
arr2 = arr.take([1, 1, 2, 3, 5, -1])
self._assertArrayEquals(
arr2.covered_text, ["is", "is", "a", "test", "a test", "This is a test"]
)
arr3 = arr.take([1, 1, 2, 3, 5, -1], allow_fill=True)
self._assertArrayEquals(
arr3.covered_text, ["is", "is", "a", "test", "a test", None]
)
def test_less_than(self):
tokens = self._make_spans_of_tokens()
arr1 = TokenSpanArray(tokens, [0, 2], [4, 3])
s1 = TokenSpan(tokens, 0, 1)
s2 = TokenSpan(tokens, 3, 4)
arr2 = TokenSpanArray(tokens, [0, 3], [0, 4])
self._assertArrayEquals(s1 < arr1, [False, True])
self._assertArrayEquals(s2 > arr1, [False, True])
self._assertArrayEquals(arr1 < s1, [False, False])
self._assertArrayEquals(arr1 < arr2, [False, True])
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
s4 = TokenSpan(toks, 2, 4)
s5 = TokenSpan(toks, 0, 3)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
char_s3 = Span(s3.target_text, s3.begin, s3.end)
char_s4 = Span(s4.target_text, s4.begin, s4.end)
char_s5 = Span(s5.target_text, s5.begin, s5.end)
# TokenSpanArray + TokenSpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
TokenSpanArray._from_sequence([s1, s4, s3]),
)
# SpanArray + TokenSpanArray
self._assertArrayEquals(
SpanArray._from_sequence([char_s1, char_s2, char_s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + SpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ SpanArray._from_sequence([char_s2, char_s3, char_s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + TokenSpan
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + s2,
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpan + TokenSpanArray
self._assertArrayEquals(
s2 + TokenSpanArray._from_sequence([s1, s2, s3]),
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpanArray + Span
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + char_s2,
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
# Span + SpanArray
self._assertArrayEquals(
char_s2 + SpanArray._from_sequence([char_s1, char_s2, char_s3]),
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
def test_reduce(self):
arr = self._make_spans()
self.assertEqual(arr._reduce("sum"), TokenSpan(arr.tokens, 0, 4))
# Remind ourselves to modify this test after implementing min and max
with self.assertRaises(TypeError):
arr._reduce("min")
def test_make_array(self):
arr = self._make_spans()
arr_series = pd.Series(arr)
toks_list = [arr[0], arr[1], arr[2], arr[3]]
self._assertArrayEquals(
TokenSpanArray.make_array(arr).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(arr_series).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(toks_list).covered_text,
["This", "is", "a", "test"],
)
def test_begin_and_end(self):
arr = self._make_spans()
self._assertArrayEquals(arr.begin, [0, 5, 8, 10, 0, 8, 0])
self._assertArrayEquals(arr.end, [4, 7, 9, 14, 7, 14, 14])
def test_normalized_covered_text(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.normalized_covered_text,
["this", "is", "a", "test", "this is", "a test", "this is a test"],
)
def test_as_frame(self):
arr = self._make_spans()
df = arr.as_frame()
self._assertArrayEquals(
df.columns, ["begin", "end", "begin_token", "end_token", "covered_text"]
)
self.assertEqual(len(df), len(arr))
class TokenSpanArrayIOTests(ArrayTestBase):
def do_roundtrip(self, df):
with tempfile.TemporaryDirectory() as dirpath:
filename = os.path.join(dirpath, 'token_span_array_test.feather')
df.to_feather(filename)
df_read = pd.read_feather(filename)
pd.testing.assert_frame_equal(df, df_read)
def test_feather(self):
toks = self._make_spans_of_tokens()
# Equal token spans to tokens
ts1 = TokenSpanArray(toks, np.arange(len(toks)), np.arange(len(toks)) + 1)
df1 = pd.DataFrame({"ts1": ts1})
self.do_roundtrip(df1)
# More token spans than tokens
ts2 = TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
df2 = pd.DataFrame({"ts2": ts2})
self.do_roundtrip(df2)
# Less token spans than tokens, 2 splits no padding
ts3 = TokenSpanArray(toks, [0, 3], [3, 4])
df3 = pd.DataFrame({"ts3": ts3})
self.do_roundtrip(df3)
# Less token spans than tokens, 1 split with padding
ts4 = TokenSpanArray(toks, [0, 2, 3], [2, 3, 4])
df4 = pd.DataFrame({"ts4": ts4})
self.do_roundtrip(df4)
# With a Span column, TokenSpan padded to same length
df5 = pd.DataFrame({"cs": toks})
df5 = pd.concat([df3, df5], axis=1)
self.do_roundtrip(df5)
# All columns together, TokenSpan arrays padded as needed
df = | pd.concat([df1, df2, df3, df4], axis=1) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
web_stats = {'Day':[1,2,3,4,5,6], 'Visitors':[43,53,34,45,64,34],'Bounce_Rate':[65,72,62,64,54,66]}
df = | pd.DataFrame(web_stats) | pandas.DataFrame |
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import argparse
import copy
import json
import logging
import os
import sys
import time
import git
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from merlion.evaluate.anomaly import (
TSADEvaluatorConfig,
accumulate_tsad_score,
TSADScoreAccumulator as ScoreAcc,
TSADEvaluator,
)
from merlion.models.anomaly.base import DetectorBase
from merlion.models.ensemble.anomaly import DetectorEnsemble
from merlion.evaluate.anomaly import TSADMetric, ScoreType
from merlion.models.factory import ModelFactory
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries
from merlion.utils.resample import to_pd_datetime
from ts_datasets.anomaly import *
logger = logging.getLogger(__name__)
# Benchmark code assumes you have created data/<dirname> symlinks to
# the root directories of all the relevant datasets
MERLION_ROOT = os.path.dirname(os.path.abspath(__file__))
CONFIG_JSON = os.path.join(MERLION_ROOT, "conf", "benchmark_anomaly.json")
DATADIR = os.path.join(MERLION_ROOT, "data")
def parse_args():
with open(CONFIG_JSON, "r") as f:
valid_models = list(json.load(f).keys())
parser = argparse.ArgumentParser(
description="Script to benchmark Merlion time series anomaly detection "
"models. This script assumes that you have pip installed "
"both merlion (this repo's main package) and ts_datasets "
"(a sub-repo)."
)
parser.add_argument(
"--dataset",
default="NAB_all",
help="Name of dataset to run benchmark on. See get_dataset() "
"in ts_datasets/ts_datasets/anomaly/__init__.py for "
"valid options.",
)
parser.add_argument(
"--models",
type=str,
nargs="+",
default=["DefaultDetector"],
help="Name of model (or models in ensemble) to benchmark.",
choices=valid_models,
)
parser.add_argument(
"--retrain_freq",
type=str,
default="default",
help="String (e.g. 1d, 2w, etc.) specifying how often "
"to re-train the model before evaluating it on "
"the next window of data. Note that re-training "
"is unsupervised, i.e. does not use ground truth "
"anomaly labels in any way. Default retrain_freq is "
"1d for univariate data and None for multivariate.",
)
parser.add_argument(
"--train_window",
type=str,
default=None,
help="String (e.g. 30d, 6m, etc.) specifying how much "
"data (in terms of a time window) the model "
"should train on at any point.",
)
parser.add_argument(
"--metric",
type=str,
default="F1",
choices=list(TSADMetric.__members__.keys()),
help="Metric to optimize for (where relevant)",
)
parser.add_argument(
"--point_adj_metric",
type=str,
default="PointAdjustedF1",
choices=list(TSADMetric.__members__.keys()),
help="Final metric to optimize for when evaluating point-adjusted performance",
)
parser.add_argument(
"--pointwise_metric",
type=str,
default="PointwiseF1",
choices=list(TSADMetric.__members__.keys()),
help="Final metric to optimize for when evaluating pointwise performance",
)
parser.add_argument("--unsupervised", action="store_true")
parser.add_argument(
"--tune_on_test",
action="store_true",
default=False,
help="Whether to tune the threshold on both train and "
"test splits of the time series. Useful for "
"metrics like Best F1, or NAB score with "
"threshold optimization.",
)
parser.add_argument(
"--load_checkpoint",
action="store_true",
default=False,
help="Specify this option if you would like continue "
"training your model on a dataset from a "
"checkpoint, instead of restarting from scratch.",
)
parser.add_argument(
"--eval_only",
action="store_true",
default=False,
help="Specify this option if you would like to skip "
"the model training phase, and simply evaluate "
"on partial saved results.",
)
parser.add_argument("--debug", action="store_true", default=False, help="Whether to enable INFO-level logs.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Whether to plot the model's predictions after "
"training on each example. Mutually exclusive "
"with running any sort of evaluation.",
)
args = parser.parse_args()
args.metric = TSADMetric[args.metric]
args.pointwise_metric = TSADMetric[args.pointwise_metric]
args.visualize = args.visualize and not args.eval_only
if args.retrain_freq.lower() in ["", "none", "null"]:
args.retrain_freq = None
elif args.retrain_freq != "default":
rf = pd.to_timedelta(args.retrain_freq).total_seconds()
if rf % (3600 * 24) == 0:
args.retrain_freq = f"{int(rf/3600/24)}d"
elif rf % 3600 == 0:
args.retrain_freq = f"{int(rf/3600)}h"
elif rf % 60 == 0:
args.retrain_freq = f"{int(rf//60)}min"
else:
args.retrain_freq = f"{int(rf)}s"
return args
def dataset_to_name(dataset: TSADBaseDataset):
if dataset.subset is not None:
return f"{type(dataset).__name__}_{dataset.subset}"
return type(dataset).__name__
def dataset_to_threshold(dataset: TSADBaseDataset, tune_on_test=False):
if isinstance(dataset, IOpsCompetition):
return 2.25
elif isinstance(dataset, NAB):
return 3.5
elif isinstance(dataset, Synthetic):
return 2
elif isinstance(dataset, MSL):
return 3.0
elif isinstance(dataset, SMAP):
return 3.5
elif isinstance(dataset, SMD):
return 3 if not tune_on_test else 2.5
elif hasattr(dataset, "default_threshold"):
return dataset.default_threshold
return 3
def resolve_model_name(model_name: str):
with open(CONFIG_JSON, "r") as f:
config_dict = json.load(f)
if model_name not in config_dict:
raise NotImplementedError(
f"Benchmarking not implemented for model {model_name}. Valid model names are {list(config_dict.keys())}"
)
while "alias" in config_dict[model_name]:
assert model_name != config_dict[model_name]["alias"], "Alias name cannot be the same as the model name"
model_name = config_dict[model_name]["alias"]
return model_name
def get_model(
model_name: str, dataset: TSADBaseDataset, metric: TSADMetric, tune_on_test=False, unsupervised=False
) -> Tuple[DetectorBase, dict]:
with open(CONFIG_JSON, "r") as f:
config_dict = json.load(f)
if model_name not in config_dict:
raise NotImplementedError(
f"Benchmarking not implemented for model {model_name}. Valid model names are {list(config_dict.keys())}"
)
while "alias" in config_dict[model_name]:
model_name = config_dict[model_name]["alias"]
# Load the model with default kwargs, but override with dataset-specific
# kwargs where relevant
model_configs = config_dict[model_name]["config"]
model_type = config_dict[model_name].get("model_type", model_name)
model_kwargs = model_configs["default"]
model_kwargs.update(model_configs.get(type(dataset).__name__, {}))
model = ModelFactory.create(name=model_type, **model_kwargs)
# The post-rule train configs are fully specified for each dataset (where
# relevant), with a default option if there is no dataset-specific option.
post_rule_train_configs = config_dict[model_name].get("post_rule_train_config", {})
d = post_rule_train_configs.get("default", {})
d.update(post_rule_train_configs.get(type(dataset).__name__, {}))
if len(d) == 0:
d = copy.copy(model._default_post_rule_train_config)
d["metric"] = None if unsupervised else metric
d.update({"max_early_sec": dataset.max_lead_sec, "max_delay_sec": dataset.max_lag_sec})
t = dataset_to_threshold(dataset, tune_on_test)
model.threshold.alm_threshold = t
d["unsup_quantile"] = None
return model, d
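# Hedged illustration (made-up model name and kwargs, abridged): judging from
# how resolve_model_name() and get_model() read benchmark_anomaly.json above,
# each entry is expected to look roughly like
#
#   "MyDetector": {
#       "model_type": "MyDetector",
#       "config": {
#           "default": {"some_kwarg": 1},
#           "NAB": {"some_kwarg": 2}
#       },
#       "post_rule_train_config": {"default": {}}
#   }
#
# where dataset-specific blocks (keyed by dataset class name) override the
# "default" kwargs, and an "alias" entry simply points at another model name.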
def df_to_merlion(df: pd.DataFrame, md: pd.DataFrame, get_ground_truth=False, transform=None) -> TimeSeries:
"""Converts a pandas dataframe time series to the Merlion format."""
if get_ground_truth:
if False and "changepoint" in md.keys():
series = md["anomaly"] | md["changepoint"]
else:
series = md["anomaly"]
else:
series = df
time_series = TimeSeries.from_pd(series)
if transform is not None:
time_series = transform(time_series)
return time_series
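# Hedged usage sketch (not part of the benchmark); the "_example" name and the
# made-up values below are invented for illustration only.
def _example_df_to_merlion():
    """Illustrates the shape of the df/md frames that df_to_merlion() expects
    from ts_datasets: raw values in df and a boolean 'anomaly' column in md."""
    example_idx = pd.date_range("2021-01-01", periods=4, freq="H")
    example_df = pd.DataFrame({"value": [1.0, 2.0, 100.0, 2.0]}, index=example_idx)
    example_md = pd.DataFrame({"anomaly": [False, False, True, False]}, index=example_idx)
    values_ts = df_to_merlion(example_df, example_md)  # raw values as a TimeSeries
    labels_ts = df_to_merlion(example_df, example_md, get_ground_truth=True)  # labels
    return values_ts, labels_ts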
def train_model(
model_name,
metric,
dataset,
retrain_freq=None,
train_window=None,
load_checkpoint=False,
visualize=False,
debug=False,
unsupervised=False,
tune_on_test=False,
):
"""Trains a model on the time series dataset given, and save their predictions
to a dataset."""
resampler = None
if isinstance(dataset, IOpsCompetition):
resampler = TemporalResample("5min")
model_name = resolve_model_name(model_name)
dataset_name = dataset_to_name(dataset)
model_dir = model_name if retrain_freq is None else f"{model_name}_{retrain_freq}"
dirname = os.path.join("results", "anomaly", model_dir)
csv = os.path.join(dirname, f"pred_{dataset_name}.csv.gz")
config_fname = os.path.join(dirname, f"{dataset_name}_config.json")
checkpoint = os.path.join(dirname, f"ckpt_{dataset_name}.txt")
# Determine where to start within the dataset if there is a checkpoint
i0 = 0
if os.path.isfile(checkpoint) and os.path.isfile(csv) and load_checkpoint:
with open(checkpoint, "r") as f:
i0 = int(f.read().rstrip("\n"))
# Validate & sanitize the existing CSV checkpoint
df = pd.read_csv(csv, dtype={"trainval": bool, "idx": int})
df = df[df["idx"] < i0]
if set(df["idx"]) == set(range(i0)):
df.to_csv(csv, index=False)
else:
i0 = 0
model = None
for i, (df, md) in enumerate(tqdm(dataset)):
if i < i0:
continue
# Reload model & get the train / test split for this time series
model, post_rule_train_config = get_model(
model_name=model_name, dataset=dataset, metric=metric, tune_on_test=tune_on_test, unsupervised=unsupervised
)
delay = post_rule_train_config["max_delay_sec"]
train_vals = df_to_merlion(df[md.trainval], md[md.trainval], get_ground_truth=False, transform=resampler)
test_vals = df_to_merlion(df[~md.trainval], md[~md.trainval], get_ground_truth=False, transform=resampler)
train_anom = df_to_merlion(df[md.trainval], md[md.trainval], get_ground_truth=True)
test_anom = df_to_merlion(df[~md.trainval], md[~md.trainval], get_ground_truth=True)
# Set up an evaluator & get predictions
evaluator = TSADEvaluator(
model=model,
config=TSADEvaluatorConfig(
train_window=train_window,
retrain_freq=retrain_freq,
max_delay_sec=delay,
max_early_sec=getattr(model.threshold, "suppress_secs", delay),
),
)
train_scores, test_scores = evaluator.get_predict(
train_vals=train_vals,
test_vals=test_vals,
post_process=False,
train_kwargs={"anomaly_labels": train_anom, "post_rule_train_config": post_rule_train_config},
)
# Write the model's predictions to the csv file, starting a new one
# if we aren't loading an existing checkpoint. Scores from all time
# series in the dataset are combined together in a single csv. Each
# line in the csv corresponds to a point in a time series, and contains
# the timestamp, raw anomaly score, and index of the time series.
if not visualize:
if i == i0 == 0:
os.makedirs(os.path.dirname(csv), exist_ok=True)
df = pd.DataFrame({"timestamp": [], "y": [], "trainval": [], "idx": []})
df.to_csv(csv, index=False)
df = pd.read_csv(csv)
ts_df = train_scores.to_pd().append(test_scores.to_pd())
ts_df.columns = ["y"]
ts_df.loc[:, "timestamp"] = ts_df.index.view(int) // 1e9
ts_df.loc[:, "trainval"] = [j < len(train_scores) for j in range(len(ts_df))]
ts_df.loc[:, "idx"] = i
df = df.append(ts_df, ignore_index=True)
df.to_csv(csv, index=False)
# Start from time series i+1 if loading a checkpoint.
with open(checkpoint, "w") as f:
f.write(str(i + 1))
if visualize or debug:
# Train the post-rule on the appropriate labels
score = test_scores if tune_on_test else train_scores
label = test_anom if tune_on_test else train_anom
model.train_post_rule(
anomaly_scores=score, anomaly_labels=label, post_rule_train_config=post_rule_train_config
)
# Log (many) evaluation metrics for the time series
score_acc = evaluator.evaluate(ground_truth=test_anom, predict=model.threshold(test_scores))
mttd = score_acc.mean_time_to_detect()
if mttd < | pd.to_timedelta(0) | pandas.to_timedelta |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 26 21:38:39 2021
@author: panay
"""
import pandas as pd
import plotly.express as px
import os
from glob import glob
pd.options.plotting.backend = "plotly"
def Separate(df):
#separate the 2 people
df1= df.loc[df[speaker] == df[speaker][0]]
df2= df.loc[df[speaker] != df1[speaker][0]]
df1 = df1.reset_index(drop=True)
df2 = df2.reset_index(drop=True)
Person_1 = df1[speaker][0]
Person_2 = df2[speaker][0]
return df,Person_1,Person_2
def Wordcount(df,Person_1,Person_2):
List = []
#count words
for i in range(len(df.index)):
NumWords=0
sentence=df[message][i]
#checks if the sentence is NaN
if isinstance(sentence, str):
NumWords =NumWords + len(sentence.split())
else:
NumWords=0
List.append(NumWords)
df['Num_Words']=List
#separate the words for each speaker
df['Num_Words_1']=df['Num_Words'].loc[df[speaker]==Person_1]
df['Num_Words_2']=df['Num_Words'].loc[df[speaker]==Person_2]
df.drop('Num_Words',inplace=True,axis=1)
df=df.fillna(0)
return df
def Message_Spam(df,Person_1,Person_2):
list_1 = []
list_2 = []
count_1 = 0
count_2 = 0
for i in range(len(df)):
if df[speaker][i] == Person_1:
count_1 +=1
count_2 = 0
elif df[speaker][i] == Person_2:
count_2 +=1
count_1 = 0
list_1.append(count_1)
list_2.append(count_2)
df['Spam_1'] = list_1
df['Spam_2'] = list_2
return df
def CountSpamWords(df):
NumWords_1 = 0
NumWords_2 = 0
NumWordsSpam_1 = []
NumWordsSpam_2 = []
for i in range(len(df)):
if df['Spam_1'][i] != 0:
NumWords_1 += df['Num_Words_1'][i]
NumWords_2 = 0
elif df['Spam_2'][i] != 0:
NumWords_2 += df['Num_Words_2'][i]
NumWords_1 = 0
NumWordsSpam_1.append(NumWords_1)
NumWordsSpam_2.append(NumWords_2)
df['Number_of_Continuous_Words_1'] = NumWordsSpam_1
df['Number_of_Continuous_Words_2'] = NumWordsSpam_2
return df
def Density(df):
df['Dn_1']= df['Number_of_Continuous_Words_1'].div(df['Spam_1'])
df['Dn_2']= df['Number_of_Continuous_Words_2'].div(df['Spam_2'])
df=df.fillna(0)
return df
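# Hedged illustration (made-up data, not part of the original script): with
# messages ordered A, A, B the streak columns from Message_Spam() come out as
# Spam_1 = [1, 2, 0] and Spam_2 = [0, 0, 1]; CountSpamWords() accumulates the
# word counts over each streak, and Density() divides the two, so Dn_1/Dn_2
# hold the average number of words per message within the current streak.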
def Time(df,name):
dfTime= df.groupby(['time',speaker]).sum()
dfTime=dfTime.reset_index()
dfTime=dfTime.drop(speaker,axis=1)
dfTime = dfTime.set_index('time')
fig = dfTime.boxplot(title="Person 1/ Person 2", template="simple_white")
fig.show()
fig.write_html("./"+str(name)+'_'+'Time_box'+ ".html")
fig = px.bar(dfTime,title="Person 1/ Person 2", template="simple_white",
labels=dict(index="Index of Message", value="NumWords", variable="option"))
fig.show()
fig.write_html("./"+str(name)+'_'+'Time'+ ".html")
return dfTime
def Avg(df,filename,GlobalMetrics):
#get mean Global metrics
avg=df.mean(axis=0)
columns_to_use=avg.index.tolist()
avg=list(avg)
#Save them according to their file
GlobalMetrics[str(filename)]=avg
dfGlobal= | pd.DataFrame.from_dict(GlobalMetrics,orient='index',columns=columns_to_use ) | pandas.DataFrame.from_dict |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date= | pd.Timestamp('2015-01-15', tz='utc') | pandas.Timestamp |
# -------------------------------------------------------------------------------
# Name: critical_loads.py
# Purpose: Functions to implement the updated (November 2018) Critical Loads
# workflow.
#
# Author: <NAME>
# -------------------------------------------------------------------------------
def view_dep_series(eng):
"""View table of deposition series already in the database.
Args:
eng: Obj. Active database connection object
Returns:
Dataframe.
"""
import pandas as pd
# Get existing series from db
sql = "SELECT * FROM deposition.dep_series_defs"
df = pd.read_sql(sql, eng)
return df
def add_dep_series(series_id, name, short_name, grid, desc, eng):
"""Add new deposition series to the database.
Args:
series_id: Int. Unique integer ID for series
name: Str. Full name for this series
short_name: Str. Short name for this series
grid: Str. One of ['blr', '0_1deg', 'emep']
desc: Str. Description of series
eng: Obj. Active database connection object
Returns:
None. Row is added.
"""
import pandas as pd
from sqlalchemy.sql import text
assert isinstance(series_id, int), "'series_id' must be an integer."
assert grid in (
"blr",
"0_1deg",
"emep",
), "'grid' must be one of ('blr', '0_1deg', 'emep')."
# Get existing series from db
sql = (
"INSERT INTO deposition.dep_series_defs "
"(series_id, name, short_name, grid, description) "
"VALUES "
"(:series_id, :name, :short_name, :grid, :desc)"
)
param_dict = {
"series_id": series_id,
"name": name,
"short_name": short_name,
"grid": grid,
"desc": desc,
}
sql = text(sql)
eng.execute(sql, param_dict)
print("Series added successfully.")
return None
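# Hedged usage sketch (hypothetical connection string and series details):
#
#   from sqlalchemy import create_engine
#   eng = create_engine("postgresql://user:password@host:5432/critical_loads")
#   add_dep_series(series_id=99, name="Example deposition 2017-2021",
#                  short_name="example_1721", grid="0_1deg",
#                  desc="Illustration only", eng=eng)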
def upload_nilu_0_1deg_dep_data(data_fold, eng, series_id):
"""Process .dat files containing deposition data supplied by NILU. This
function is based on the data supplied by NILU during 2017, which uses
the new 0.1 degree deposition grid.
Args:
        data_fold: Str. Path to folder containing .dat files provided by NILU
        eng: Obj. Active database connection object connected to the Docker
            PostGIS db
series_id: Int. 'series_id' for this dataset from the table
deposition.dep_series_defs
Returns:
DataFrame of the data added to the database.
"""
import glob
import os
import pandas as pd
# Read NILU data
search_path = os.path.join(data_fold, "*.dat")
file_list = glob.glob(search_path)
df_list = []
for fpath in file_list:
# Get par name
name = os.path.split(fpath)[1].split("_")[:2]
name = "_".join(name)
# Read file
df = pd.read_csv(
fpath, delim_whitespace=True, header=None, names=["lat", "lon", name]
)
df.set_index(["lat", "lon"], inplace=True)
df_list.append(df)
# Combine
df = pd.concat(df_list, axis=1)
df.reset_index(inplace=True)
# Calculate unique integer cell ID as latlon
# (both *100 and padded to 4 digits)
df["cell_id"] = (df["lat"] * 100).astype(int).map("{:04d}".format) + (
df["lon"] * 100
).astype(int).map("{:04d}".format)
df["cell_id"] = df["cell_id"].astype(int)
del df["lat"], df["lon"], df["tot_n"]
# Rename
df.rename(
columns={
"tot_nhx": 2, # N (red)
"tot_nox": 1, # N (oks)
"tot_s": 4,
}, # Non-marine S
inplace=True,
)
# Melt
df = pd.melt(df, var_name="param_id", id_vars="cell_id")
# Add series ID
df["series_id"] = series_id
# Add to db
df.to_sql(
"dep_values_0_1deg_grid",
con=eng,
schema="deposition",
if_exists="append",
index=False,
method="multi",
chunksize=1000,
)
print("%s new rows added successfully." % len(df))
return df
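# Hedged helper (illustration only, not used by the workflow): shows the cell
# ID encoding applied in upload_nilu_0_1deg_dep_data() above, i.e. lat and lon
# are each multiplied by 100 and zero-padded to four digits before being
# concatenated into a single integer (60.25, 10.75 -> 60251075).
def _example_cell_id(lat=60.25, lon=10.75):
    return int("{:04d}{:04d}".format(int(lat * 100), int(lon * 100)))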
def extract_deposition_as_gdf(series_id, par, eng, veg_class=None):
"""Extracts deposition data for the specified series as a geodataframe.
Args:
series_id: Int. ID for deposition series of interest
par: Str. One of ['nitrogen', 'sulphur']
eng: Obj. Active database connection object
veg_class: Str or None. Only applies for data using the EMEP grid, which
reports deposition values for different vegetation classes.
For EMEP, must be one of ['grid average', 'forest', 'semi-natural'];
otherwise, pass None
Returns:
GeoDataFrame.
"""
import geopandas as gpd
import warnings
from sqlalchemy.sql import text
veg_class_dict = {"grid average": 1, "forest": 2, "semi-natural": 3}
assert isinstance(series_id, int), "'series_id' must be an integer."
assert par in (
"nitrogen",
"sulphur",
), "'par' must be one of ('nitrogen', 'sulphur')."
# Identify grid
param_dict = {"series_id": series_id}
sql = "SELECT grid FROM deposition.dep_series_defs " "WHERE series_id = :series_id"
sql = text(sql)
grid = eng.execute(sql, param_dict).fetchall()[0][0]
assert (
grid is not None
), "'grid' is not defined for this series in the 'dep_series_defs' table.\n"
    if grid == "emep":
        assert veg_class in ["grid average", "forest", "semi-natural"], (
            "The specified series ID refers to the EMEP grid, "
            "so you must also specify a valid 'veg_class' parameter.\n"
            "Choose one of ['grid average', 'forest', 'semi-natural'] "
            "and pass e.g. veg_class='grid average'."
        )
if (grid != "emep") and (veg_class is not None):
print(
"WARNING: The specified series ID does NOT refer to the EMEP grid. "
"The 'veg_class' parameter will be ignored."
)
if par == "nitrogen":
unit_factor = 1 / 14.01
if grid == "emep":
param_dict["veg_class_id"] = veg_class_dict[veg_class]
# Choose 'grid-average' for veg class
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.n_dep) AS ndep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as n_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id IN (1, 2) "
f" AND veg_class_id = :veg_class_id "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
else:
# No veg classes to consider
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.n_dep) AS ndep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as n_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id IN (1, 2) "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
else:
unit_factor = 2 / 32.06
if grid == "emep":
param_dict["veg_class_id"] = veg_class_dict[veg_class]
# Choose 'grid-average' for veg class
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.s_dep) AS sdep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as s_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id = 4 "
f" AND veg_class_id = :veg_class_id "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
else:
# No veg classes to consider
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.s_dep) AS sdep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as s_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id = 4 "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
sql = text(sql)
gdf = gpd.read_postgis(sql, eng, params=param_dict)
# Convert units
gdf[par[0] + "dep_meqpm2pyr"] = gdf[par[0] + "dep_mgpm2pyr"] * unit_factor
gdf[par[0] + "dep_kgphapyr"] = gdf[par[0] + "dep_mgpm2pyr"] / 100
return gdf
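# Hedged usage sketch (hypothetical series ID): pull nitrogen deposition for
# one series and inspect the three unit columns added above.
#
#   n_gdf = extract_deposition_as_gdf(28, "nitrogen", eng)
#   n_gdf[["cell_id", "ndep_mgpm2pyr", "ndep_meqpm2pyr", "ndep_kgphapyr"]].head()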
def create_deposition_raster(
series_id,
par,
unit,
cell_size,
eng,
ndv=-9999,
bit_depth="Int16",
fname=None,
veg_class=None,
):
"""Create a raster of deposition values from a Geodataframe.
Args:
series_id: Int. ID for deposition series of interest
par: Str. One of ['nitrogen', 'sulphur']
        unit: Str. One of ['mgpm2pyr', 'kgphapyr', 'meqpm2pyr']
cell_size: Int. Output cell size in metres. Determines the "snap raster" to be used
One of (30, 60, 120)
eng: Obj. Active database connection object
ndv: Int. Value to use for No Data
bit_depth: Str. GDAL bit depth:
'Byte'
'Int16'
'UInt16'
'UInt32'
'Int32'
'Float32'
'Float64'
        fname: Str or None. File path for output. If None, the raster will be saved to
                       shared/critical_loads/raster/deposition/short_name.tif
                       where 'short_name' is as defined in the 'dep_series_defs' table.
        veg_class: Str or None. Only applies for data using the EMEP grid, which
                       reports deposition values for different vegetation classes.
                       For EMEP, must be one of ['grid average', 'forest', 'semi-natural'];
                       otherwise, pass None
Returns:
None. The grid is saved to the specified path.
"""
import geopandas as gpd
import os
from sqlalchemy.sql import text
    assert unit in (
        "mgpm2pyr",
        "kgphapyr",
        "meqpm2pyr",
    ), "'unit' must be one of ('mgpm2pyr', 'kgphapyr', 'meqpm2pyr')."
assert cell_size in (30, 60, 120), "'cell_size' must be one of (30, 60, 120)."
# Get data
gdf = extract_deposition_as_gdf(series_id, par, eng, veg_class=veg_class)
# Save temporary file
gdf.to_file("temp_ndep.geojson", driver="GeoJSON")
# Convert to raster
col_name = f"{par[0]}dep_{unit}"
if fname is None:
# Get short_name from db
param_dict = {"series_id": series_id}
sql = "SELECT short_name FROM deposition.dep_series_defs WHERE series_id = :series_id"
sql = text(sql)
res = eng.execute(sql, param_dict).fetchall()[0][0]
assert res is not None, (
"'short_name' is not defined for this series in the 'dep_series_defs' table.\n"
"Consider explicitly specifying a file name?"
)
fname = f"/home/jovyan/shared/critical_loads/raster/deposition/{col_name}_{res}_{cell_size}m.tif"
snap_tif = (
f"/home/jovyan/shared/critical_loads/raster/blr_land_mask_{cell_size}m.tif"
)
vec_to_ras("temp_ndep.geojson", fname, snap_tif, col_name, ndv, bit_depth)
# Delete temp file
os.remove("temp_ndep.geojson")
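# Hedged usage sketch (hypothetical series ID; the output file name is
# resolved from 'short_name' as described in the docstring):
#
#   create_deposition_raster(series_id=28, par="nitrogen", unit="mgpm2pyr",
#                            cell_size=120, eng=eng)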
def vec_to_ras(in_shp, out_tif, snap_tif, attrib, ndv, data_type, fmt="GTiff"):
"""Converts a shapefile to a raster with values taken from
the 'attrib' field. The 'snap_tif' is used to set the
resolution and extent of the output raster.
Args:
in_shp: Str. Raw string to shapefile
out_tif: Str. Raw string for geotiff to create
snap_tif: Str. Raw string to geotiff used to set resolution
and extent
attrib: Str. Shapefile field for values
ndv: Int. No data value
data_type: Str. GDAL bit depth:
'Byte'
'Int16'
'UInt16'
'UInt32'
'Int32'
'Float32'
'Float64'
fmt: Str. Format string.
Returns:
None. Raster is saved.
"""
import ogr
import gdal
# Bit depth dict
bit_dict = {
"Byte": gdal.GDT_Byte,
"Int16": gdal.GDT_Int16,
"UInt16": gdal.GDT_UInt16,
"UInt32": gdal.GDT_UInt32,
"Int32": gdal.GDT_Int32,
"Float32": gdal.GDT_Float32,
"Float64": gdal.GDT_Float64,
}
assert data_type in bit_dict.keys(), "ERROR: Invalid data type."
# 1. Create new, empty raster with correct dimensions
# Get properties from snap_tif
snap_ras = gdal.Open(snap_tif)
cols = snap_ras.RasterXSize
rows = snap_ras.RasterYSize
proj = snap_ras.GetProjection()
geotr = snap_ras.GetGeoTransform()
# Create out_tif
driver = gdal.GetDriverByName(fmt)
out_ras = driver.Create(
out_tif, cols, rows, 1, bit_dict[data_type], options=["COMPRESS=LZW"]
)
out_ras.SetProjection(proj)
out_ras.SetGeoTransform(geotr)
# Fill output with NoData
out_ras.GetRasterBand(1).SetNoDataValue(ndv)
out_ras.GetRasterBand(1).Fill(ndv)
# 2. Rasterize shapefile
shp_ds = ogr.Open(in_shp)
shp_lyr = shp_ds.GetLayer()
gdal.RasterizeLayer(
out_ras, [1], shp_lyr, options=["ATTRIBUTE=%s" % attrib, "COMPRESS=LZW"]
)
# Flush and close
snap_ras = None
out_ras = None
shp_ds = None
def reclassify_raster(in_tif, mask_tif, out_tif, reclass_df, reclass_col, ndv):
"""Reclassify categorical values in a raster using a mapping
in a dataframe. The dataframe index must contain the classes
in in_tif and the 'reclass_col' must specify the new classes.
Only cells with value=1 in 'mask_tif' are written to output.
Args:
in_tif: Str. Raw path to input raster
mask_tif: Str. Raw path to mask grid defining land area
out_tif: Str. Raw path to .tif file to create
reclass_df: DataFrame. Reclassification table
reclass_col: Str. Name of column with new raster values
ndv: Int. Value to use as NoData in the new raster
Returns:
None. A new raster is saved.
"""
import gdal
import ogr
from gdalconst import GA_ReadOnly as GA_ReadOnly
import numpy as np
import pandas as pd
# Open source file, read data
src_ds = gdal.Open(in_tif, GA_ReadOnly)
assert src_ds
rb = src_ds.GetRasterBand(1)
src_data = rb.ReadAsArray()
# Open mask, read data
mask_ds = gdal.Open(mask_tif, GA_ReadOnly)
assert mask_ds
mb = mask_ds.GetRasterBand(1)
mask_data = mb.ReadAsArray()
# Reclassify
rc_data = src_data.copy()
for idx, row in reclass_df.iterrows():
rc_data[src_data == idx] = row[reclass_col]
# Apply mask
rc_data[mask_data != 1] = ndv
# Write output
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.CreateCopy(out_tif, src_ds, 0, options=["COMPRESS=LZW"])
out_band = dst_ds.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(rc_data)
# Flush data and close datasets
dst_ds = None
src_ds = None
mask_ds = None
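# Hedged usage sketch (made-up class codes and paths): the index of reclass_df
# holds the raster values found in in_tif and reclass_col holds the values to
# write out.
#
#   import pandas as pd
#   reclass_df = pd.DataFrame({"crit_load": [500, 1000, 0]}, index=[1, 2, 3])
#   reclassify_raster("veg_classes.tif", "land_mask.tif", "veg_crit_loads.tif",
#                     reclass_df, "crit_load", ndv=255)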
def calc_vegetation_exceedance_0_1deg(dep_tif, cl_tif, ex_tif, ex_tif_bool, ser_id):
"""Calculate exceedances for vegetation.
Args:
dep_tif: Str. Raw string to deposition grid
cl_tif: Str. Raw string to critical loads grid
ex_tif: Str. Raw string to exceedance grid to be created
ex_tif_bool: Str. Raw string to exceedance grid with Boolean values (i.e. 1
where exceeded and 0 otherwise)
ser_id: Int. Deposition series ID for the data of interest
Returns:
Summary dataframe.
"""
import nivapy3 as nivapy
import pandas as pd
import numpy as np
import gdal
# Container for output
data_dict = {"total_area_km2": [], "exceeded_area_km2": []}
# Read grids
cl_grid, cl_ndv, cl_epsg, cl_ext = nivapy.spatial.read_raster(cl_tif)
dep_grid, dep_ndv, dep_epsg, dep_ext = nivapy.spatial.read_raster(dep_tif)
# Work out cell size
cs = (cl_ext[1] - cl_ext[0]) / cl_grid.shape[1]
cs = float(int(cs + 0.5))
# Upcast to float32 for safe handling of negative values
cl_grid = cl_grid.astype(np.float32)
dep_grid = dep_grid.astype(np.float32)
# Set ndv
cl_grid[cl_grid == cl_ndv] = np.nan
dep_grid[dep_grid == dep_ndv] = np.nan
# Get total area of non-NaN from dep grid
nor_area = np.count_nonzero(~np.isnan(dep_grid)) * cs * cs / 1.0e6
# Apply scaling factor to CLs
cl_grid = cl_grid * 100.0
# Exceedance
ex_grid = dep_grid - cl_grid
del dep_grid, cl_grid
# Get total area exceeded
ex_area = np.count_nonzero(ex_grid > 0) * cs * cs / 1.0e6
# Set <0 to 0
ex_grid[ex_grid < 0] = 0
# Reset ndv
ex_grid[np.isnan(ex_grid)] = -1
# Downcast to int16 to save space
ex_grid = ex_grid.round(0).astype(np.int16)
# Append results
data_dict["total_area_km2"].append(nor_area)
data_dict["exceeded_area_km2"].append(ex_area)
# Write exceedance output
write_geotiff(ex_grid, ex_tif, cl_tif, -1, gdal.GDT_Int16)
# Convert to bool grid
ex_grid[ex_grid > 0] = 1
ex_grid[ex_grid == -1] = 255
# Write bool output
write_geotiff(ex_grid, ex_tif_bool, cl_tif, 255, gdal.GDT_Byte)
del ex_grid
# Build output df
ex_df = pd.DataFrame(data_dict)
ex_df["exceeded_area_pct"] = (
100 * ex_df["exceeded_area_km2"] / ex_df["total_area_km2"]
)
ex_df = ex_df.round(0).astype(int)
ex_df["series_id"] = ser_id
ex_df["medium"] = "vegetation"
ex_df = ex_df[
[
"series_id",
"medium",
"total_area_km2",
"exceeded_area_km2",
"exceeded_area_pct",
]
]
return ex_df
def write_geotiff(data, out_tif, snap_tif, ndv, data_type):
"""Write a numpy array to a geotiff using 'snap_tif' to define
raster properties.
Args:
data: Array.
out_tif: Str. File to create
snap_tif: Str. Path to existing tif with same resolution
and extent as target
ndv: Int. No data value
data_type: Bit depth etc. e.g. gdal.GDT_UInt32
Returns:
None. Geotiff is saved.
"""
from osgeo import ogr
from osgeo import gdal
# 1. Create new, empty raster with correct dimensions
# Get properties from snap_tif
snap_ras = gdal.Open(snap_tif)
cols = snap_ras.RasterXSize
rows = snap_ras.RasterYSize
proj = snap_ras.GetProjection()
geotr = snap_ras.GetGeoTransform()
# Create out_tif
driver = gdal.GetDriverByName("GTiff")
out_ras = driver.Create(out_tif, cols, rows, 1, data_type, options=["COMPRESS=LZW"])
out_ras.SetProjection(proj)
out_ras.SetGeoTransform(geotr)
# Write data
out_band = out_ras.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(data)
# Flush and close
snap_ras = None
out_band = None
out_ras = None
def bbox_to_pixel_offsets(gt, bbox):
"""Helper function for zonal_stats(). Modified from:
https://gist.github.com/perrygeo/5667173
Original code copyright 2013 <NAME>
"""
originX = gt[0]
originY = gt[3]
pixel_width = gt[1]
pixel_height = gt[5]
x1 = int((bbox[0] - originX) / pixel_width)
x2 = int((bbox[1] - originX) / pixel_width) + 1
y1 = int((bbox[3] - originY) / pixel_height)
y2 = int((bbox[2] - originY) / pixel_height) + 1
xsize = x2 - x1
ysize = y2 - y1
return (x1, y1, xsize, ysize)
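# Hedged worked example (illustration only): for a 30 m grid with geotransform
# gt = (0.0, 30.0, 0.0, 7940000.0, 0.0, -30.0) and an OGR envelope
# bbox = (xmin, xmax, ymin, ymax) = (3000.0, 3600.0, 7936000.0, 7939000.0),
# the offsets come out as x1 = (3000 - 0) / 30 = 100 and
# y1 = (7939000 - 7940000) / -30 = 33 (truncated), giving a read window of
# xsize = 21 by ysize = 101 pixels.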
def remap_categories(category_map, stats):
"""Modified from https://gist.github.com/perrygeo/5667173
Original code copyright 2013 <NAME>
"""
def lookup(m, k):
"""Dict lookup but returns original key if not found"""
try:
return m[k]
except KeyError:
return k
return {lookup(category_map, k): v for k, v in stats.items()}
def exceedance_stats_per_0_1deg_cell(
ex_tif,
ser_id,
eng,
write_to_db=True,
nodata_value=-1,
global_src_extent=False,
categorical=False,
category_map=None,
):
"""Summarise exceedance values for each 0.1 degree grid cell.
Args:
        ex_tif: Raw str. Path to exceedance raster
ser_id: Int. Deposition series ID
eng: Obj. Active database connection object
write_to_db: Bool. If True, results will be written to the database
nodata_value: Float. Value in raster to treat as NoData
global_src_extent: Bool. If True, reads all data into memory in a single
pass. May be faster, but also takes up loats of memory
when used with large vector or raster datasets
categorical: Bool. If true, raster is assumed to be categorical, with
integer values representing different categories (e.g. land
use). In this case, the statistics returned are pixel counts
of each category within each vector zone
category_map: Dict. Only used when "categorical" is True. Dict mapping
integer values to category names {int_id:'cat_name'}. If
supplied, the integer categories in the results dataframe
will be mapped to the specified category names
Returns:
GeoDataFrame of cell statistics.
"""
import gdal
import ogr
import numpy as np
import pandas as pd
import sys
import geopandas as gpd
import os
from gdalconst import GA_ReadOnly
gdal.PushErrorHandler("CPLQuietErrorHandler")
# Read vector
temp_fold = os.path.split(ex_tif)[0]
temp_shp = os.path.join(temp_fold, "temp.shp")
    gdf = extract_deposition_as_gdf(ser_id, "nitrogen", eng)
gdf.to_file(temp_shp)
# Read raster
rds = gdal.Open(ex_tif, GA_ReadOnly)
assert rds
rb = rds.GetRasterBand(1)
rgt = rds.GetGeoTransform()
# Get cell size
cs = rgt[1]
if nodata_value:
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
vds = ogr.Open(
temp_shp, GA_ReadOnly
) # TODO maybe open update if we want to write stats
assert vds
vlyr = vds.GetLayer(0)
# create an in-memory numpy array of the source raster data
# covering the whole extent of the vector layer
if global_src_extent:
# use global source extent
# useful only when disk IO or raster scanning inefficiencies are your limiting factor
# advantage: reads raster data in one pass
# disadvantage: large vector extents may have big memory requirements
src_offset = bbox_to_pixel_offsets(rgt, vlyr.GetExtent())
src_array = rb.ReadAsArray(*src_offset)
# calculate new geotransform of the layer subset
new_gt = (
(rgt[0] + (src_offset[0] * rgt[1])),
rgt[1],
0.0,
(rgt[3] + (src_offset[1] * rgt[5])),
0.0,
rgt[5],
)
mem_drv = ogr.GetDriverByName("Memory")
driver = gdal.GetDriverByName("MEM")
# Loop through vectors
stats = []
feat = vlyr.GetNextFeature()
while feat is not None:
if not global_src_extent:
# use local source extent
# fastest option when you have fast disks and well indexed raster (ie tiled Geotiff)
# advantage: each feature uses the smallest raster chunk
# disadvantage: lots of reads on the source raster
src_offset = bbox_to_pixel_offsets(rgt, feat.geometry().GetEnvelope())
src_array = rb.ReadAsArray(*src_offset)
# calculate new geotransform of the feature subset
new_gt = (
(rgt[0] + (src_offset[0] * rgt[1])),
rgt[1],
0.0,
(rgt[3] + (src_offset[1] * rgt[5])),
0.0,
rgt[5],
)
# Create a temporary vector layer in memory
mem_ds = mem_drv.CreateDataSource("out")
mem_layer = mem_ds.CreateLayer("poly", None, ogr.wkbPolygon)
mem_layer.CreateFeature(feat.Clone())
# Rasterize it
rvds = driver.Create(
"", src_offset[2], src_offset[3], 1, gdal.GDT_Byte, options=["COMPRESS=LZW"]
)
rvds.SetGeoTransform(new_gt)
gdal.RasterizeLayer(rvds, [1], mem_layer, burn_values=[1])
rv_array = rvds.ReadAsArray()
# Mask the source data array with our current feature
# we take the logical_not to flip 0<->1 to get the correct mask effect
# we also mask out nodata values explictly
masked = np.ma.MaskedArray(
src_array,
mask=np.logical_or(src_array == nodata_value, np.logical_not(rv_array)),
)
if categorical:
# Get cell counts for each category
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(
zip([np.asscalar(k) for k in keys], [np.asscalar(c) for c in counts])
)
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
# Get summary stats
feature_stats = {
"min": float(masked.min()),
"mean": float(masked.mean()),
"max": float(masked.max()),
"std": float(masked.std()),
"sum": float(masked.sum()),
"count": int(masked.count()),
"fid": int(feat.GetFID()),
}
stats.append(feature_stats)
rvds = None
mem_ds = None
feat = vlyr.GetNextFeature()
# Tidy up
vds = None
rds = None
for fname in ["temp.cpg", "temp.dbf", "temp.prj", "temp.shp", "temp.shx"]:
os.remove(os.path.join(temp_fold, fname))
# Combine results
df = pd.DataFrame(stats)
df.fillna(0, inplace=True)
df["series_id"] = ser_id
df["fid"] = df.index
gdf["fid"] = gdf.index
gdf = gdf.merge(df, on="fid")
# Calc areas
gdf["exceeded_area_km2"] = gdf["exceeded"] * cs * cs / 1e6
gdf["total_area_km2"] = (gdf["exceeded"] + gdf["not_exceeded"]) * cs * cs / 1e6
gdf["pct_exceeded"] = 100 * gdf["exceeded_area_km2"] / gdf["total_area_km2"]
gdf.drop(
[
"fid",
"exceeded",
"not_exceeded",
"ndep_mgpm2pyr",
"ndep_meqpm2pyr",
"ndep_kgphapyr",
],
axis="columns",
inplace=True,
)
gdf.dropna(how="any", inplace=True)
if write_to_db:
gdf2 = gdf.copy()
del gdf2["geom"]
df = pd.DataFrame(gdf2)
df.to_sql(
"exceedance_stats_0_1deg_grid",
eng,
"vegetation",
if_exists="append",
index=False,
method="multi",
chunksize=1000,
)
return gdf
def exceedance_stats_per_land_use_class(
ex_tif_bool, veg_tif, ser_id, eng, write_to_db=True, nodata_value=255
):
"""Summarise exceedance values for each land use class.
Args:
ex_tif_bool: Raw str. Path to boolean exceedance raster
veg_tif: Str. Path to vegetation data with same resolution as ex_tif_bool
ser_id: Int. Deposition series ID
eng: Obj. Active database connection object
write_to_db: Bool. If True, results will be written to the database
nodata_value: Float. Value in rasters to treat as NoData
Returns:
GeoDataFrame of land use statistics.
"""
import gdal
import ogr
import numpy as np
import pandas as pd
import sys
import geopandas as gpd
import os
from gdalconst import GA_ReadOnly
gdal.PushErrorHandler("CPLQuietErrorHandler")
# Read LU table
sql = "SELECT * FROM vegetation.land_class_crit_lds"
lu_df = pd.read_sql(sql, eng)
# Read exceedance raster
rds = gdal.Open(ex_tif_bool, GA_ReadOnly)
assert rds
rb = rds.GetRasterBand(1)
if nodata_value:
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
ex_array = rb.ReadAsArray()
# Get cell size
rgt = rds.GetGeoTransform()
cs = rgt[1]
# Read vegetation raster
rds = gdal.Open(veg_tif, GA_ReadOnly)
assert rds
rb = rds.GetRasterBand(1)
if nodata_value:
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
veg_array = rb.ReadAsArray()
# Loop through land classes
stats = []
for idx, row in lu_df.iterrows():
# Mask the source data array
masked = np.ma.MaskedArray(
ex_array,
mask=np.logical_or(
ex_array == nodata_value, veg_array != row["norut_code"]
),
)
# Get cell counts for each category
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(
zip([np.asscalar(k) for k in keys], [np.asscalar(c) for c in counts])
)
feature_stats = dict(pixel_count)
feature_stats = remap_categories(
{1: "exceeded", 0: "not_exceeded"}, feature_stats
)
stats.append(feature_stats)
# Tidy up
rds = None
# Combine results
df = pd.DataFrame(stats)
df.fillna(0, inplace=True)
df["norut_code"] = lu_df["norut_code"]
df["series_id"] = ser_id
# Calc areas
df["exceeded_area_km2"] = df["exceeded"] * cs * cs / 1e6
df["total_area_km2"] = (df["exceeded"] + df["not_exceeded"]) * cs * cs / 1e6
df["pct_exceeded"] = 100 * df["exceeded_area_km2"] / df["total_area_km2"]
del df["exceeded"], df["not_exceeded"]
df.dropna(how="any", inplace=True)
if write_to_db:
df.to_sql(
"exceedance_stats_land_class",
eng,
"vegetation",
if_exists="append",
index=False,
method="multi",
chunksize=1000,
)
return df
def veg_exceedance_as_gdf_0_1deg(ser_id, eng, shp_path=None):
"""Extracts exceedance statistics for the specified series as a
geodataframe using NILU's 0.1 degree grid. Optionally, the data
can be saved as a shapefile.
Args:
ser_id: Int. ID for deposition series of interest
eng: Obj. Active database connection object
shp_path: Str. Raw path for shapefile to be created
Returns:
GeoDataFrame.
"""
import geopandas as gpd
# Get dep values
sql_args = {"ser_id": ser_id}
sql = (
"SELECT ST_Transform(b.geom, 32633) AS geom, "
" a.cell_id, "
" a.exceeded_area_km2, "
" a.total_area_km2, "
" a.pct_exceeded "
"FROM (SELECT cell_id, "
" exceeded_area_km2, "
" total_area_km2, "
" pct_exceeded "
" FROM vegetation.exceedance_stats_0_1deg_grid "
" WHERE series_id = {ser_id}) AS a, "
"deposition.dep_grid_0_1deg AS b "
"WHERE a.cell_id = b.cell_id"
).format(**sql_args)
gdf = gpd.read_postgis(sql, eng)
if shp_path:
gdf.to_file(shp_path)
return gdf
def calc_anclimit_cla_clminmax(df, bc0):
"""Calculates the ANC Limit, the CLA and the CLmin and CLmax values using
the specified version of BC0, both with and without a correction for organic
acids).
Used by calculate_critical_loads_for_water()
Args:
df: Dataframe.
bc0: Str. One of ['BC0', 'BC0_Ffac', 'BC0_magic']
Returns:
Dataframe with new columns for
['ANClimit_{bc0}',
'ANClimitOAA_{bc0}',
'CLA_{bc0}',
'CLAOAA_{bc0}',
'CLmaxS_{bc0}',
'CLmaxSoaa_{bc0}',
'CLmaxN_{bc0}',
'CLmaxNoaa_{bc0}',
]
"""
import numpy as np
assert bc0 in [
"BC0",
"BC0_Ffac",
"BC0_magic",
], "'bc0' must be one of ['BC0', 'BC0_Ffac', 'BC0_magic']."
method = bc0.replace("BC0", "")
# ANC limit
df[f"ANClimit{method}"] = np.minimum(
50, (0.25 * df["Runoff"] * df[bc0]) / (1 + 0.25 * df["Runoff"])
)
# ANC limit OAA
df[f"ANClimitOAA{method}"] = np.minimum(
40,
(0.2 * df["Runoff"] * (df[bc0] - 3.4 * df["TOC"])) / (1 + 0.2 * df["Runoff"]),
)
# CLA
df[f"CLA{method}"] = df["Runoff"] * (df[bc0] - df[f"ANClimit{method}"])
# CLA OAA
df[f"CLAOAA{method}"] = df["Runoff"] * (
df[bc0] - df[f"ANClimitOAA{method}"] - 3.4 * df["TOC"]
)
# CLmaxS
df[f"CLmaxS{method}"] = df[f"CLA{method}"] / df["alphaS"]
# CLmaxSoaa
df[f"CLmaxSoaa{method}"] = df[f"CLAOAA{method}"] / df["alphaS"]
# CLmaxN
df[f"CLmaxN{method}"] = df["CLminN"] + (df[f"CLA{method}"] / df["alphaN"])
# CLmaxNoaa
df[f"CLmaxNoaa{method}"] = df["CLminN"] + (df[f"CLAOAA{method}"] / df["alphaN"])
return df
def calculate_critical_loads_for_water(
xl_path=None, req_df=None, opt_df=None, mag_df=None, out_csv=None
):
"""Calculates critical loads for water based on values entered in an
Excel template (input_template_critical_loads_water.xlsx) or from
the database. See the Excel file for full details of the input
data requirements.
You must provide EITHER the 'xl_path' OR the three separate
dataframes - NOT BOTH.
This function performs broadly the same calculations as Tore's 'CL'
and 'CALKBLR' packages in RESA2, but generalised to allow for more
flexible input data. The original critical loads calculations were
implemented by Tore's 'cl.clcalculations' function, which has been
documented by Kari:
K:\Avdeling\317 Klima- og miljømodellering\KAU\Focal Centre\Data\CL script 23032015_notes.docx
These notes form the basis for much of the code here.
Args:
        xl_path: Str. Path to a completed copy of the Excel input template
req_df: Dataframe of required parameters
opt_df: Dataframe of optional parameters
mag_df: Dataframe of magic parameters
out_csv: Raw str. The final dataframe is saved to the specified path
Returns:
Dataframe.
"""
import pandas as pd
import numpy as np
# Check input
if xl_path and (req_df is not None or mag_df is not None or opt_df is not None):
message = (
"ERROR: You must provide EITHER the 'xl_path' OR the three "
"separate dataframes - NOT both."
)
print(message)
raise ValueError(message)
if xl_path and (req_df is None and mag_df is None and opt_df is None):
# Read worksheets
req_df = pd.read_excel(xl_path, sheet_name="required_parameters")
opt_df = pd.read_excel(xl_path, sheet_name="optional_parameters")
mag_df = pd.read_excel(xl_path, sheet_name="magic_parameters")
# Dicts of constants used in script
# 1. Default values for parameters
default_vals = {
"Catch_area": 1,
"Lake_area": 0.05,
"Forest_area": 0.95,
"Ni": 3.57,
"Fde": 0.1,
"SN": 5,
"SS": 0.5,
"TOC": 1,
"K": 0,
"Na": 0,
}
# 2. Unit conversions
units_dict = {
"Runoff": 1e-3, # mm/yr => m/yr
"Ca": 2 * 1000 / 40.08, # mg/l => ueq/l
"Cl": 1 * 1000 / 35.45, # mg/l => ueq/l
"Mg": 2 * 1000 / 24.31, # mg/l => ueq/l
"Na": 1 * 1000 / 22.99, # mg/l => ueq/l
"SO4": 2 * 1000 / 96.06, # mg/l => ueq/l
"NO3N": 1 / 14.01, # ug/l => ueq/l
"K": 1 * 1000 / 39.10, # mg/l => ueq/l
}
# 3. Ratios to chloride
cl_ratios = {
"Ca": 0.037,
"Mg": 0.196,
"Na": 0.859,
"SO4": 0.103,
"K": 0.018,
}
# Check region ID is unique
assert req_df[
"Region_id"
].is_unique, "'Region_id' is not unique within worksheet 'required_parameters'."
assert opt_df[
"Region_id"
].is_unique, "'Region_id' is not unique within worksheet 'optional_parameters'."
assert mag_df[
"Region_id"
].is_unique, "'Region_id' is not unique within worksheet 'magic_parameters'."
# Join
df = pd.merge(req_df, opt_df, how="left", on="Region_id")
df = pd.merge(df, mag_df, how="left", on="Region_id")
# Fill NaNs in params with defaults
for col in default_vals.keys():
df[col].fillna(default_vals[col], inplace=True)
# Convert units
for col in units_dict.keys():
df[col] = df[col] * units_dict[col]
# Apply sea-salt correction for required pars
for col in cl_ratios.keys():
df[col] = df[col] - (df["Cl"] * cl_ratios[col])
df.loc[df[col] < 0, col] = 0 # Set negative values to zero
# Apply sea-salt correction for Magic pars
for col in ["Ca_magic", "Mg_magic", "Na_magic", "K_magic"]:
par = col.split("_")[0]
df[col] = df[col] - (df["Cl_magic"] * cl_ratios[par])
df.loc[df[col] < 0, col] = 0 # Set negative values to zero
# Nitrate flux
df["ENO3_flux"] = df["Runoff"] * df["NO3N"]
# 'CLrat' is ratio of lake:catchment area
df["CLrat"] = df["Lake_area"] / df["Catch_area"]
# 'Ffor' is ratio of forest:catchment area
df["Ffor"] = df["Forest_area"] / df["Catch_area"]
# 'Nimm' is the long-term annual immobilisation (accumulation) rate of N
df["Nimm"] = df["Ni"] * (1 - df["CLrat"])
# If 'Lake_area' is 0, SN and SS should be zero, else use defaults of
# 5 and 0.5, respectively
df.loc[df["CLrat"] == 0, "SN"] = 0
df.loc[df["CLrat"] == 0, "SS"] = 0
# Present-day sum of sea-salt corrected base cation concentrations
# NB: K was not included in the original workflow before 20.3.2015
df["BCt"] = df["Ca"] + df["Mg"] + df["Na"] + df["K"]
# Calculate BC0 using F-Factor method
# This was used before 2005 - 2006, but is not relevant at present
df["SO40"] = 3 + 0.17 * df["BCt"]
df["Ffac"] = np.sin((np.pi / 2) * df["Runoff"] * df["BCt"] * (1 / 400))
df["BC0_Ffac"] = df["BCt"] - (df["Ffac"] * (df["SO4"] - df["SO40"] + df["NO3N"]))
# Calculate BC0 using regression
# This is the current approach. Note the following:
# - Prior to 20.3.2015, the equation BC0 = 0.1936 + 1.0409*BCt was used
# This is incorrect - the x and y axes were swapped
# - The correct equation is BC0 = 0.9431*BCt + 0.2744. Note, however, that
# the intercept term is not significant and should probably be omitted
df["BC0"] = 0.9431 * df["BCt"] + 0.2744
# Calculate BC0 from MAGIC output (if provided)
df["BC0_magic"] = df["Ca_magic"] + df["Mg_magic"] + df["Na_magic"] + df["K_magic"]
# Lake retention factors for N and S
df["rhoS"] = df["SS"] / (df["SS"] + (df["Runoff"] / df["CLrat"]))
df["rhoN"] = df["SN"] / (df["SN"] + (df["Runoff"] / df["CLrat"]))
df.loc[df["CLrat"] == 0, "rhoS"] = 0 # If 'CLrat' is 0, rhoS is 0
df.loc[df["CLrat"] == 0, "rhoN"] = 0 # If 'CLrat' is 0, rhoN is 0
# Lake transmission factors. For N, takes into account what is lost to denitrification
# before the N reaches the lake
df["alphaS"] = 1 - df["rhoS"]
df["alphaN"] = (1 - (df["Fde"] * (1 - df["CLrat"]))) * (1 - df["rhoN"])
# beta1 is the fraction of N available for uptake by the forest
df["beta1"] = df["Ffor"] * (1 - df["Fde"]) * (1 - df["rhoN"])
# beta2 the fraction of N available for immobilisation
df["beta2"] = (1 - df["Fde"]) * (1 - df["CLrat"]) * (1 - df["rhoN"])
# Calculate ANC limits, CLAs and CLmax & CLmin (using BC0, BC0_Ffac and BC0_magic, both with and without
# corrections for organic acids)
df["CLminN"] = ((df["beta1"] * df["Nupt"]) + (df["beta2"] * df["Nimm"])) / df[
"alphaN"
]
for bc0_method in ["BC0", "BC0_Ffac", "BC0_magic"]:
df = calc_anclimit_cla_clminmax(df, bc0_method)
# Rename columns to reflect unit and sea-salt corrections
df.rename(
{
"Ca": "ECax",
"Cl": "ECl",
"Mg": "EMgx",
"Na": "ENax",
"SO4": "ESO4x",
"NO3N": "ENO3",
"K": "EKx",
},
inplace=True,
axis="columns",
)
# Columns of interest for output (with units)
col_dict = {
"Nimm": "meq/m2/yr",
"Nupt": "meq/m2/yr",
"rhoN": "",
"Ffor": "",
"CLrat": "",
"BC0": "ueq/l",
"BC0_Ffac": "ueq/l",
"BC0_magic": "ueq/l",
"ANClimit": "ueq/l",
"ANClimitOAA": "ueq/l",
"ANClimit_Ffac": "ueq/l",
"ANClimitOAA_Ffac": "ueq/l",
"ANClimit_magic": "ueq/l",
"ANClimitOAA_magic": "ueq/l",
"CLA": "meq/m2/yr",
"CLAOAA": "meq/m2/yr",
"CLA_Ffac": "meq/m2/yr",
"CLAOAA_Ffac": "meq/m2/yr",
"CLA_magic": "meq/m2/yr",
"CLAOAA_magic": "meq/m2/yr",
"CLminN": "meq/m2/yr",
"CLmaxN": "meq/m2/yr",
"CLmaxNoaa": "meq/m2/yr",
"CLmaxN_Ffac": "meq/m2/yr",
"CLmaxNoaa_Ffac": "meq/m2/yr",
"CLmaxN_magic": "meq/m2/yr",
"CLmaxNoaa_magic": "meq/m2/yr",
"CLmaxS": "meq/m2/yr",
"CLmaxSoaa": "meq/m2/yr",
"CLmaxS_Ffac": "meq/m2/yr",
"CLmaxSoaa_Ffac": "meq/m2/yr",
"CLmaxS_magic": "meq/m2/yr",
"CLmaxSoaa_magic": "meq/m2/yr",
"Runoff": "m/yr",
"ENO3_flux": "meq/m2/yr",
"ECax": "ueq/l",
"ECl": "ueq/l",
"EMgx": "ueq/l",
"ENax": "ueq/l",
"ESO4x": "ueq/l",
"ENO3": "ueq/l",
"EKx": "ueq/l",
"TOC": "mg/l",
}
df = df[["Region_id"] + list(col_dict.keys())]
cols_units = ["Region_id"] + [
f"{i}_{col_dict[i]}" for i in col_dict.keys()
] # Add units to header
df.columns = cols_units
if out_csv:
df.to_csv(out_csv, index=False)
return df
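# Illustrative usage sketch (file paths are assumptions):
#     cl_df = calculate_critical_loads_for_water(
#         xl_path=r"input_template_critical_loads_water.xlsx",
#         out_csv=r"critical_loads_water.csv",
#     )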
def rasterise_water_critical_loads(
eng,
out_fold,
cell_size=120,
bc0="BC0",
req_df=None,
opt_df=None,
mag_df=None,
df_to_csv=False,
):
"""Creates rasters of key critical loads parameters:
'claoaa', 'eno3', 'clminn', 'clmaxnoaa', 'clmaxsoaa', 'clmins',
'anclimit', 'anclimit_oaa', 'bc0'
using the specified water chemistry, model parameters per BLR grid square, and BC0 method.
Args:
eng: Obj. Valid connection object for the 'critical_loads' database
out_fold: Raw str. Folder in which to save grids
cell_size: Int. Resolution of output rasters
bc0: Str. BC0 method to use. One of ['BC0', 'BC0_Ffac', 'BC0_magic']
req_df: Dataframe of required parameters or None
opt_df: Dataframe of optional parameters or None
mag_df: Dataframe of magic parameters or None
df_to_csv: Bool. If True, the dataframe of critical loads is also written to
'out_fold/critical_loads.csv'
Returns:
None. The rasters are written to the specified folder.
"""
import os
import pandas as pd
import geopandas as gpd
import nivapy3 as nivapy
assert bc0 in [
"BC0",
"BC0_Ffac",
"BC0_magic",
], "'bc0' must be one of ['BC0', 'BC0_Ffac', 'BC0_magic']."
if (mag_df is not None) and (bc0 != "BC0_magic"):
print(
f"WARNING: You have supplied 'mag_df', but BC0 is '{bc0}'. Magic parameters will be ignored.\n"
" Are you sure you don't mean 'BC0_magic' instead?\n"
)
if (bc0 == "BC0_magic") and (mag_df is None):
raise ValueError(
"To use 'BC0_magic' you must supply 'mag_df'. This probably means filling in the Excel template."
)
# Read parameters table from db
par_df = pd.read_sql(
"SELECT id as parameter_id, name, class FROM water.parameter_definitions",
eng,
)
if req_df is None:
# Read data from db
req_df = pd.read_sql("SELECT * FROM water.blr_required_parameters", eng)
# Restructure
req_df = pd.merge(req_df, par_df, how="left", on="parameter_id")
del req_df["parameter_id"], req_df["class"]
req_df = req_df.pivot(index="region_id", columns="name", values="value")
req_df.index.name = "Region_id"
req_df.reset_index(inplace=True)
req_df.columns.name = ""
if opt_df is None:
        # Create empty dataframe with the correct columns for 'optional' parameters
        # (These are not used in the calculations below, but are expected by the CL function)
opt_cols = list(par_df[par_df["class"] == "optional"]["name"].values)
opt_df = pd.DataFrame(columns=["Region_id"] + opt_cols)
if mag_df is None:
        # Create empty dataframe with the correct columns for 'magic' parameters
        # (These are not used in the calculations below, but are expected by the CL function)
mag_cols = list(par_df[par_df["class"] == "magic"]["name"].values)
mag_df = | pd.DataFrame(columns=["Region_id"] + mag_cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 1 15:31:40 2020
@author: josea
"""
import os
import re
import random
import unicodedata
import pandas as pd
from itertools import product
random.seed(404)
path = os.getcwd()
os.chdir(path)
file_path = "../../data/"
target_path = "../../processed_data/"
esp = pd.read_csv(os.path.join(file_path, 'ESP.csv'), encoding='utf-8')
eng = pd.read_csv(os.path.join(file_path, 'ENG.csv'), sep=',',
encoding='utf-8')
eus = pd.read_csv(os.path.join(file_path, 'EUS.txt'), sep='\t', header=None)
eus.columns = ['spelling', 'freq']
eus['len'] = eus['spelling'].apply(len)
# %% Filter words by frequency (keep upper half) and length (3-10 characters)
eus = eus[(eus.freq > eus.freq.quantile(q=0.5))]
esp = esp[(esp.zipf > esp.zipf.quantile(q=0.5))]
eng = eng[(eng.ZipfUS > eng.ZipfUS.quantile(q=0.5))]
esp = esp[(esp.len >= 3) & (esp.len <= 10)]
eng = eng[(eng.Length >= 3) & (eng.Length <= 10)]
eus = eus[(eus.len >= 3) & (eus.len <= 10)]
def preprocess(st):
st = ''.join(c for c in unicodedata.normalize('NFD', st)
if unicodedata.category(c) != 'Mn')
st = re.sub(r"[^a-zA-Z]", r"", st)
return st.lower()
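# Example: preprocess() strips accents and non-letter characters and lower-cases,
# e.g. preprocess("Águila!") -> "aguila".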
esp_words = list(set([preprocess(st) for st in esp.spelling]))
eng_words = list(set([preprocess(st) for st in eng.spelling]))
eus_words = list(set([preprocess(st) for st in eus.spelling]))
def editDistance(word1, word2):
'''
Return minimum number of edits required to transform word1 into word2
Edits include: deletion, insertion, replacement
Uses memoization to speed up the process
'''
n1, n2 = len(word1), len(word2)
memo = [[0]*(n2) for _ in range(n1)]
def minDist(i, j):
if i < 0:
return j+1
if j < 0:
return i+1
if memo[i][j]:
return memo[i][j]
if word1[i] == word2[j]:
memo[i][j] = minDist(i-1, j-1)
return memo[i][j]
memo[i][j] = 1 + min(minDist(i, j-1),
minDist(i-1, j),
minDist(i-1, j-1))
return memo[i][j]
return minDist(n1-1, n2-1)
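# Worked example: editDistance("casa", "cask") == 1 (one substitution), and
# editDistance("gato", "gatos") == 1 (one insertion).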
def get_num_cognates(vocab1, vocab2):
cognates = 0
for w1, w2 in product(vocab1, vocab2):
if editDistance(w1, w2) == 1:
cognates += 1
return cognates
# print(get_num_cognates(esp_words, eng_words))
# print(get_num_cognates(esp_words, eus_words))
# print(get_num_cognates(eus_words, eng_words))
random.shuffle(esp_words)
random.shuffle(eng_words)
random.shuffle(eus_words)
esp_words = esp_words[:min(len(esp_words), len(eng_words), len(eus_words))]
eng_words = eng_words[:min(len(esp_words), len(eng_words), len(eus_words))]
eus_words = eus_words[:min(len(esp_words), len(eng_words), len(eus_words))]
idx1 = int(len(esp_words)*0.8)
idx2 = int(len(esp_words)*0.1)
idx3 = int(len(esp_words)*0.1)
idx1 += len(esp_words) - idx1 - idx2 - idx3
assert idx1 + idx2 + idx3 == len(esp_words)
# %% First dataset
data = | pd.DataFrame(columns=['data', 'label', 'split']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Library of functions used for identifying mechanically coupled nuclei in tissues
Available Functions:
vis_mechanically_coupled_regions: Plot the clusters
clustering_mech_coupled_regions: Spatially cluster elongated nuclei.
clusterability: Summarise the clustering results
cluster_membership_occupancy: Characterise the number, area and density of nuclei in a cluster
cluster_angular_homogenity: Characterise the dispersion of angles within clusters
tissue_angular_homogenity: Characterise the dispersion of angles across the whole tissue
cluster_spatial_positioning: Characterise relative distances between identified clusters
"""
# import libraries
from sklearn.cluster import DBSCAN
from skimage import measure
from math import degrees, sqrt
import numpy as np
import matplotlib.pyplot as plt
from itertools import groupby
from statistics import median
from tifffile import imread
import pandas as pd
import sklearn.metrics as sklm
import scipy.spatial as ss
class Clusterability_Features:
def __init__(self,dbscn_output = np.zeros(shape=(6,))):
self.Number_nuclei_in_tissue = dbscn_output[0]
self.Number_of_ellongated_nuclei_in_tissue = dbscn_output[1]
self.Number_of_clusters = dbscn_output[2]
self.Number_of_ellongated_nuclei_unclusted = dbscn_output[3]
self.Number_of_ellongated_nuclei_clustered = dbscn_output[4]
self.Silohhouette_score = dbscn_output[5]
class Cluster_Membership_Features:
def __init__(self,membership = np.zeros(shape=(8,))):
self.Median_num_cluster_members = membership[0]
self.Min_num_cluster_members = membership[1]
self.Max_num_cluster_members = membership[2]
self.StdDev_num_cluster_members = membership[3]
self.CV_num_cluster_members = membership[4]
self.CD_num_cluster_members = membership[5]
self.IQR_num_cluster_members = membership[6]
self.Q_CD_num_cluster_members = membership[7]
class Cluster_Area_Features:
def __init__(self,chull_ad = np.zeros(shape=(9,))):
self.Total_cluster_area = chull_ad[0]
self.Median_cluster_area = chull_ad[1]
self.Min_cluster_area = chull_ad[2]
self.Max_cluster_area = chull_ad[3]
self.StdDev_cluster_area = chull_ad[4]
self.CV_cluster_area = chull_ad[5]
self.CD_cluster_area = chull_ad[6]
self.IQR_cluster_area = chull_ad[7]
self.Q_CD_cluster_area = chull_ad[8]
class Cluster_Density_Features:
def __init__(self,chull_ad = np.zeros(shape=(8,))):
self.Median_cluster_dens = chull_ad[0]
self.Min_cluster_dens = chull_ad[1]
self.Max_cluster_dens = chull_ad[2]
self.StdDev_cluster_dens = chull_ad[3]
self.CV_cluster_dens = chull_ad[4]
self.CD_cluster_dens = chull_ad[5]
self.IQR_cluster_dens = chull_ad[6]
self.Q_CD_cluster_dens = chull_ad[7]
class Cluster_Angular_Homogeneity:
def __init__(self,cangl = np.zeros(shape=(15,))):
self.Med_StdDev_angles_cluster = cangl[0]
self.Med_CV_angles_cluster = cangl[1]
self.Med_CD_angles_cluster = cangl[2]
self.Med_IQR_angles_cluster = cangl[3]
self.Med_Q_CD_angles_cluster = cangl[4]
self.SD_StdDev_angles_cluster = cangl[5]
self.SD_CV_angles_cluster = cangl[6]
self.SD_CD_angles_cluster = cangl[7]
self.SD_IQR_angles_cluster = cangl[8]
self.SD_Q_CD_angles_cluster = cangl[9]
self.StdDev_angles_noise = cangl[10]
self.CV_angles_noise = cangl[11]
self.CD_angles_noise = cangl[12]
self.IQR_angles_noise = cangl[13]
self.Q_CD_angles_noise = cangl[14]
class Tissue_Angular_Homogeneity:
def __init__(self,tangl = np.zeros(shape=(10,))):
self.StdDev_nuc_angles = tangl[0]
self.CV_nuc_angles = tangl[1]
self.CD_nuc_angles = tangl[2]
self.IQR_nuc_angles = tangl[3]
self.Q_CD_nuc_angles = tangl[4]
self.StdDev_elg_nuc_angles = tangl[5]
self.CV_elg_nuc_angles = tangl[6]
self.CD_elg_nuc_angles = tangl[7]
self.IQR_elg_nuc_angles = tangl[8]
self.Q_CD_elg_nuc_angles = tangl[9]
class Cluster_Relative_Distances:
def __init__(self, cdist = np.zeros(shape=(16,))):
self.Median_bb_cluster_dist = cdist[0]
self.Min_bb_cluster_dist = cdist[1]
self.Max_bb_cluster_dist = cdist[2]
self.StdDev_bb_cluster_dist = cdist[3]
self.CV_bb_cluster_dist = cdist[4]
self.CD_bb_cluster_dist = cdist[5]
self.IQR_bb_cluster_dist = cdist[6]
self.Q_CD_bb_cluster_dist = cdist[7]
self.Median_cc_cluster_dist = cdist[8]
self.Min_cc_cluster_dist = cdist[9]
self.Max_cc_cluster_dist = cdist[10]
self.StdDev_cc_cluster_dist = cdist[11]
self.CV_cc_cluster_dist = cdist[12]
self.CD_cc_cluster_dist = cdist[13]
self.IQR_cc_cluster_dist = cdist[14]
self.Q_CD_cc_cluster_dist = cdist[15]
def distribution_statistics(feat):
"""
Function takes in an array and returns some central and dispersion measures
Outputs in order 1.median, 2.min, 3.max, 4.standard deviation, 5.Coefficient of variation (Std/Median),
6.Coefficient of dispersion (Var/Median),7.Interquantile range and 8.Quartile coeefficient of dispersion
"""
return [np.median(feat),np.min(feat),np.max(feat),np.std(feat),
np.std(feat)/abs(np.median(feat)), (np.var(feat))/abs(np.median(feat)),
np.subtract(*np.percentile(feat, [75, 25])),
np.subtract(*np.percentile(feat, [75, 25]))/np.add(*np.percentile(feat, [75, 25]))]
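# Example: distribution_statistics([1, 2, 3, 4]) returns, in order,
# median=2.5, min=1, max=4, the std, CV, CD, IQR=1.5 and the quartile CD.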
def clusterability(features,data):
"""Function to summarise the clustering results
Args:
features: Nuclear properties
data: results of clustering
"""
#Get together features describing clustering results
n_nuclei=len(features['label'])
n_ellongated_nuc=len(data['label'])
    n_clusters = len(set(data['clusters'])-{-1}) # since -1 denotes noise
n_uncoupled_nuclei = list(data['clusters']).count(-1) # noise
n_coupled_nuclei = len(data['clusters'])-n_uncoupled_nuclei
if n_clusters>=2:
#calculate clustering robustness without noise
Silohhouette_score= sklm.silhouette_score(data.drop(['clusters','label'],axis=1)[data['clusters']> -1], data['clusters'][data['clusters']> -1])
else:
Silohhouette_score = 'NA'
basic_clustering_features = [Clusterability_Features([n_nuclei,n_ellongated_nuc,n_clusters, n_uncoupled_nuclei,n_coupled_nuclei,Silohhouette_score])]
basic_clustering_features = pd.DataFrame([o.__dict__ for o in basic_clustering_features])
return basic_clustering_features
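# Illustrative call (variable names are assumptions; 'features' holds the nuclear
# properties and 'data' the clustering output with a 'clusters' column):
#     summary_df = clusterability(features, data)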
def tissue_angular_homogenity(features,ell_threshold = 0.9):
"""Function to characterise the dispersion of angles within clusters
Args:
features: Nuclear properies; orientation and eccentricity
"""
ecc=features['eccentricity']
angles=np.vectorize(degrees)(features['orientation'])
angles=np.where(angles > 0, angles, abs(angles)+90)
#Filter to get only elongated nuclei
relevant_angles=(angles)[ecc > ell_threshold]
#Measuring orientation dispersion of all nuclei in the tissue
(_,_,_,
std_orientation_tissue,CV_orientation_tissue,CD_orientation_tissue,
IQR_orientation_tissue,Quartile_CD_orientation_tissue)= distribution_statistics(angles)
#Measuring orientation dispersion of only ellongated nuclei in the tissue
(_,_,_,
std_orientation_elong,CV_orientation_elong,CD_orientation_elong,
IQR_orientation_elong,Quartile_CD_orientation_elong)= distribution_statistics(relevant_angles)
t_angles=[Tissue_Angular_Homogeneity([std_orientation_tissue,CV_orientation_tissue,
CD_orientation_tissue,IQR_orientation_tissue,
Quartile_CD_orientation_tissue,
std_orientation_elong,CV_orientation_elong,
CD_orientation_elong,IQR_orientation_elong,
Quartile_CD_orientation_elong])]
t_angles = pd.DataFrame([o.__dict__ for o in t_angles])
return t_angles
def cluster_angular_homogenity(data):
"""Function to characterise the dispersion of angles within clusters
Args:
data: Results of clustering: centroids, angles and cluster membership
"""
    n_clusters = len(set(data['clusters'])-{-1}) # since -1 denotes noise
if n_clusters <1:
#Setting cluster angluar features to default
c_angles=[Cluster_Angular_Homogeneity()]
c_angles = pd.DataFrame([o.__dict__ for o in c_angles])
elif n_clusters >=1:
#Summarizing dispersion statistics of the orientations of the clustered nuclei
        # For each cluster and at the tissue level (elongated and all) we measure the dispersion statistics
#Measuring clusterwise orientation dispersion
std_orientation=data.groupby('clusters')['angles'].std().array
CV_orientation=data.groupby('clusters')['angles'].std().array/abs(data.groupby('clusters')['angles'].median().array)
CD_orientation=(data.groupby('clusters')['angles'].var().array)/abs(data.groupby('clusters')['angles'].median().array)
IQR_orientation=(data.groupby('clusters')['angles'].quantile(.75).array-data.groupby('clusters')['angles'].quantile(.25).array)
Quartile_CD_orientation=((data.groupby('clusters')['angles'].quantile(.75).array-data.groupby('clusters')['angles'].quantile(.25).array))/(((data.groupby('clusters')['angles'].quantile(.75).array+data.groupby('clusters')['angles'].quantile(.25).array)))
c_angles=[Cluster_Angular_Homogeneity([np.median(np.delete(std_orientation,0)),
np.median(np.delete(CV_orientation,0)),
np.median(np.delete(CD_orientation,0)),
np.median(np.delete(IQR_orientation,0)),
np.median(np.delete(Quartile_CD_orientation,0)),
np.std(np.delete(std_orientation,0)),
np.std(np.delete(CV_orientation,0)),
np.std(np.delete(CD_orientation,0)),
np.std(np.delete(IQR_orientation,0)),
np.std(np.delete(Quartile_CD_orientation,0)),
std_orientation[0],CV_orientation[0],
CD_orientation[0],IQR_orientation[0],
Quartile_CD_orientation[0]])]
c_angles = pd.DataFrame([o.__dict__ for o in c_angles])
return c_angles
def cluster_spatial_positioning(data):
"""Function to characterise relative distances between identified clusters
Args:
data: Results of clustering: centroids, angles and cluster membership
"""
    n_clusters = len(set(data['clusters'])-{-1}) # since -1 denotes noise
if n_clusters <2:
#Setting cluster angluar features to default
cdist=[Cluster_Relative_Distances()]
cdist = pd.DataFrame([o.__dict__ for o in cdist])
elif n_clusters >=2:
# Here we implement two approaches for measuring distances between clustes:
# (1) border-boder distances and (2) centroid-centroid distances.
# We compute dispersion measures for the distances obtained.
d = dict(tuple(data.groupby('clusters')))
d.pop(-1, None)
min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]),
np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])
min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)
cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),
np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))
cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)
(avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,
std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,
IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)
(avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,
std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,
IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)
cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,
std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,
IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,
avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,
std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,
IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]
cdist = pd.DataFrame([o.__dict__ for o in cdist])
return cdist
def cluster_membership_occupancy(data):
"""Function to characterise the number, area and density of nuclei in a cluster
Args:
data: Results of clustering: centroids, angles and cluster membership
"""
    n_clusters = len(set(data['clusters'])-{-1}) # since -1 denotes noise
if n_clusters == 0:
membership=[Cluster_Membership_Features()]
membership = pd.DataFrame([o.__dict__ for o in membership])
areas=[Cluster_Area_Features()]
areas = pd.DataFrame([o.__dict__ for o in areas])
density=[Cluster_Density_Features()]
density = | pd.DataFrame([o.__dict__ for o in density]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import requests
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
MAX_RETRY = 5
PDF_PATH = 'mizoram_pdfs'
os.makedirs(PDF_PATH, exist_ok=True)
def download_file(url, target):
"""Download file from url and save to the target
"""
# Streaming, so we can iterate over the response.
r = requests.get(url, stream=True)
if r.status_code == 200:
chunk_size = (64 * 1024)
        # Total number of chunks.
        total_size = int(r.headers.get('content-length', 0)) // chunk_size
        total_size += 1
with open(target, 'wb') as f:
for data in tqdm(r.iter_content(chunk_size), total=total_size,
unit_scale=chunk_size/1024, unit='KB'):
f.write(data)
return True
else:
print("ERROR: status_code={0:d}".format(r.status_code))
return False
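# Illustrative usage (URL and file name are hypothetical):
#     download_file('http://ceomizoram.nic.in/some_roll.pdf',
#                   os.path.join(PDF_PATH, 'some_roll.pdf'))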
def scrape_mizoram():
"""Scrape Electoral Rolls for Mizoram
    Download and save all PDF files and write the metadata to mizoram.csv with
    the following columns:
district, leg_assembly, polling_station_name, file_name
"""
rows = []
r = requests.get('http://ceomizoram.nic.in/ElectoralRollPDF.html')
if r.status_code == 200:
soup = BeautifulSoup(r.content, 'html.parser')
i = 1
for dist in soup.select('h4.panel-title'):
data = {'district': dist.text.strip()}
for leg in soup.select('ul#myTab{0:d} li'.format(i)):
data['leg_assembly'] = leg.text.strip()
_id = leg.find('a')['href'][1:]
for a in soup.select('div#{0:s} a'.format(_id)):
data['polling_station_name'] = a.text.strip()
url = 'http://ceomizoram.nic.in/' + a['href']
print(url)
pdf_fn = url.split('/')[-1]
pdf_path = os.path.join(PDF_PATH, pdf_fn)
data['file_name'] = pdf_path
if not os.path.exists(pdf_path):
retry = 0
while retry < MAX_RETRY:
try:
download_file(url, pdf_path)
break
except Exception as e:
print("WARN: {0!s}".format(e))
print("Retry again in 5s...")
retry += 1
time.sleep(5)
rows.append(data.copy())
#break
#break
i += 1
#break
df = | pd.DataFrame(rows) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Main module."""
import pandas as pd
import numpy as np
from scipy import stats, linalg
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.lines as mlines
import matplotlib.gridspec as gridspec
from matplotlib.offsetbox import AnchoredText
import matplotlib.patches as mpatches
import seaborn as sns
import warnings
from .visualization import Visualize
from .clustering import Cluster
class Rm(Visualize, Cluster):
"""Random Matrix Theory Analysis of Principal Components
Parameters
----------
eigen_solver: string {'wishart'}. Find eigenvalue and eigenvectors
wishart: Compute wishart matrix and find eigenvalues and eigenvectors
of the Wishart matrix
tol: float >= 0, optional (default .0)
Tolerance for singular values computed by svd_solver == 'arpack'.
preprocessing: string {'sc', 'False'}
sc: run single cell preprocessing
False: skip preprocessing
random_states: int, RandomState instance or None, optional (default None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_: array, shape (n_components, n_genes)
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
``explained_variance_``.
explained_variance_: array, shape (n_components,)
The amount of variance explained by each of the selected components.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
explained_variance_ratio_: array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0.
mean_: array, shape (n_features,)
Per-gene empirical mean, estimated from the original set.
noise_variance_: float
The estimated noise covariance
cell_names: list of cell names
gene_names: list of gene names
X: 2D array
Preprocessed single cell matrix
n_cells: int
number of cells
n_genes: int
number of genes
L: list
Eigenvalues of the Wishart matrix
V: 2D array
Eigenvectors of the Wishart matrix
Lr: list
Eigenvalues of the Wishart randomized matrix
Vr: 2D array
Eigenvectors of the Wishart randomized matrix
L_mp: list
Estimated Marchenko - Pastur Eigenvalues
lambda_c: float
Critical eigenvalues estimated by Tracy - Widom distribution
n_components: int
Number of components above the Marchenko - Pastur distribution
gamma: float
        Estimated gamma of the Marchenko - Pastur distribution
sigma: float
Estimated sigma of the Marchenko - Pastur distribution
b_plus: float
Estimated upper bound of the Marchenko - Pastur distribution
b_minus: float
Estimated lower bound of the Marchenko - Pastur distribution
"""
def __init__(self, tol=0.0, random_state=None):
self.eigen_solver = 'wishart'
self.tol = tol
self.random_state = random_state
self.n_cells = 0
self.n_genes = 0
self.cell_names = []
self.gene_names = []
self.normal_genes = []
self.normal_cells = []
self.L = []
self.V = None
self.X = None
self.X2 = None
self.X3 = None
self.Lr = []
self.Vr = None
self.explained_variance_ = []
self.total_variance_ = []
self.L_mp = []
self.lambda_c = 0
self.n_components = 0
self.gamma = 0
self.p = 0
self.sigma = 0
self.x_peak = 0
self.b_plus = 0
self.b_minus = 0
self._preprocessing_flag = False
self._selection_flag = True
self._preprocessing2_flag = False
self.X_vis = None
def style(self):
params = {'legend.fontsize': 'medium',
'axes.labelsize': 'large',
'axes.titlesize': 'medium',
'xtick.labelsize': 'medium',
'ytick.labelsize': 'medium',
'figure.dpi': 100,
'axes.linewidth': 0.5,
}
np.seterr(invalid='ignore')
sns.set_style("white")
plt.rcParams.update(params)
def style_mp_stat(self):
plt.style.use("ggplot")
np.seterr(invalid='ignore')
sns.set_style("white")
sns.set_context("paper")
sns.set_palette("deep")
plt.rcParams['axes.linewidth'] = 0.5
plt.rcParams['figure.dpi'] = 100
def fit(self, df=None, eigen_solver='wishart'):
"""Fit RM model
Parameters
----------
df: Pandas dataframe, shape (n_cells, n_genes)
where n_cells in the number of cells
and n_genes is the number of genes.
-------
self: object
Returns the instance itself.
"""
if self._preprocessing_flag:
print("Preprocessed data is being used for fitting")
else:
print('Data has not been preprocessed')
detected_genes = df.columns[(np.sum(df.values, axis=0) > 0)]
detected_cells = df.index[(np.sum(df.values, axis=1) > 0)]
df = df.loc[detected_cells, detected_genes]
self.X = df.values
self.n_cells = df.shape[0]
self.n_genes = df.shape[1]
self.normal_genes = df.columns
self.normal_cells = df.index
self.X3 = pd.DataFrame(self.X, index=self.normal_cells,
columns=self.normal_genes)
self._fit()
return
def _fit(self):
"""Fit the model for the dataframe df and apply
the dimensionality reduction by removing the eigenvalues
that follow Marchenko - Pastur distribution
"""
self.mean_ = np.mean(self.X, axis=0)
self.std_ = np.std(self.X, axis=0, ddof=0)
self.X = (self.X-self.mean_) / (self.std_+0.0)
"""Dispatch to the right submethod depending on
the chosen solver"""
if self.eigen_solver == 'wishart':
Y = self._wishart_matrix(self.X)
(self.L, self.V) = self._get_eigen(Y)
Xr = self._random_matrix(self.X)
Yr = self._wishart_matrix(Xr)
(self.Lr, self.Vr) = self._get_eigen(Yr)
self.explained_variance_ = (self.L**2) / (self.n_cells)
self.total_variance_ = self.explained_variance_.sum()
self.L_mp = self._mp_calculation(self.L, self.Lr)
self.lambda_c = self._tw()
self.peak = self._mp_parameters(self.L_mp)['peak']
else:
print('''Solver is undefined, please use
Wishart Matrix as eigenvalue solver''')
self.Ls = self.L[self.L > self.lambda_c]
Vs = self.V[:, self.L > self.lambda_c]
self.Vs = Vs
noise_boolean = ((self.L < self.lambda_c) & (self.L > self.b_minus))
Vn = self.V[:, noise_boolean]
self.Ln = self.L[noise_boolean]
self.n_components = len(self.Ls)
Vna = self.Vr[:, len(self.Lr)//2
- self.n_components//2: len(self.Lr)//2
+ self.n_components//2
+ (self.n_components) % 2]
signal_projected_genes = self._project_genes(self.X, Vs)
random_projected_genes = self._project_genes(self.X, Vna)
noise_left_projected_genes = self._project_genes(
self.X, Vn[:, :self.n_components])
noise_right_projected_genes = self._project_genes(
self.X, Vn[:, -self.n_components:])
noise_projected_genes = self._project_genes(self.X, Vn)
# print(noise_right_projected_genes)
self._s = np.square(signal_projected_genes).sum(axis=1)
self._sa = np.square(random_projected_genes).sum(axis=1)
self._snl = np.square(noise_left_projected_genes).sum(axis=1)
self._snr = np.square(noise_right_projected_genes).sum(axis=1)
self.components_genes = dict()
for j in range(self.n_components):
self.components_genes[j] = np.array(self.normal_genes)[
np.square(signal_projected_genes[:, -j - 1])
> 10 * np.max(np.square(noise_projected_genes),
axis=1)
]
self.X = np.dot(np.dot(Vs, Vs.T),
self.X)
# X = U S V^T= U(V S)^T = U (X^T U)^T = U U^T X ~ Us Us^T X
def return_cleaned(self,
fdr=None,
sample_variance=None,
path=False):
''' Method returns the dataframe with denoised single
cell data if fdr == True, return method returns structure
genes up to the fdr level
Parameters
----------
path: string
Path to save the plot
fdr_cut: float
Returns
-------
object: Pandas DataFrame shape(n_cells, n_genes)
Cleaned matrix
'''
self._selection_flag = True
if (fdr == 1) or (sample_variance == 0):
return self.X3
else:
genes = self.select_genes(fdr, sample_variance)
if self._selection_flag:
if path:
pd.DataFrame(self.X, index=self.normal_cells,
columns=self.normal_genes).loc[:, genes]\
.applymap(lambda x: round(x, 3)).to_csv(path, sep='\t')
return pd.DataFrame(self.X, index=self.normal_cells,
columns=self.normal_genes).loc[:, genes]
else:
print('''Criterium for selection of genes undefined,
please select FDR < 1 OR sample variance > 0''')
def _to_tpm(self, X):
'''Transform transcripts to transcripts per million'''
return np.transpose(np.transpose(X) /
np.sum(X, axis=1) * 10**(6)
)
def _tw(self):
        '''Tracy-Widom critical eigenvalue'''
gamma = self._mp_parameters(self.L_mp)['gamma']
p = len(self.L) / gamma
sigma = 1 / np.power(p, 2/3) * np.power(gamma, 5/6) * \
np.power((1 + np.sqrt(gamma)), 4/3)
lambda_c = np.mean(self.L_mp) * (1 + np.sqrt(gamma)) ** 2 + sigma
self.gamma = gamma
self.p = p
self.sigma = sigma
return lambda_c
def _wishart_matrix(self, X):
"""Compute Wishart Matrix of the cells"""
return np.dot(X, X.T) / X.shape[1]
def _random_matrix(self, X):
return np.apply_along_axis(np.random.permutation, 0, X)
def _get_eigen(self, Y):
"""Compute Eigenvalues of the real symmetric matrix"""
(L, V) = linalg.eigh(Y)
return (L, V)
def _mp_parameters(self, L):
"""Compute Parameters of the Marchenko
Pastur Distribution of eigenvalues L"""
moment_1 = np.mean(L)
moment_2 = np.mean(np.power(L, 2))
gamma = moment_2 / float(moment_1**2) - 1
s = moment_1
sigma = moment_2
b_plus = s * (1 + np.sqrt(gamma))**2
b_minus = s * (1 - np.sqrt(gamma))**2
x_peak = s * (1.0-gamma)**2.0 / (1.0+gamma)
dic = {'moment_1': moment_1,
'moment_2': moment_2,
'gamma': gamma,
'b_plus': b_plus,
'b_minus': b_minus,
's': s,
'peak': x_peak,
'sigma': sigma
}
return dic
def _marchenko_pastur(self, x, dic):
'''Distribution of eigenvalues'''
pdf = np.sqrt((dic['b_plus'] - x) * (x-dic['b_minus']))\
/ float(2 * dic['s'] * np.pi * dic['gamma'] * x)
return pdf
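    # The expression above is the Marchenko-Pastur density
    #     rho(x) = sqrt((b_plus - x) * (x - b_minus)) / (2 * pi * s * gamma * x)
    # for b_minus <= x <= b_plus, with parameters taken from _mp_parameters().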
def _mp_pdf(self, x, L):
        '''Marchenko-Pastur PDF'''
vfunc = np.vectorize(self._marchenko_pastur)
y = vfunc(x, self._mp_parameters(L))
return y
def _mp_calculation(self, L, Lr, eta=1, eps=10**-6, max_iter=1000):
converged = False
iter = 0
loss_history = []
b_plus = self._mp_parameters(Lr)['b_plus']
b_minus = self._mp_parameters(Lr)['b_minus']
L_updated = L[(L > b_minus) & (L < b_plus)]
new_b_plus = self._mp_parameters(L_updated)['b_plus']
new_b_minus = self._mp_parameters(L_updated)['b_minus']
while not converged:
loss = (1 - float(new_b_plus) / float(b_plus))**2
loss_history.append(loss)
iter += 1
if loss <= eps:
converged = True
elif iter == max_iter:
                print('Max iterations exceeded!')
converged = True
else:
gradient = new_b_plus - b_plus
new_b_plus = b_plus + eta * gradient
L_updated = L[(L > new_b_minus) & (L < new_b_plus)]
b_plus = new_b_plus
b_minus = new_b_minus
new_b_plus = self._mp_parameters(L_updated)['b_plus']
new_b_minus = self._mp_parameters(L_updated)['b_minus']
self.b_plus = new_b_plus
self.b_minus = new_b_minus
return L[(L > new_b_minus) & (L < new_b_plus)]
def _project_genes(self, X, V):
'''Return (n_genes, n_components) matrix
of gene projections on components'''
return np.dot(X.T, V)
def _project_cells(self, X, V):
'''Return (n_cells, n_components) matrix
of cell projections on components'''
return np.dot(X, np.dot(X.T, V))
def _get_gene_norm(self, X):
return np.sqrt(np.square(X).sum(axis=0) / X.shape[0])
def plot_mp(self, comparison=True, path=False,
info=True, bins=None, title=None):
"""Plot Eigenvalues, Marchenko - Pastur distribution,
randomized data and estimated Marchenko - Pastur for
randomized data
Parameters
----------
path: string
Path to save the plot
fit: boolean
The data.
fdr_cut: float
Returns
-------
object: plot
"""
self.style_mp_stat()
if bins is None:
#bins = self.n_cells
bins = int(self.n_cells/3.0)
x = np.linspace(0, int(round(np.max(self.L_mp) + 0.5)), 2000)
y = self._mp_pdf(x, self.L_mp)
yr = self._mp_pdf(x, self.Lr)
if info:
fig = plt.figure(dpi=100)
fig.set_tight_layout(False)
ax = fig.add_subplot(111)
else:
plt.figure(dpi=100)
plot = sns.distplot(self.L,
bins=bins,
norm_hist=True,
kde=False,
hist_kws={"alpha": 0.85,
"color": sns.xkcd_rgb["cornflower blue"]
}
)
plot.set(xlabel='First cell eigenvalues normalized distribution')
plt.plot(x, y,
sns.xkcd_rgb["pale red"],
lw=2)
MP_data = mlines.Line2D([], [], color=sns.xkcd_rgb["pale red"],
label='MP for random part in data', linewidth=2)
MP_rand = mlines.Line2D([], [], color=sns.xkcd_rgb["sap green"],
label='MP for randomized data', linewidth=1.5,
linestyle='--')
randomized = mpatches.Patch(color=sns.xkcd_rgb["apple green"],
label='Randomized data', alpha=0.75,
linewidth=3, fill=False)
data_real = mpatches.Patch(color=sns.xkcd_rgb["cornflower blue"],
label='Real data', alpha=0.85)
if comparison:
sns.distplot(self.Lr, bins=30, norm_hist=True,
kde=False,
hist_kws={"histtype": "step", "linewidth": 3,
"alpha": 0.75,
"color": sns.xkcd_rgb["apple green"]}
)
plt.plot(x, yr,
sns.xkcd_rgb["sap green"],
lw=1.5,
ls='--'
)
plt.legend(handles=[data_real, MP_data, randomized, MP_rand],
loc="upper right", frameon=True)
else:
plt.legend(handles=[data_real, MP_data],
loc="upper right", frameon=True)
plt.xlim([0, int(round(max(np.max(self.Lr), np.max(self.L_mp))
+ 1.5))])
plt.grid(b=True, linestyle='--', lw=0.3)
if title:
plt.title(title)
if info:
dic = self._mp_parameters(self.L_mp)
info1 = (r'$\bf{Data Parameters}$' + '\n{0} cells\n{1} genes'
.format(self.n_cells, self.n_genes))
info2 = ('\n' + r'$\bf{MP\ distribution\ in\ data}$'
+ '\n$\gamma={:0.2f}$ \n$\sigma^2={:1.2f}$\
\n$b_-={:2.2f}$\n$b_+={:3.2f}$'
.format(dic['gamma'], dic['s'], dic['b_minus'],
dic['b_plus']))
info3 = ('\n' + r'$\bf{Analysis}$' +
'\n{0} eigenvalues > $\lambda_c (3 \sigma)$\
\n{1} noise eigenvalues'
.format(self.n_components, self.n_cells -
self.n_components))
ks = stats.kstest(self.L_mp, self._call_mp_cdf(self.L_mp, dic))
info4 = '\n'+r'$\bf{Statistics}$'+'\nKS distance ={0}'\
.format(round(ks[0], 4))\
+ '\nKS test p-value={0}'\
.format(round(ks[1], 2))
infoT = info1+info2+info4+info3
props = dict(boxstyle='round', facecolor='wheat', alpha=0.8)
at = AnchoredText(infoT, loc=2, prop=dict(size=10),
frameon=True,
bbox_to_anchor=(1., 1.024),
bbox_transform=ax.transAxes)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
lgd = ax.add_artist(at)
if path:
plt.savefig(path, bbox_extra_artists=(
lgd,), bbox_inches='tight')
else:
if path:
plt.savefig(path)
plt.show()
return
def plot_statistics(self, path=False, fit=True):
"""Plot statistics,
Parameters
----------
path: string
Path to save the plot
fit: boolean
The data.
fdr_cut: float
Returns
-------
object: plot
"""
self.style_mp_stat()
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(dpi=100, figsize=[9, 4.4])
fig.set_tight_layout(False)
ax = plt.subplot(gs[0, 0])
ax0 = plt.subplot2grid((1, 3), (0, 0), colspan=2)
sns.distplot(self._sa,
norm_hist=True,
kde=False,
bins=100,
hist_kws={"alpha": 0.8,
"color": sns.xkcd_rgb["golden yellow"],
"zorder": 1})
sns.distplot(self._s,
norm_hist=True,
kde=False,
bins=200,
hist_kws={"alpha": 0.6,
"color": sns.xkcd_rgb["cerulean"],
"zorder": 3})
sns.distplot(self._snl,
norm_hist=True,
kde=False,
bins=100,
hist_kws={"alpha": 0.55,
"color": sns.xkcd_rgb["leaf green"],
"zorder": 5})
sns.distplot(self._snr,
norm_hist=True,
kde=False,
bins=100,
hist_kws={"alpha": 0.5,
"color": sns.xkcd_rgb["cerise"],
"zorder": 7})
plt.xlim([0, 2 * np.max(self._snr)])
plt.xlabel('Normalized sample variance')
plt.ylabel('Sample variance probability distribution')
hist_sa = mpatches.Patch(color=sns.xkcd_rgb["golden yellow"],
label='{0} random vectors'.format(
self.n_components),
alpha=0.8)
hist_s = mpatches.Patch(color=sns.xkcd_rgb["cerulean"],
label='All {0} signal eigenvectors'.format(
self.n_components),
alpha=0.6)
hist_snl = mpatches.Patch(color=sns.xkcd_rgb["leaf green"],
label='Lowest {0} MP eigenvectors'.format(
self.n_components),
alpha=0.55)
hist_snr = mpatches.Patch(color=sns.xkcd_rgb["cerise"],
label='Largest {0} MP eigenvectors'.format(
self.n_components),
alpha=0.5)
xgr = np.linspace(0, np.max(self._snr)+200, 1000)
y_fdr = np.vectorize(self._fdr)(xgr)
if fit:
xgl = np.linspace(0, np.max(self._snl), 1000)
xk = np.linspace(0, np.max(self._sa), 1000)
xs = np.linspace(0, np.max(self._s) + 0.0, 1000)
fits = self._fit_gamma(self._s)
fitl = self._fit_gamma(self._snl)
fitr = self._fit_gamma(self._snr)
ygl = self._gamma_pdf(xgl, fitl)
ygr = self._gamma_pdf(xgr, fitr)
ys = self._gamma_pdf(xs, fits)
y = stats.chi2.pdf(xk, self.n_components)
plt.ylim(0, max(y)+0.02)
plt.plot(xk, y,
zorder=2,
color=sns.xkcd_rgb["adobe"],
linestyle='--',
linewidth=1.1
)
plt.plot(xgl, ygl,
zorder=6,
color=sns.xkcd_rgb["grassy green"],
linestyle='-',
linewidth=1.5,
)
plt.plot(xgr, ygr,
zorder=8,
color=sns.xkcd_rgb["rose red"],
linestyle='-',
linewidth=1.5
)
plt.plot(xs, ys,
zorder=4,
color=sns.xkcd_rgb["blue blue"],
linestyle='-',
linewidth=1.5
)
line_gammal = mlines.Line2D([], [],
color=sns.xkcd_rgb["grassy green"],
label=r'Gamma PDF: $\alpha ={:0.1f}$, $\beta = {:1.1f}$'
.format(fitl[0], 1/fitl[2]),
linewidth=1.5)
line_gammar = mlines.Line2D([], [],
color=sns.xkcd_rgb["rose red"],
label=r'Gamma PDF: $\alpha ={:0.1f}$,$\beta ={:1.1f}$'
.format(fitr[0], 1 / fitr[2]),
linewidth=1.5)
line_chi = mlines.Line2D([], [],
color=sns.xkcd_rgb["adobe"],
label='Chi-Squared Distribution',
linewidth=1.1,
linestyle='--')
line_gammas = mlines.Line2D([], [],
color=sns.xkcd_rgb["blue blue"],
label=r'Gamma PDF: $\alpha =%.1f$,$\beta = %.2f$'
% (fits[0], 1/fits[2]),
linewidth=1.5)
plt.legend(handles=[hist_s, line_gammas, hist_snr,
line_gammar, hist_snl,
line_gammal, hist_sa, line_chi],
title=r'$\bf{Gene\ projection\ samples}$',
loc="upper right",
frameon=True)
else:
plt.legend(handles=[hist_s, hist_snr, hist_snl, hist_sa],
title=r'$\bf{Gene\ projection\ samples}$',
loc="upper right",
frameon=True)
ax1 = plt.subplot2grid((1, 3), (0, 2))
host = ax1
par = ax1.twinx()
host.set_ylabel("False Discovery Rate")
par.set_ylabel("Number of genes")
p1, = host.plot(xgr, y_fdr[0],
label="False Discovery Rate",
ls='--',
lw=2,
color=sns.xkcd_rgb["pumpkin orange"])
p2, = par.plot(xgr, y_fdr[1],
label="Number of genes",
ls='-.',
lw=1.5,
color=sns.xkcd_rgb["violet blue"])
host.yaxis.get_label().set_color(p1.get_color())
par.yaxis.get_label().set_color(p2.get_color())
line_fdr = mlines.Line2D([], [],
color=sns.xkcd_rgb["pumpkin orange"],
label='Ratio signal vs \nlargest {0} MP\ndistributions'
.format(self.n_components),
linewidth=2,
linestyle='--')
line_genes = mlines.Line2D([], [],
color=sns.xkcd_rgb["violet blue"],
label='Relevant genes',
linewidth=1.5,
linestyle='-.')
ax1.legend(handles=[line_fdr, line_genes],
loc="upper right",
frameon=True)
host.set_yscale("log")
host.grid(b=True, linestyle='--', lw=0.3)
host.set_xlabel('Normalized sample variance')
host.set_ylim(min(y_fdr[0][np.nonzero(y_fdr[0])]), 1.2)
host.set_xlim(-2, max(xgr))
plt.tight_layout()
if path:
plt.savefig(path)
return plt.show()
def _fdr(self, x):
'''Number of genes for the false discovery rate'''
area_noise = len(self._snr[self._snr > x])
area_signal = len(self._s[self._s > x])
if area_signal > 0:
fdr_x = area_noise/float(area_signal)
genes = self._s[self._s > x].tolist()
else:
fdr_x = 0
genes = []
return (fdr_x, len(genes))
def _fdr_genes(self, x):
'''Number of signal genes for the false discovery rate'''
area_noise = len(self._snr[self._snr > x])
area_signal = len(self._s[self._s > x])
fdr_x = area_noise / float(area_signal)
# genes = self._s[self._s > x].tolist()
return (fdr_x, x)
def select_genes(self, fdr=None, sample_variance=None):
if fdr is not None and fdr < 1.0 and sample_variance is None:
xgr = np.linspace(0, np.max(self._snr), 1000)
y_fdr = np.vectorize(self._fdr_genes)(xgr)
idx = np.abs(y_fdr[0] - fdr).argmin()
x = y_fdr[1][idx]
genes = np.array(self.normal_genes)[self._s > x].tolist()
elif sample_variance is not None and \
sample_variance > 0.0 and fdr is None:
genes = np.array(self.normal_genes)[
self._s > sample_variance].tolist()
else:
self._selection_flag = False
genes = np.array(self.normal_genes).tolist()
return genes
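    # Illustrative usage sketch (object name and thresholds are assumptions):
    #     rm = Rm()
    #     rm.fit(df)                        # df: cells x genes dataframe
    #     genes = rm.select_genes(fdr=0.01)
    #     cleaned = rm.return_cleaned(fdr=0.01)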
def _fit_gamma(self, x):
alpha, loc, beta = stats.gamma.fit(x, loc=0, scale=1)
return (alpha, loc, beta)
def _gamma_pdf(self, x, fits):
alpha, loc, beta = fits
y = stats.gamma(a=alpha, loc=loc, scale=beta).pdf(x)
return y
def _cdf_marchenko(self, x, dic):
if x < dic['b_minus']:
return 0.0
elif x > dic['b_minus'] and x < dic['b_plus']:
return 1/float(2*dic['s']*np.pi*dic['gamma']) *\
float(np.sqrt((dic['b_plus']-x)*(x-dic['b_minus'])) +
(dic['b_plus']+dic['b_minus'])/2*np.arcsin((2*x-dic['b_plus'] -
dic['b_minus'])/(dic['b_plus']-dic['b_minus'])) -
np.sqrt(dic['b_plus']*dic['b_minus'])*np.arcsin(((dic['b_plus'] +
dic['b_minus'])*x-2*dic['b_plus']*dic['b_minus']) /
((dic['b_plus']-dic['b_minus'])*x)))+np.arcsin(1)/np.pi
else:
return 1.0
def _call_mp_cdf(self, L, dic):
"CDF of Marchenko Pastur"
def func(y): return list(map(lambda x: self._cdf_marchenko(x, dic), y))
return func
def _shapiro(self, x):
return stats.shapiro(x)[0]
def refining(self, min_trans_per_gene=15):
if not self._preprocessing2_flag:
raise ValueError('''Cleaning function only works with
after running preprocess function
with option refined=True''')
# low-expressed genes
genesproj = np.dot(self.X3.T, (abs(self.Vr))[:, :]).T
warnings.filterwarnings('once')
shapiro_stat_gen = np.apply_along_axis(
self._shapiro, 0, genesproj[:, :])
warnings.filterwarnings('ignore')
dist_genes = | pd.Series(1-shapiro_stat_gen, index=self.X2.columns) | pandas.Series |
from pelkmans.bounding_box import BoundingBox
import os
import logging
import numpy as np
import pandas as pd
import json
class MPPData:
def __init__(self, metadata, channels, labels, x, y, mpp, mapobject_ids, mcu_ids=None, conditions=None, seed=42):
self.log = logging.getLogger(self.__class__.__name__)
np.random.seed(seed)
self.mapobject_ids = mapobject_ids
self.log.info('creating new MPPData with {}'.format(self.mapobject_ids))
# subset metadata to mapobbject_ids
self.metadata = metadata[metadata.mapobject_id.isin(np.unique(self.mapobject_ids))]
# leave only unique elements
self.metadata = self.metadata.groupby(['mapobject_id_cell'], sort=False, as_index=False).first()
self.channels = channels
self.labels = labels
self.x = x
self.y = y
self.seed = seed
if len(mpp.shape) == 2:
self.mpp = mpp[:,np.newaxis,np.newaxis,:]
else:
self.mpp = mpp
if mcu_ids is None:
mcu_ids = np.zeros(len(self.mapobject_ids), dtype=np.int64)
self.mcu_ids = mcu_ids
if conditions is None:
conditions = np.zeros([len(self.mapobject_ids),1], dtype=np.int64)
self.conditions = conditions
@property
def has_neighbor_data(self):
return (self.mpp.shape[1]!=1) and (self.mpp.shape[2]!=1)
@property
def center_mpp(self):
c = self.mpp.shape[1]//2
#print(self.mpp.shape)
#print(c)
return self.mpp[:,c,c,:]
def __str__(self):
return 'MPPData ({} mpps with shape {} from {} objects)'.format(self.mpp.shape[0], self.mpp.shape[1:], len(self.metadata))
@classmethod
def from_data_dir(cls, data_dir, dir_type='hannah', seed=42):
# read all data from data_dir
if dir_type == 'scott':
well = data_dir.split('/')[-1]
metadata = pd.read_csv(os.path.join(data_dir, '../METADATA/201908-NascentRNA-4i_cyc0-22_plate01_{}_Cells_metadata.csv'.format(well)))
elif dir_type == 'hannah':
metadata = pd.read_csv(os.path.join(data_dir, 'metadata.csv'), index_col=0)
channels = pd.read_csv(os.path.join(data_dir, 'channels.csv'), names=['channel_id', 'name'])
x = np.load(os.path.join(data_dir, 'x.npy'))
y = np.load(os.path.join(data_dir, 'y.npy'))
try:
labels = np.load(os.path.join(data_dir, 'labels.npy'))
except FileNotFoundError as e:
labels = np.zeros(len(x), dtype=np.uint16)
mpp = np.load(os.path.join(data_dir, 'mpp.npy'))
mapobject_ids = np.load(os.path.join(data_dir, 'mapobject_ids.npy'))
# init self
self = cls(metadata=metadata, channels=channels, labels=labels, x=x, y=y, mpp=mpp, mapobject_ids=mapobject_ids, seed=seed)
self.data_dir = data_dir
return self
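    # Illustrative usage (the directory is assumed to contain metadata.csv,
    # channels.csv, x.npy, y.npy, mpp.npy and mapobject_ids.npy, as read above):
    #     mpp_data = MPPData.from_data_dir('/path/to/well_dir', dir_type='hannah')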
@classmethod
def concat(cls, objs):
"""concatenate the mpp_data objects by concatenating all arrays and return a new one"""
# channels need to be the same
for mpp_data in objs:
assert (mpp_data.channels.name == objs[0].channels.name).all()
channels = objs[0].channels
# concatenate metadata (pandas)
metadata = pd.concat([mpp_data.metadata for mpp_data in objs], axis=0, ignore_index=True)
# concatenate numpy arrays
labels = np.concatenate([mpp_data.labels for mpp_data in objs], axis=0)
x = np.concatenate([mpp_data.x for mpp_data in objs], axis=0)
y = np.concatenate([mpp_data.y for mpp_data in objs], axis=0)
mpp = np.concatenate([mpp_data.mpp for mpp_data in objs], axis=0)
mapobject_ids = np.concatenate([mpp_data.mapobject_ids for mpp_data in objs], axis=0)
conditions = np.concatenate([mpp_data.conditions for mpp_data in objs], axis=0)
#print(mapobject_ids)
#for mpp_data in objs:
# print('\t', mpp_data.mapobject_ids)
mcu_ids = np.concatenate([mpp_data.mcu_ids for mpp_data in objs], axis=0)
self = cls(metadata=metadata, channels=channels, labels=labels, x=x, y=y, mpp=mpp, mapobject_ids=mapobject_ids, mcu_ids=mcu_ids,
conditions=conditions)
self.log.info('Concatenated several MPPDatas')
return self
def merge_instances(self, objs):
"""
Merge self variables with instances of same class (objs vars).
This method is very similar to classmethod concat. The difference is
that this modify the self instance instead of returning a new one.
The aim of this method is to add support to images and masks.
Input: List containing instances of class MPPData.
Output: None.
"""
for mpp_data in objs:
if not all(mpp_data.channels.name == self.channels.name):
raise Exception('Channels across MPPData instances are not the same!')
if (vars(mpp_data).keys() != vars(self).keys()):
raise Exception('Variables across MPPData instances are not the same!')
# concatenate metadata (pandas)
self.metadata = pd.concat([self.metadata]+[mpp_data.metadata for mpp_data in objs], axis=0, ignore_index=True)
# Concatenate instances variables
instance_vars = {'labels', 'x', 'y', 'mpp', 'mapobject_ids', 'mcu_ids','conditions', 'images', 'images_masks'}
for var in set(vars(self).keys()).intersection(instance_vars):
temp_var = np.concatenate([getattr(self, var)]+[getattr(mpp_data,var) for mpp_data in objs], axis=0)
setattr(self, var, temp_var)
self.log.info('Concatenated several MPPDatas')
def filter_cells(self, filter_criteria=['is_border_cell'], filter_values=[0]):
"""
Filter cells given the desired criteria (from metadata).
Input:
-filter_criteria: list containing the metadata column
names
            -filter_values: list with values to be removed (cut),
                corresponding to the filter_criteria entries
        Output:
            modifies self attributes, so that metadata, labels, x, y,
            mapobject_ids, mpp, conditions and mcu_ids only keep cell
            information fulfilling the given parameters
"""
msg = 'Starting filtering process with filters:\n{}\n{}'.format(filter_criteria, filter_values)
self.log.info(msg)
if len(filter_criteria) != len(filter_values):
msg = 'length of filter_criteria and filter_values defined in input parameters does not match!'
self.log.error(msg)
raise Exception(msg)
metadata_mask = np.ones(self.metadata.shape[0]).astype(bool)
msg = 'Total number of cells: {}'.format(int(np.sum(metadata_mask)))
self.log.info(msg)
print(msg)
for f, f_val in zip(filter_criteria, filter_values):
if (f_val == 'nan') or (f_val == 'NaN') or (f_val == 'NAN'):
mask_temp = ~self.metadata[f].isna().values
else:
mask_temp = ~(self.metadata[f] == f_val).values
            msg = '{} cells cut by filter: {} == {}'.format(self.metadata.shape[0]-np.sum(mask_temp), f, f_val)
self.log.info(msg)
print(msg)
metadata_mask &= mask_temp
        msg = 'Number of cells cut: {}'.format(int(self.metadata.shape[0] - np.sum(metadata_mask)))
self.log.info(msg)
print(msg)
# Filter metadata
self.metadata = self.metadata.iloc[metadata_mask]
# Get mapobject_ids from metadata that fulfill the given conditions
mapobject_ids = self.metadata.mapobject_id.values
        # Get mask and filter labels, x, y, mapobject_ids, mpp, conditions and mcu_ids
mask = np.in1d(self.mapobject_ids, mapobject_ids)
instance_vars = {'labels', 'x', 'y', 'mpp', 'mapobject_ids', 'mcu_ids','conditions'}
for var in instance_vars:
setattr(self, var, getattr(self, var)[mask])
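    # Illustrative call (criterion and value are assumptions; listed values are
    # the ones that get removed):
    #     mpp_data.filter_cells(filter_criteria=['is_border_cell'], filter_values=[1])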
def train_val_test_split(self, train_frac=0.8, val_frac=0.1):
"""split along mapobject_ids for train/val/test split"""
ids = np.unique(self.mapobject_ids)
np.random.seed(self.seed)
np.random.shuffle(ids)
num_train = int(len(ids)*train_frac)
num_val = int(len(ids)*val_frac)
train_ids = ids[:num_train]
val_ids = ids[num_train:num_train+num_val]
test_ids = ids[num_train+num_val:]
self.log.info('splitting data in {} train, {} val, and {} test objects'.format(len(train_ids), len(val_ids), len(test_ids)))
splits = []
for split_ids in (train_ids, val_ids, test_ids):
#ind = []
#for cur_id in split_ids:
# ind.append(np.where(self.mapobject_ids==cur_id)[0])
#ind = np.concatenate(ind, axis=0)
ind = np.in1d(self.mapobject_ids, split_ids)
splits.append(MPPData(metadata=self.metadata, channels=self.channels, labels=self.labels[ind],
x=self.x[ind], y=self.y[ind], mpp=self.mpp[ind],
mapobject_ids=self.mapobject_ids[ind], mcu_ids=self.mcu_ids[ind],
conditions=self.conditions[ind]))
return splits
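    # Illustrative usage:
    #     train, val, test = mpp_data.train_val_test_split(train_frac=0.8, val_frac=0.1)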
def add_cell_cycle_to_metadata(self, cc_file):
"""
Add Cell cycle information to metadata.
Input: Absolute path to cell cycle file
Output: self.metadata with cell cycle information
"""
msg = 'Adding Cell Cycle to metadata using file:\n{}'.format(cc_file)
self.log.info(msg)
if not os.path.exists(cc_file):
msg = 'Cell cycle file {} not found!'.format(cc_file)
self.log.error(msg)
raise Exception(msg)
cc_data = pd.read_csv(cc_file)
self.metadata = self.metadata.merge(cc_data,
left_on='mapobject_id_cell',
right_on='mapobject_id',
how='left',
suffixes=('','_cc'))
self.metadata = self.metadata.drop(['mapobject_id_cc'], axis=1)
def add_well_info_to_metadata(self, well_file):
"""
Add well information (cell line, perturbation and duration)
to metadata.
Input: Absolute path to well information file
Output: self.metadata with well information
"""
msg = 'Adding Well info to metadata using file:\n{}'.format(well_file)
self.log.info(msg)
if not os.path.exists(well_file):
msg = 'Well metadata file {} not found!'.format(well_file)
self.log.error(msg)
raise Exception(msg)
well_data = pd.read_csv(well_file)
well_data = well_data[['plate_name', 'well_name', 'cell_type', 'perturbation', 'duration']]
# Check for rows in well_data df with same plate_name and well_name values
if (np.sum(well_data.groupby(['plate_name', 'well_name']).size().values > 1) > 0):
msg = 'More than one row in {} with same combination of plate_name and well_name values!'.format(well_file)
self.log.error(msg)
raise Exception(msg)
self.metadata = self.metadata.merge(well_data,
left_on=['plate_name_cell', 'well_name_cell'],
right_on=['plate_name', 'well_name'],
how='left',
suffixes=('','_wmd'))
self.metadata = self.metadata.drop(['plate_name_wmd','well_name_wmd'], axis=1)
def add_image_and_mask(self, data='MPP', remove_original_data=False, channel_ids=None, img_size=None, pad=0):
"""
Add images and its respective mask to instance vars as numpy arrays.
        IMPORTANT: Images are stored as np.uint16, which can only hold values in
        [0, 65535] (the value range of MPPData). This allows images to be stored
        without using a lot of RAM. Therefore, this method should be run BEFORE
        normalizing the original data! Otherwise most of the data will be set to 0!
        The normalization of the images can only be done while saving to disk. To do
        so, after running this method call get_image_normalization_vals to get the
        normalizing values per channel, and then call
        normalize_and_save_MPPData_images to normalize the images while saving.
TODO: Implement support for data different than 'MPP'
Input:
data: str indicating data type
channel_ids: 1D array indicating id channels to be contemplated in the returned image and mask
img_size: Natural Number, size for output images (i.e. shape: (img_size,img_size))
pad: amount of padding added to returned image (only used when img_size is None)
            remove_original_data: boolean indicating if the data used to create the images and masks should be deleted after the conversion.
Output (added to self):
-imgs: array of shape (n_observations,img_size,img_size,len(channel_ids))
            -mask: boolean array of same shape as imgs. An entry is True
                if the value came from MPPData.
"""
msg = 'Adding images and masks to MPPData'
self.log.info(msg)
imgs = []
mask = []
for mapobject_id in self.metadata.mapobject_id:
if data == 'MPP':
res = self.get_mpp_img(mapobject_id, channel_ids, img_size=img_size, pad=pad)
res_m = self.get_mpp_img(mapobject_id, get_mask=True, img_size=img_size, pad=pad).astype(np.bool).reshape(res.shape[:-1])
else:
                msg = 'Data type other than MPP given! Not implemented yet!'
                self.log.error(msg)
raise NotImplementedError
if img_size is None:
res = res[0]
res_m = res_m[0]
imgs.append(res)
mask.append(res_m)
if remove_original_data:
            msg = 'Deleting original MPPData...'
self.log.info(msg)
print(msg)
del(self.labels, self.x, self.y, self.mpp, self.mapobject_ids, self.mcu_ids, self.conditions)
self.images = np.array(imgs).astype(np.uint16)
self.images_masks = np.array(mask)
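        # Sketch of the intended workflow (function names taken from the docstring above;
        # get_image_normalization_vals / normalize_and_save_MPPData_images are defined elsewhere):
        #   mpp_data.add_image_and_mask(data='MPP', img_size=224)   # run BEFORE any normalisation
        #   vals = get_image_normalization_vals(mpp_data)           # per-channel normalisation values
        #   normalize_and_save_MPPData_images(mpp_data, vals)       # normalise only while saving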
def add_scalar_projection(self, method='avg'):
"""
        This method projects each cell (and each of its channels) onto a scalar. For instance,
        assuming images, if data.images.shape == (100, 240, 240, 38) (100 images of size 240x240
        with 38 channels), then this method projects data.images into an array of shape (100, 38).
        Input:
            -method: string indicating the function used to project each image into a single
             number. Available options are the average ('avg'), the median ('median') and
             'size_and_sum' (per-channel sum and size columns).
        Output: no return value. For instance, if 'avg' is selected, m columns are added to
            self.metadata containing the average value of each cell and channel (m is the
            number of channels).
"""
n_cells = self.metadata.shape[0]
n_channels = self.mpp.shape[-1]
cell_ids = np.array(self.metadata.mapobject_id.values)
col_name = ['mapobject_id', 'cell_size']
if (method == 'size_and_sum'):
col_name += [self.channels.set_index('channel_id').loc[c].values[0]+'_sum' for c in range(n_channels)]
col_name += [self.channels.set_index('channel_id').loc[c].values[0]+'_size' for c in range(n_channels)]
else:
col_name += [self.channels.set_index('channel_id').loc[c].values[0]+'_'+method for c in range(n_channels)]
scalar_vals_df = | pd.DataFrame(columns=col_name) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import datetime
df_data = pd.read_excel("Wholesale_Electricity_Prices_2020.xlsx")
# print(df_data)
# df_data = df_data.set_index("Gewerbe allgemein")
df_data
#df.index = pd.to_datetime(df.index, errors='ignore')
#df
df_data
df_data = df_data[['dtDate', 'intHour','dblPrice']]
df_data
df_data.intHour = pd.to_timedelta(df_data.intHour, unit='h')
df_data
df_data.intHour = df_data.intHour.astype(str)
df_data["intHour"] = df_data["intHour"].str.slice(start = 7)
df_data.intHour = | pd.to_datetime(df_data.intHour, errors='ignore', format="%H:%M:%S") | pandas.to_datetime |
import pandas as pd
from datetime import date, timedelta
filepath = "people.xlsx"
# Read the worksheet named Sheet1 from the workbook
people = pd.read_excel(filepath, sheet_name="Sheet1")
print(people)
print("=====1=====")
# header = 2 表示从第3行开始 相当于跳过了第2行
people1 = pd.read_excel(filepath, header=2, sheet_name="Sheet1")
print(people1)
print("=====2=====")
# skiprows skips the first few rows; usecols specifies which columns of data to use
people3 = | pd.read_excel(filepath, sheet_name="Sheet1", skiprows=4, usecols="B:C") | pandas.read_excel |
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
# def get(df, col):
# return(df[col])
# df['Close'] => get(df, COL.CLOSE)
# price=COL.CLOSE
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(pd.rolling_mean(df[price], n), name=name)
return out(SETTINGS, df, result)
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
result=pd.Series(pd.ewma(df[price], span=n, min_periods=n - 1), name='EMA_' + str(n))
return out(SETTINGS, df, result)
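# Note: pd.rolling_mean / pd.ewma used above were removed in pandas >= 0.18; on a modern
# pandas the equivalent calls (assuming otherwise unchanged parameters) would be e.g.:
#   df[price].rolling(n).mean()                        # MA
#   df[price].ewm(span=n, min_periods=n - 1).mean()    # EMA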
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
# for i, idx in enumerate(df.index)
# TR=max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
        TR = max(df['High'].iloc[i + 1], df['Close'].iloc[i]) - min(df['Low'].iloc[i + 1], df['Close'].iloc[i])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
result = pd.Series(pd.ewma(TR_s, span=n, min_periods=n), name='ATR_' + str(n))
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(pd.rolling_mean(df[price], n))
MSD = pd.Series(pd.rolling_std(df[price], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(pd.ewma(SOk, span=n, min_periods=n - 1), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = pd.Series(pd.rolling_mean(df[key], timeperiod, min_periods=timeperiod), name='SMA_' + str(timeperiod))
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = pd.ewma(df['Close'], span=n, min_periods=n - 1)
EX2 = pd.ewma(EX1, span=n, min_periods=n - 1)
EX3 = pd.ewma(EX2, span=n, min_periods=n - 1)
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span=n, min_periods=n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1) / ATR,name='PosDI')
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1) / ATR,name='NegDI')
result = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span=n_ADX, min_periods=n_ADX - 1), name='ADX_' + str(n) + '_' + str(n_ADX))
result = pd.concat([df,PosDI,NegDI,result], join='outer', axis=1,ignore_index=True)
result.columns=["High","Low","Close","PosDI","NegDI","ADX"]
return result
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(pd.ewma(df[price], span=n_fast, min_periods=n_slow - 1))
EMAslow = pd.Series(pd.ewma(df[price], span=n_slow, min_periods=n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span=9, min_periods=8), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span=9, min_periods=8)
EX2 = pd.ewma(EX1, span=9, min_periods=8)
Mass = EX1 / EX2
result = pd.Series(pd.rolling_sum(Mass, 25), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
result = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iloc[i + 1]['High'] - df.iloc[i]['High']
DoMove = df.iloc[i]['Low'] - df.iloc[i + 1]['Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = | pd.Series(DoI) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# ## Bad LR Performance
# This notebook pokes at the results to try to figure out what is causing LR methods to do worse than random when the linear signal is removed
# In[66]:
import collections
import glob
import itertools
import json
import os
import numpy as np
import pandas as pd
import sklearn.metrics
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# In[3]:
top_five_tissues = ['Blood', 'Breast', 'Stem_Cell', 'Cervix', 'Brain']
#top_five_tissues = ['Blood', 'Breast', 'Stem_Cell']
combo_iterator = itertools.combinations(top_five_tissues, 2)
tissue_pairs = [pair for pair in combo_iterator]
tissue_pairs[:3]
# In[4]:
in_files = []
for pair in tissue_pairs:
in_files.extend(glob.glob('../../results/{}.{}.*signal_removed*.tsv'.format(pair[0], pair[1])))
len(in_files)
# In[5]:
run_results = pd.DataFrame()
for path in in_files:
new_df = | pd.read_csv(path, sep='\t') | pandas.read_csv |
import pandas as pd
import numpy as np
class DataParser:
@staticmethod
def _parse_companies(cmp_list):
"""
        Creates a companies DataFrame from the list of dicts returned by the request
:param cmp_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=['ID', 'TITLE', 'CMP_TYPE_CUSTOMER', 'CMP_TYPE_PARTNER'])
if cmp_list:
cmp_df = pd.DataFrame(cmp_list)
cmp_df['CMP_TYPE_CUSTOMER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'CUSTOMER') else 0)
cmp_df['CMP_TYPE_PARTNER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'PARTNER') else 0)
cmp_df = cmp_df.drop(columns=['COMPANY_TYPE'], axis=1)
ret_df = pd.concat([ret_df, cmp_df])
return ret_df
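        # e.g. DataParser._parse_companies([{'ID': '1', 'TITLE': 'Acme', 'COMPANY_TYPE': 'CUSTOMER'}])
        # -> one row with CMP_TYPE_CUSTOMER == 1 and CMP_TYPE_PARTNER == 0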
@staticmethod
def _parse_deals(deal_list):
"""
        Creates a deals DataFrame from the list of dicts returned by the request
:param deal_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'OPPORTUNITY_DEAL_Q01', 'PROBABILITY_DEAL_Q01', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q01',
'OPPORTUNITY_DEAL_Q09', 'PROBABILITY_DEAL_Q09', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q09',
'OPPORTUNITY_DEAL_MEAN', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEAN', 'CLOSED',
'OPPORTUNITY_DEAL_MEDIAN', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEDIAN', 'DEAL_BY_YEAR'])
ret_df.index.name = 'COMPANY_ID'
if deal_list:
deal_df = pd.DataFrame(deal_list)
deal_df['CLOSED'] = deal_df['CLOSED'].apply(lambda x: 1 if (x == 'Y') else 0)
deal_df['OPPORTUNITY'] = pd.to_numeric(deal_df['OPPORTUNITY'])
deal_df['PROBABILITY'] = pd.to_numeric(deal_df['PROBABILITY'])
deal_df['BEGINDATE'] = pd.to_datetime(deal_df['BEGINDATE'])
deal_df['CLOSEDATE'] = pd.to_datetime(deal_df['CLOSEDATE'])
deal_df['TIME_DIFF_BEGIN_CLOSE'] = (deal_df['CLOSEDATE'] - deal_df['BEGINDATE']).astype(
'timedelta64[h]') / 24
deal_group = deal_df.groupby(by='COMPANY_ID')
deal_count = pd.DataFrame(deal_group['CLOSED'].count())
deal_date_max = deal_group['CLOSEDATE'].max()
deal_date_min = deal_group['BEGINDATE'].min()
d = {'YEAR': (deal_date_max - deal_date_min).astype('timedelta64[h]') / (24 * 365)}
deal_date_max_min_diff = | pd.DataFrame(data=d) | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy import dtype
from matplotlib.pyplot import ylabel
from matplotlib.cm import ScalarMappable
from matplotlib.pyplot import savefig
import math
from getCpuUsageForStage import *
import sys
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-i", "--inputFile")
parser.add_argument("-t", "--topFile")
parser.add_argument("-o", "--outputFile")
args = parser.parse_args(sys.argv[1:])
inputFileName = args.inputFile
topFileName = args.topFile
outputFileName = args.outputFile
| pd.set_option('display.max_rows', 500) | pandas.set_option |
import numpy as nmp
import pandas as pnd
import theano.tensor as tns
import pymc3 as pmc
##
def bin_lpdf(r, R, theta):
return tns.gammaln(R + 1.0) - tns.gammaln(r + 1.0) - tns.gammaln(R - r + 1.0)\
+ r*tns.log(theta) + (R - r)*tns.log1p(-theta)
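# bin_lpdf is the log of the Binomial(R, theta) pmf at r, with the binomial coefficient
# written via gammaln for numerical stability.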
##
def binmix_logp_fcn(R, theta, lw):
def logp(r):
lp = lw + bin_lpdf(r, R, theta).sum(1)
return pmc.logsumexp(lp, 0)#.sum()
return logp
##
def betabin_lpdf(r, R, a, b):
return tns.gammaln(R + 1.0) - tns.gammaln(r + 1.0) - tns.gammaln(R - r + 1.0)\
+ tns.gammaln(r + a) + tns.gammaln(R - r + b) - tns.gammaln(R + a + b)\
+ tns.gammaln(a + b) - tns.gammaln(a) - tns.gammaln(b)
##
def betabinmix_logp_fcn(R, u, theta, lw):
a = u * theta
b = u * (1.0 - theta)
def logp(r):
lp = lw + betabin_lpdf(r, R, a, b).sum(1)
return pmc.logsumexp(lp, 0)#.sum()
return logp
##
def cov_expquad(x1, x2, tau):
return tns.exp(-0.5 * tau * (x1 - x2)**2)
##
def cov_exp(x1, x2, tau):
return tns.exp(-tns.sqrt(tau) * tns.abs_(x1 - x2))
##
def cov_mat32(x1, x2, tau):
r = tns.abs_(x1 - x2)
c = tns.sqrt(3.0) * r * tns.sqrt(tau)
return (1.0 + c) * tns.exp(-c)
##
def cov_mat52(x1, x2, tau):
r = tns.abs_(x1 - x2)
c = tns.sqrt(5.0) * r * tns.sqrt(tau)
return (1.0 + c + 5.0/3.0 * r**2 * tau) * tns.exp(-c)
##
def stick_breaking_log(u):
"""Return log of weights from stick-breaking process."""
lu = tns.concatenate((tns.log(u), [0.0]))
cs = tns.concatenate(([0.0], tns.cumsum(tns.log1p(-u))))
lw = lu + cs
return lw
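# In weight space this is the usual stick-breaking construction,
#   w_k = u_k * prod_{j<k} (1 - u_j)  for k < K,   w_K = prod_{j} (1 - u_j),
# computed here in log space for numerical stability.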
##
COV_FCNS = {
'ExpQ': cov_expquad,
'Exp': cov_exp,
'Mat32': cov_mat32,
'Mat52': cov_mat52
}
##
def calculate_cluster_weights(trace, threshold, alpha):
w_samples = nmp.exp(trace['lw'])
# re-weight cluster weights
w = nmp.median(w_samples, 0)
wids = w < threshold
w_samples[:, wids] = 0
w_samples = w_samples / nmp.sum(w_samples, 1, keepdims=True)
# median, credible interval
w_lo, w, w_hi = nmp.quantile(w_samples, [0.5*alpha, 0.5, 1 - 0.5*alpha], axis=0)
#
return pnd.DataFrame({
'CLUSTERID': nmp.arange(w.size) + 1,
'W': w,
'W_LO': w_lo,
'W_HI': w_hi
})
##
def calculate_cluster_centres(data, trace, alpha):
phi_samples = trace['phi']
phi_lo, phi, phi_hi = nmp.quantile(phi_samples, [0.5*alpha, 0.5, 1 - 0.5*alpha], axis=0)
sid = data['samples'].SAMPLEID
cid = nmp.arange(phi_samples.shape[1]) + 1
centres = pnd.concat({
'PHI': pnd.DataFrame(phi, index=cid, columns=sid),
'PHI_LO': pnd.DataFrame(phi_lo, index=cid, columns=sid),
'PHI_HI': pnd.DataFrame(phi_hi, index=cid, columns=sid)
}, axis=1).stack().reset_index().rename(columns={'level_0': 'CLUSTERID'})
if 'TIME2' in data['samples']:
centres = pnd.merge(centres, data['samples'][['SAMPLEID', 'TIME2']], how='left', on = 'SAMPLEID')
centres = centres[['CLUSTERID', 'SAMPLEID', 'TIME2', 'PHI', 'PHI_LO', 'PHI_HI']]
#
return centres
##
def calculate_ccf_and_hard_clusters(data, trace, threshold, alpha):
r, R, VAF0 = data['r'].values.T, data['R'].values.T, data['VAF0'].values.T
r, R, VAF0 = r[None, None, :, :], R[None, None, :, :], VAF0[None, None, :, :]
phi, lw = trace.phi, trace.lw
theta = VAF0 * phi[:, :, :, None]
# re-weight cluster weights
w_samples = nmp.exp(lw)
w = nmp.median(w_samples, 0)
wids = w < threshold
w_samples[:, wids] = 0
w_samples = w_samples / nmp.sum(w_samples, 1, keepdims=True)
lw = nmp.log(w_samples)
# calculate logliks
if 'u' in trace.varnames: # implies BetaBinomial model
u = trace.u[:, None, :, None]
a = u * theta
b = u * (1.0 - theta)
lp = betabin_lpdf(r, R, a, b).eval()
else: # implies Binomial model
lp = bin_lpdf(r, R, theta).eval()
# ppd
w = nmp.exp(lp + lw[:, :, None, None])
ppd_ = nmp.sum(w * R, axis=1)
ppd_lo, ppd, ppd_hi = nmp.quantile(ppd_, [alpha * 0.5, 0.5, 1 - alpha * 0.5], axis=0)
ppd = pnd.concat({
'PPD': pnd.DataFrame(ppd.T, index=data['r'].index, columns=data['r'].columns),
'PPD_LO': pnd.DataFrame(ppd_lo.T, index=data['r'].index, columns=data['r'].columns),
'PPD_HI': | pnd.DataFrame(ppd_hi.T, index=data['r'].index, columns=data['r'].columns) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 19 18:47:49 2018
@author: deepayanbhadra
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from hmwk1 import buildmat
#Q1(a) The function generates Nd data vectors, each containing Nf features
def create_classification_problem(Nd,Nf,kappa):
D = buildmat(Nd,Nf,kappa)
w = np.random.randn(Nf,1);
c = np.sign(D.dot(w));
flipper = np.ones((Nd,1)) - 2*(np.random.rand(Nd,1)>.9);
c = c*flipper;
return D,c
#Q1(b) The function evaluates the logistic regression objective function
def logreg_objective(x,D,c):
z = np.diagflat(c).dot(D).dot(x)
idxN, idxP = z<0, z>=0
y1 = np.sum(-z[idxN]) + np.sum([math.log(np.exp(x)+1) for x in z[idxN]])
y2 = np.sum([math.log(1+np.exp(-x)) for x in z[idxP]])
y = y1+y2
return y
#Q2(a) This function should load the mnist dataset
def load_mnist():
import h5py
f = h5py.File("mnist.h5")
x_test = f["x_test"]
x_train = f["x_train"]
y_test = f["y_test"]
y_train = f["y_train"]
with x_test.astype("float"):
x_test = x_test[:]
with y_test.astype("float"):
y_test = y_test[:]
with x_train.astype("float"):
x_train = x_train[:]
with y_train.astype("float"):
y_train = y_train[:]
return x_test,x_train,y_test,y_train
#Q2(b) This function implements the "SoftPlus" non-linearity, which is a smoothed form of the ReLU
def softplus(X):
y = np.zeros(X.shape)
idxN, idxP = X<0, X>=0
xn,xp = X[idxN],X[idxP]
y[X<0] = [math.log(1+np.exp(x)) for x in xn]
y[X>=0] = xp+[math.log(np.exp(-x)+1) for x in xp];
return y
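# The two branches implement the identity log(1 + e^x) = max(x, 0) + log(1 + e^(-|x|)),
# which avoids overflowing exp() for inputs with large magnitude.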
# Q2(c) This function will implement cross-entropy loss (measures the performance of a classification model)
def cross_entropy(X,Y):
Xm = X - np.max(X,axis = 1, keepdims = True)
softmax = np.divide(np.exp(Xm),np.sum(np.exp(Xm),axis = 1,keepdims = True))
y = -math.log(np.mean(np.sum(softmax*Y,axis=1,keepdims = True)))
return y
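# Subtracting the row-wise maximum before exponentiating is the standard log-sum-exp
# stabilisation: it leaves the softmax unchanged but prevents overflow in np.exp.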
# Q2(d) This function will implement the loss of a neural network
def net_objective(W,D,L):
temp = D
for i in range(0,len(W)):
temp = softplus(np.dot(temp,W[i]))
y = cross_entropy(temp,L);
return y
# Q3(a) This function computes the image gradient
def grad2d(X):
# Computing the x-differences
kernel = np.zeros(X.shape)
kernel[0,0] = 1
kernel[0,1] = -1
Dx = np.fft.fft2(kernel)
Gx = np.fft.ifft2(Dx*np.fft.fft2(X))
#Computing the y-differences
kernel = np.zeros(X.shape)
kernel[0,0] = 1
kernel[1,0] = -1
Dy = np.fft.fft2(kernel)
Gy = np.fft.ifft2(Dy*np.fft.fft2(X))
return np.stack([Gx, Gy], axis=2)
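# Multiplying in the Fourier domain implements a circular convolution with the
# forward-difference stencil, so Gx and Gy are periodic (wrap-around) finite differences.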
# Q3(b) This function computes the divergence
def div2d(G):
    # Extract the x- and y-derivatives
Gx = G[:,:,0]
Gy = G[:,:,1]
# We must convolve with the FLIPPED stencil to get the linear filtering we want
kernel = np.zeros(Gx.shape)
kernel[0,0]=1
kernel[0][-1] =-1
# create diagonal matrix in decomposition K=F'DF
Dx = np.fft.fft2(kernel)
# Use the eigen-decomposition to convolve the stencil with X, and get the differences in the horizontal direction.
Divx = np.fft.ifft2(Dx*np.fft.fft2(Gx))
kernel = np.zeros(Gy.shape)
kernel[0,0]=1
kernel[-1][0]=-1
Dy = np.fft.fft2(kernel)
Divy = np.fft.ifft2(Dy*np.fft.fft2(Gy))
return Divx+Divy
if __name__ == "__main__":
    #Q1(a) Create a classification problem and plot it
[ D,c ] = create_classification_problem(100, 2, 1)
valid1 = D[np.where(c==1)[0]]
valid2 = D[np.where(c==-1)[0]]
plt.scatter(valid1[:, 0], valid1[:, 1])
plt.scatter(valid2[:, 0], valid2[:, 1])
plt.title("Scatter plot with condition no = 1")
plt.show()
[ D,c ] = create_classification_problem(100, 2, 10)
valid1 = D[np.where(c==1)[0]]
valid2 = D[np.where(c==-1)[0]]
plt.scatter(valid1[:, 0], valid1[:, 1])
plt.scatter(valid2[:, 0], valid2[:, 1])
plt.title("Scatter plot with condition no = 10")
plt.show()
input("Press Enter to continue...")
#Q1(b) to evaluate logistic regression
Nd, Nf, kappa = 1000,100,10
[D,c] = create_classification_problem(Nd,Nf,kappa)
x = np.random.randn(Nf,1);
print("The logistic regression objective is")
y = logreg_objective(x,D,c)
print(y)
input("Press Enter to continue...")
print("The logistic regression objective for a large x is");
x = 10000*np.random.randn(Nf,1)
#Calling on a large input to verify that we don't get any NaNs
y = logreg_objective(x,D,c)
print(y)
input("Press Enter to continue...")
#Q2(a) to load the dataset
x_test,x_train,y_test,y_train = load_mnist()
D = x_train
W = [None] * 2
W[0] = np.random.randn(784,100)
W[1] = np.random.randn(100,10)
y_train += np.ones((60000,))
# One HOT Encoding
s = | pd.Series(y_train) | pandas.Series |
#########################################################
### DNA variant annotation tool
### Version 1.0.0
### By <NAME>
### <EMAIL>
#########################################################
import pandas as pd
import numpy as np
import allel
import argparse
import subprocess
import sys
import os.path
import pickle
import requests
import json
def extract_most_deleterious_anno(row, num_ann_max):
ann_order = pd.read_csv(anno_order_file, sep=' ')
alt = row[:num_ann_max]
anno = row[num_ann_max:]
alt.index = range(0, len(alt))
anno.index = range(0, len(anno))
ann_all_alt = pd.DataFrame()
alt_unique = alt.unique()
for unique_alt in alt_unique:
if unique_alt != '':
anno_all = anno[alt == unique_alt]
ann_order_all = pd.DataFrame()
for ann_any in anno_all:
if sum(ann_any == ann_order.Anno) > 0:
ann_any_order = ann_order[ann_order.Anno == ann_any]
else:
ann_any_order = ann_order.iloc[ann_order.shape[0]-1]
ann_order_all = ann_order_all.append(ann_any_order)
small_ann = ann_order_all.sort_index(ascending=True).Anno.iloc[0]
ann_unique_alt = [unique_alt, small_ann]
ann_all_alt = ann_all_alt.append(ann_unique_alt)
ann_all_alt.index = range(0, ann_all_alt.shape[0])
return ann_all_alt.T
def run_snpeff(temp_out_name):
snpeff_command = ['java', '-Xmx4g', '-jar', snpeff_path, \
'-ud', '0', \
# '-v', \
'-canon', '-noStats', \
ref_genome, vcf_file]
temp_output = open(temp_out_name, 'w')
subprocess.run(snpeff_command, stdout=temp_output)
temp_output.close()
def get_max_num_ann(temp_out_name):
num_ann_guess = 500
callset = allel.vcf_to_dataframe(temp_out_name, fields='ANN', numbers={'ANN': num_ann_guess})
num_ann = callset.apply(lambda x: sum(x != ''), axis=1)
num_ann_max = num_ann.max() # num_ann_max = 175
return num_ann_max
def get_ann_from_output_snpeff(temp_out_name):
callset = allel.read_vcf(temp_out_name, fields='ANN', transformers=allel.ANNTransformer(), \
numbers={'ANN': num_ann_max})
df1 = pd.DataFrame(data=callset['variants/ANN_Allele'])
df2 = pd.DataFrame(data=callset['variants/ANN_Annotation'])
df3 = pd.concat((df1, df2), axis=1)
df3.columns = range(0, df3.shape[1])
return df3
def get_anno_total(anno_from_snpeff):
anno_total = pd.DataFrame()
pickle_dump = 'pickle_dump.temp'
if not os.path.isfile(pickle_dump):
print('Extracting most deleterious annotations generated by SnpEff')
for index, row in anno_from_snpeff.iterrows():
anno_row = extract_most_deleterious_anno(row, num_ann_max)
anno_total = anno_total.append(anno_row)
print('done')
dump_file = open(pickle_dump, 'wb')
pickle.dump(anno_total, dump_file, pickle.HIGHEST_PROTOCOL)
dump_file.close()
dump_file = open(pickle_dump, 'rb')
anno_total = pickle.load(dump_file)
a = ['Alt_' + str(i) for i in range(1, num_alt + 1)]
b = ['Anno_' + str(i) for i in range(1, num_alt + 1)]
c = list(range(0, num_alt * 2))
c[::2] = a
c[1::2] = b
anno_total.columns = c
anno_total.replace(np.nan, -1, inplace=True)
anno_total.index = range(0, anno_total.shape[0])
return anno_total
def get_num_alternate(vcf_file):
num_alt = allel.read_vcf(vcf_file, fields='numalt')['variants/numalt'].max()
return num_alt
def get_dp_ro_ao(temp_out_name):
callset_dp_ro_ao = allel.vcf_to_dataframe(temp_out_name, fields=['DP', 'RO', 'AO'], alt_number=num_alt)
callset_dp_ro_ao.index = range(0, callset_dp_ro_ao.shape[0])
return callset_dp_ro_ao
def get_alt_ref_ratio(callset_dp_ro_ao):
callset_ratio = pd.DataFrame()
for i in range(0, num_alt):
# print('run ratio: ', i)
callset_ratio[i] = callset_dp_ro_ao.apply(lambda x: x[i + 2] / x[1], axis=1)
# print('run ratio: ', i, ' done')
# print('callset_ratio is done')
callset_ratio.columns = ['RatioAR_Alt_' + str(i) for i in range(1, num_alt + 1)]
callset_ratio.index = range(0, callset_ratio.shape[0])
return callset_ratio
def combine_anno_and_callset(anno_total, callset_dp_ro_ao, callset_ratio, ExAC_variant_af, ExAC_variant_ordered_csqs):
anno_and_callset = pd.concat([anno_total, callset_dp_ro_ao, callset_ratio, ExAC_variant_af, ExAC_variant_ordered_csqs], axis=1)
return anno_and_callset
def combine_with_comma(row):
a = []
for i in range(0, len(row)):
if row.iloc[i][0] != '-':
a.append(True)
else:
a.append(False)
b = ",".join(row[a])
return b
def get_anno_good(anno_and_callset):
anno_columns = pd.DataFrame()
for i in range(1, num_alt + 1):
Alt_i = 'Alt_' + str(i)
Anno_i = 'Anno_' + str(i)
AO_i = 'AO_' + str(i)
RatioAR_Alt_i = 'RatioAR_Alt_' + str(i)
exac_var_af = 'exac_' + search_af + "_" + str(i)
exac_ordered_csqs = 'exac_' + search_ordered_csqs + '_' + str(i)
column_i = anno_and_callset[[Alt_i, Anno_i, 'DP', 'RO', AO_i, RatioAR_Alt_i, exac_var_af, exac_ordered_csqs]].apply(lambda x: '|'.join(x.map(str)), axis=1)
anno_columns = pd.concat([anno_columns, column_i], axis=1)
anno_one_column = anno_columns.apply(combine_with_comma, axis=1)
anno_good = ["ANN="] * len(anno_one_column) + anno_one_column
return anno_good
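# Each resulting INFO entry has one '|'-joined block per alternate allele, e.g.
#   ANN=Alt|Anno|DP|RO|AO|RatioAR|exac_af|exac_ordered_csqs[,...]
# matching the ##INFO header added in generate_header below.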
def get_num_lines_header(contents):
lines_header = 0
for i in range(0, len(contents)):
if contents[i][0] == '#' and contents[i + 1][0] != '#':
# print(contents[i])
# print(i)
lines_header = i # lines_header 142
return lines_header
def generate_output_vcf(vcf_file, anno_good):
input_vcf = pd.read_csv(vcf_file, sep='\t', skiprows=lines_header)
anno_good_all = input_vcf.INFO + ';' + anno_good
input_vcf.INFO = anno_good_all
output_vcf = input_vcf.copy()
return output_vcf
def generate_header(contents):
header = contents[0:lines_header]
header_add1 = """##SimpleAnnotation Version="0.0.1" By <NAME> <EMAIL> \n"""
header_add2 = """##SimpleAnnotation Cmd="python3 SimpleAnnotation.py -input {} -snpeff {} -genome {} "\n""".format(vcf_file, snpeff_path, ref_genome)
header_add3 = """##INFO=<ID=ANN,Number=.,Type=String, Description="Simple annotations: 'Alternate allele | Type of variation most deleterious | Sequence depth at the site of variation | Number of reads of reference | Number of reads of alternate | Ratio of read counts of alt vs ref | ExAC variant Allele Frequency | ExAC variant consequence most deleterious' ">\n"""
header.append(header_add1)
header.append(header_add2)
header.append(header_add3)
return header
def search_REST_ExAC(row, search_type):
row_var = [-1] * len(row)
url_1 = 'http://exac.hms.harvard.edu/rest/variant/{}/'.format(search_type)
for i in range(0, len(row)):
if row.iloc[i][-1] != '-':
url = url_1 + row.iloc[i]
my_response = requests.get(url)
if my_response.ok:
j_data = json.loads(my_response.content)
if search_type == search_af:
if 'allele_freq' in j_data.keys():
row_var[i] = j_data['allele_freq']
else:
row_var[i] = 'Not_found'
elif search_type == search_ordered_csqs:
if j_data != None and len(j_data) > 1:
row_var[i] = j_data[1]
else:
row_var[i] = 'Not_found'
else:
row_var[i] = 'Not_found'
return row_var
def ExAC_search_variant(var_all, search_type):
exac = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pickle
import pandas as pd
import sklearn
import keras
from keras import Sequential
from keras.layers import LeakyReLU
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop, Adam
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn import linear_model
from sklearn.feature_selection import f_classif, mutual_info_classif
from imblearn.over_sampling import SMOTE
def fit_and_calibrate_classifier(classifier, X, y):
X_train, X_cal, y_train, y_cal = train_test_split(
X, y, train_size=0.85, random_state=0)
classifier = classifier.fit(X_train, y_train)
# This line does the calibration
calibrated_classifier = CalibratedClassifierCV(
classifier, method='sigmoid', cv='prefit').fit(X_cal, y_cal)
return calibrated_classifier
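# Minimal usage sketch (any sklearn-compatible classifier should work here):
#   clf = fit_and_calibrate_classifier(linear_model.LogisticRegression(), X, y)
#   probabilities = clf.predict_proba(X_new)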
class PricingModel():
def __init__(self, calibrate_probabilities=False):
self.calibrate = calibrate_probabilities
self.batch_size = 150
self.epochs = 10
self.X_raw = pd.read_csv('part3_data.csv')
self.claims_raw = np.array(pd.read_csv('part3_data.csv')['claim_amount'])
self.y_raw = None
self.X_test = None
self.y_test = None
self.y_mean = None
self.S_x = 5.0/6
self.base_classifier = self.probability_model()
self.pc = None
self.ppf_pp = None
self.pu_pic = None
self.drv = None
self.vh = None
def _preprocessor(self, X_raw, training = None):
"""Data preprocessing function.
This function prepares the features of the data for training,
evaluation, and prediction.
Parameters
----------
X_raw : ndarray
An array, this is the raw data as downloaded
Returns
-------
X: ndarray
A clean data set that is used for training and prediction.
"""
# =============================================================
# Select Features
# NB: 'claim_amount' and 'made_claim' already removed in hidden dataset, so no need to drop these
if training == True:
part3_df = X_raw.drop(['id_policy', 'drv_sex2', 'vh_make', 'vh_model', 'regional_department_code', 'pol_insee_code', 'claim_amount'], axis = 1)
else:
part3_df = X_raw.drop(['id_policy', 'drv_sex2', 'vh_make', 'vh_model', 'regional_department_code', 'pol_insee_code'], axis=1)
if 'made_claim' in X_raw:
part3_df = part3_df.drop(['made_claim'], axis=1)
if 'claim_amount' in X_raw:
part3_df = part3_df.drop(['claim_amount'], axis=1)
# For the feature 'pol_insee_code' we want to extract the first 2 digits as these are one of 96 departments:
# part3_df['pol_insee_code'] = part3_df['pol_insee_code'].astype(str).str[:2]
        # Replace NaN values with the mean of the feature
part3_df[['town_mean_altitude', 'town_surface_area', 'population', 'commune_code', 'canton_code', 'city_district_code']] = part3_df[['town_mean_altitude', 'town_surface_area', 'population', 'commune_code', 'canton_code', 'city_district_code']].replace(np.nan, part3_df[['town_mean_altitude', 'town_surface_area', 'population', 'commune_code', 'canton_code', 'city_district_code']].mean())
# Deal with NaN values in column of type 'object'
# part3_df.dropna(subset=['regional_department_code'], inplace=True)
# Convert categorical variables into one-hot encoding
if training == True:
part3_df_new = pd.get_dummies(part3_df, columns=['pol_coverage'])
self.pc = part3_df_new
part3_df_new = pd.get_dummies(part3_df_new, columns=['pol_pay_freq', 'pol_payd'])
self.ppf_pp = part3_df_new
part3_df_new = pd.get_dummies(part3_df_new, columns=['pol_usage'])
self.pu_pic = part3_df_new
part3_df_new = pd.get_dummies(part3_df_new, columns=['drv_drv2', 'drv_sex1'])
self.drv = part3_df_new
part3_df_new = pd.get_dummies(part3_df_new, columns=['vh_fuel', 'vh_type'])
self.vh = part3_df_new
print("Training shape:", part3_df_new.shape)
self.vh = part3_df_new
else:
part3_df_new = pd.get_dummies(part3_df, columns=['pol_coverage'])
            # Align the test-time dummies with the columns seen during training (the
            # original call discarded the reindex result, making it a no-op).
            part3_df_new = part3_df_new.reindex(columns=self.pc.columns, fill_value=0)
part3_df_new = | pd.get_dummies(part3_df_new, columns=['pol_pay_freq', 'pol_payd']) | pandas.get_dummies |
# Library imports
import os
import argparse
import random
import math
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input,Dense, Dropout, Flatten, Conv2D, MaxPooling2D, LeakyReLU, Activation,BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import img_to_array,load_img
import matplotlib.pyplot as plt
from phoc_label_generator import gen_phoc_label
from tensorflow.keras.utils import Sequence
from tensorflow_addons.layers import SpatialPyramidPooling2D
# Uncomment the following line and set appropriate GPU if you want to set up/assign a GPU device to run this code
# os.environ["CUDA_VISIBLE_DEVICES"]="1"
# Setting random seeds
tf.random.set_seed(73)
random.seed(73)
np.random.seed(73)
# Argument parser variables
ap = argparse.ArgumentParser()
ap.add_argument("-idn", type=str,required=False,
help="Identifier Name (Prefer Train Set Name)")
ap.add_argument("-batch", type=int,default=10,required=False,
help="Batch Size")
ap.add_argument("-epoch", type=int,default=20,required=False,
help="Number of Epochs")
ap.add_argument("-lr", type=float,default=1e-4,required=False,
help="Learning rate for optimizer")
ap.add_argument("-mp", type=str,required=True,
help="CSV file for Train Image to Class Label map")
ap.add_argument("-vi", type=str,required=True,
help="Folder for Validation Images")
ap.add_argument("-vmap", type=str,required=True,
help="CSV file for Validation Image to Class Label map")
ap.add_argument("-umap", type=str,default=None,required=False,
help="CSV file for Unseen images to Class Label map")
ap.add_argument("-tr", type=str,required=True,
help="Folder having Train Images")
args = vars(ap.parse_args())
MODEL=args['idn']
BATCH_SIZE=args['batch']
EPOCHS=args['epoch']
LR=args['lr']
train_csv_file=args['mp']
valid_csv_file=args['vmap']
train_unseen_csv_file=args['umap']
train_folder=args['tr']
valid_folder=args['vi']
model_name="new_"+MODEL+"_"+str(BATCH_SIZE)+"_"
# DataSequence class to pass data(images/vector) in batches
class DataSequence(Sequence):
def __init__(self, df, batch_size):
self.df = df # your pandas dataframe
self.bsz = batch_size # batch size
# Take labels and a list of image locations in memory
self.labels = np.asarray(self.df['Label'].tolist()).astype(np.float32)
self.im_list = self.df['Image'].tolist()
def __len__(self):
# compute number of batches to yield
return int(math.ceil(len(self.df) / float(self.bsz)))
def on_epoch_end(self):
# Shuffles indexes after each epoch if in training mode
self.indexes = range(len(self.im_list))
self.indexes = random.sample(self.indexes, k=len(self.indexes))
def get_batch_labels(self, idx):
# Fetch a batch of labels
return np.array(self.labels[idx * self.bsz: (idx + 1) * self.bsz])
def get_batch_features(self, idx):
# Fetch a batch of inputs
return np.array([img_to_array(load_img(im)) for im in self.im_list[idx * self.bsz: (1 + idx) * self.bsz]])
def __getitem__(self, idx):
batch_x = self.get_batch_features(idx)
batch_y = self.get_batch_labels(idx)
return batch_x, batch_y
# Function to build and return SPP-PHOCNet model
def build_phocnet():
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same',activation='relu',input_shape=(None,None,3)))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(SpatialPyramidPooling2D([1,2,4]))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
#model.add(Dense(4096, activation='linear'))
model.add(Dense(604, activation='sigmoid'))
#optimizer = tf.keras.optimizers.SGD(lr=1e-4, momentum=.9, decay=5e-5)
loss = tf.keras.losses.binary_crossentropy
optimizer = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-07,decay=5e-5)
model.compile(loss=loss, optimizer=optimizer, metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
model.summary()
return model
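# Because SpatialPyramidPooling2D([1, 2, 4]) produces a fixed-length output for any
# spatial size, the network accepts variable-sized word images (input_shape=(None, None, 3)).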
def getlabel(x):
return all_labels[x]
# Build train set dataframe after removing validation set and seen class test set images from train
df_train=pd.read_csv(train_csv_file)
df_valid=pd.read_csv(valid_csv_file)
if train_unseen_csv_file!=None:
df_unseen=pd.read_csv(train_unseen_csv_file)
df_train = df_train.merge(df_unseen, how='left', indicator=True)
df_train= df_train[df_train['_merge'] == 'left_only']
df_train = df_train[['Image', 'Word']]
if train_folder==valid_folder:
df_train = df_train.merge(df_valid, how='left', indicator=True)
df_train= df_train[df_train['_merge'] == 'left_only']
df_train = df_train[['Image', 'Word']]
print("Train_Images=",len(df_train),"Valid_Images=",len(df_valid))
# Generating dictionaries of words mapped to PHOC vectors
train_word_label=gen_phoc_label(list(set(df_train['Word'])))
valid_word_label=gen_phoc_label(list(set(df_valid['Word'])))
all_labels={**train_word_label,**valid_word_label}
# Adding folder names to file names
df_train['Image']=train_folder+"/"+df_train['Image']
df_valid['Image']=valid_folder+"/"+df_valid['Image']
df_train['Label']=df_train['Word'].apply(getlabel)
df_valid['Label']=df_valid['Word'].apply(getlabel)
# Build model
model=build_phocnet()
# Sequence for passing data(images, PHOC labels) to model
train_sequence = DataSequence(df_train, BATCH_SIZE)
valid_sequence = DataSequence(df_valid, BATCH_SIZE)
STEPS=len(df_train)//BATCH_SIZE
EPOCHS=70000//STEPS+10000//STEPS+1
# Function for LR-scheduler
def learning_rate_scheduler(epoch, lr):
#decay_rate = 1.1
#decay_step = 2
if epoch > 70000//STEPS:
return 1e-5
return lr
print("Model Built")
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-7, patience=5, verbose=2,mode='auto', baseline=None, restore_best_weights=False)
# rlp=tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.25, patience=3, verbose=1,mode='auto', min_delta=1e-7, cooldown=3, min_lr=1e-7)
# LR scheduler callback
lrs=tf.keras.callbacks.LearningRateScheduler(learning_rate_scheduler, verbose=0)
callbacks_list = [lrs]
# Training the model
history=model.fit(train_sequence, epochs=EPOCHS, validation_data=valid_sequence,shuffle=True,callbacks=callbacks_list)
# Save the model after training completes
model.save(model_name+".h5")
# Create directory to store training history
if not os.path.exists("Train_History"):
os.makedirs("Train_History")
# Store train history as CSV file
hist_df = | pd.DataFrame(history.history) | pandas.DataFrame |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.types.common import (is_integer,
is_float,
is_object_dtype,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_int64,
_ensure_object)
from pandas.types.dtypes import PeriodDtype
from pandas.types.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
from pandas.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.lib import infer_dtype
import pandas.tslib as tslib
from pandas.compat import zip, u
import pandas.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self._values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self._values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self._values, opname)(other._values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
func = getattr(self._values, opname)
result = func(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return wrapper
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, dtype=None,
**kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=copy)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = _ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq) for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = _ensure_object(data)
if freq is None:
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data._values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._values,
base1, base2, 1)
else:
if is_object_dtype(data):
inferred = infer_dtype(data)
if inferred == 'integer':
data = data.astype(np.int64)
if freq is None and is_object_dtype(data):
# must contain Period instance and thus extract ordinals
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
if freq is None:
msg = 'freq not specified and cannot be inferred'
raise ValueError(msg)
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
data = _ensure_object(data)
data = period.extract_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if (len(values) > 0 and is_float_dtype(values)):
raise TypeError("PeriodIndex can't take floats")
else:
return cls(values, name=name, freq=freq, **kwargs)
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
if values is None:
values = self._values
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
@property
def asi8(self):
return self._values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.asobject.values
@property
def _values(self):
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.asobject.values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._values[mask].searchsorted(where_idx._values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._values < self._values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif | is_integer_dtype(dtype) | pandas.types.common.is_integer_dtype |
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"values, dtype",
[
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
(["a", "b", "c"], "string"),
([1, 2, 3], "datetime64[ns]"),
([1, 2, 3], "datetime64[ns, CET]"),
([1, 2, 3], "timedelta64[ns]"),
(["2000", "2001", "2002"], "Period[D]"),
([1, 0, 3], "Sparse"),
([pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(3, 4)], "interval"),
],
)
@pytest.mark.parametrize(
"mask", [[True, False, False], [True, True, True], [False, False, False]]
)
@pytest.mark.parametrize("box_mask", [True, False])
@pytest.mark.parametrize("frame", [True, False])
def test_series_mask_boolean(values, dtype, mask, box_mask, frame):
ser = pd.Series(values, dtype=dtype, index=["a", "b", "c"])
if frame:
ser = ser.to_frame()
mask = pd.array(mask, dtype="boolean")
if box_mask:
mask = pd.Series(mask, index=ser.index)
expected = ser[mask.astype("bool")]
result = ser[mask]
tm.assert_equal(result, expected)
if not box_mask:
# Series.iloc[Series[bool]] isn't allowed
result = ser.iloc[mask]
tm.assert_equal(result, expected)
result = ser.loc[mask]
tm.assert_equal(result, expected)
# empty
mask = mask[:0]
ser = ser.iloc[:0]
expected = ser[mask.astype("bool")]
result = ser[mask]
tm.assert_equal(result, expected)
if not box_mask:
# Series.iloc[Series[bool]] isn't allowed
result = ser.iloc[mask]
tm.assert_equal(result, expected)
result = ser.loc[mask]
| tm.assert_equal(result, expected) | pandas._testing.assert_equal |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import numpy as np
from misc import set_size
from scipy import stats
from scipy.interpolate import interp1d
from pandas.plotting import table
import statsmodels.api as sm
df_knolls_grund = pd.read_csv("data-set\knolls_grund.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)", usecols = ['Datum Tid (UTC)','Havstemperatur'])
df_huvudskar = pd.read_csv("data-set\huvudskar.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)")
df_huvudskar = df_huvudskar.loc[df_huvudskar["Matdjup"]==1]
df_huvudskar = df_huvudskar.drop(columns=["Kvalitet", "Matdjup"])
df_finngrundet = pd.read_csv("data-set/finngrundet.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)", usecols = ['Datum Tid (UTC)','Havstemperatur'])
start, end = '2020-09-28', '2020-11-29'
df_finngrundet = df_finngrundet.loc[start:end]
df_huvudskar = df_huvudskar.loc[start:end]
df_knolls_grund = df_knolls_grund.loc[start:end]
smhi_mean = | pd.concat([df_knolls_grund, df_huvudskar, df_finngrundet]) | pandas.concat |
'''
this script downloads sequencing files from ncbi with sratoolkit using file
accession numbers
-- executes download on front end
-- script should be called as part of snakemake pipeline
-- used from within snakemake, so print statements not part of interactive
testing or module loading should include flush=True
-- usage: snakemake download_ncbi
'''
import subprocess
import pandas as pd
import os
import argparse
import glob
import logging
modname = 'ncbi_download'
def read_inputs(a, b, c):
'''
read target tables and merge them into a single dataframe
'''
dfa = pd.read_csv(a, sep='\t')
dfb = pd.read_csv(b, sep='\t')
dfc = pd.read_csv(c, sep='\t')
df = | pd.concat([dfa, dfb, dfc], ignore_index=True) | pandas.concat |
#!/usr/bin.env/python
# -*- coding: utf-8 -*-
"""
For the purpose of cytometry analysis we often think of a population
of cells as having a particular phenotype that can be identified by
sub-setting cells in one or two dimensional space. This results in
geometric objects that define a population. This module houses the
functionality around those geometric objects.
Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from ..flow import transform
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
from warnings import warn
from functools import partial
from matplotlib.patches import Ellipse
from scipy import linalg, stats
from scipy.spatial.qhull import ConvexHull, QhullError
from shapely.geometry import Polygon, Point
import mongoengine
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, cytopy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class PopulationGeometry(mongoengine.EmbeddedDocument):
"""
Geometric shape generated by non-threshold generating Gate
Attributes
-----------
x: str
Name of the X-dimension e.g. CD3, FSC-A etc
y: str
Name of the Y-dimension e.g. CD3, FSC-A etc
transform_x: str
Transformation method applied to the x-axis
transform_y: str
Transformation method applied to the y-axis
transform_x_kwargs: dict
Transformation keyword arguments for transform method applied to the x-axis
transform_y_kwargs: str
Transformation keyword arguments for transform method applied to the y-axis
"""
x = mongoengine.StringField()
y = mongoengine.StringField()
transform_x = mongoengine.StringField()
transform_y = mongoengine.StringField()
transform_x_kwargs = mongoengine.DictField()
transform_y_kwargs = mongoengine.DictField()
meta = {'allow_inheritance': True}
class ThresholdGeom(PopulationGeometry):
"""
Threshold shape. Inherits from PopulationGeometry.
Attributes
-----------
x_threshold: float
Threshold applied to the X-axis
y_threshold: float
Threshold applied to the Y-axis
"""
x_threshold = mongoengine.FloatField()
y_threshold = mongoengine.FloatField()
def transform_to_linear(self):
"""
Thresholds are transformed to their equivalent value in linear space
according to the transform defined. If transform is None, thresholds
are returned as saved.
Returns
-------
float, float
"""
x, y = self.x_threshold, self.y_threshold
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
transformer = transform.TRANSFORMERS.get(self.transform_x)(**kwargs)
x = transformer.inverse_scale(pd.DataFrame({"x": [self.x_threshold]}), features=["x"])["x"].values[0]
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
transformer = transform.TRANSFORMERS.get(self.transform_y)(**kwargs)
y = transformer.inverse_scale(pd.DataFrame({"y": [self.y_threshold]}), features=["y"])["y"].values[0]
return x, y
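# Minimal usage sketch (the example values and the "logicle" transform key are
# assumptions, not taken from this module):
# geom = ThresholdGeom(x="FSC-A", y="CD3", x_threshold=0.25, y_threshold=0.6,
#                      transform_x="logicle", transform_y="logicle")
# x_lin, y_lin = geom.transform_to_linear()  # thresholds mapped back to linear space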
class PolygonGeom(PopulationGeometry):
"""
Polygon shape. Inherits from PopulationGeometry.
Attributes
-----------
x_values: list
X-axis coordinates
y_values: list
Y-axis coordinates
"""
x_values = mongoengine.ListField()
y_values = mongoengine.ListField()
@property
def shape(self):
assert self.x_values is not None and self.y_values is not None, \
"x and y values not defined for this Polygon"
return create_polygon(self.x_values, self.y_values)
def transform_to_linear(self):
"""
x,y coordinates are transformed to their equivalent value in linear space
according to the transform defined. If transform is None, coordinates
are returned as saved.
Returns
-------
numpy.ndarray, numpy.ndarray
"""
x_values, y_values = self.x_values, self.y_values
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
transformer = transform.TRANSFORMERS.get(self.transform_x)(**kwargs)
x_values = transformer.inverse_scale(pd.DataFrame({"x": self.x_values}), features=["x"])["x"].values
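# Assumed completion of the truncated method (mirrors the x-axis branch above and
# ThresholdGeom.transform_to_linear); not part of the original snippet.
if self.transform_y:
    kwargs = self.transform_y_kwargs or {}
    transformer = transform.TRANSFORMERS.get(self.transform_y)(**kwargs)
    y_values = transformer.inverse_scale(pd.DataFrame({"y": self.y_values}), features=["y"])["y"].values
return x_values, y_values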
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
def buildFeatureFrame(filename,timepoint):
temp = np.asarray(np.load(filename,allow_pickle=True)).item()
image_props = measure.regionprops_table(temp['masks'],
intensity_image=temp['img'],
properties=('label','area','filled_area','bbox', 'centroid',
'eccentricity','solidity','convex_area',
'mean_intensity','min_intensity','max_intensity',
'orientation','major_axis_length','minor_axis_length',
'perimeter','extent','intensity_image'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
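# Example (hypothetical segmentation file saved as a dict with 'masks' and 'img' keys):
# features_t3 = buildFeatureFrame("003_seg.npy", timepoint=3)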
def buildOffsetFrame(filename_tminus1,filename_tplus1):
temp1 = np.asarray(np.load(filename_tminus1,allow_pickle=True)).item()
temp2 = np.asarray(np.load(filename_tplus1,allow_pickle=True)).item()
image_props = measure.regionprops_table(temp1['masks'],
intensity_image=temp2['img'],
properties=('label','centroid','area',"mean_intensity"))
im_df = pd.DataFrame(image_props)
im_df['time'] = None
return(im_df)
def linkEnergy(image1, image2, im1_select, im2_select):
deltaX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'][im2_select])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'][im2_select])**2)
deltaA = np.absolute(image1['area'][im1_select] - image2['area'][im2_select])
score = deltaX + np.sqrt(deltaA)
return(score)
def generateLinks(image1, image2, im1_select, dist_multiplier=2):
delX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'])**2)
max_dist = dist_multiplier*min(delX)
candidates = np.array(delX[delX < max_dist].index)
return(candidates)
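# Example (hypothetical feature frames): candidate frame-2 matches for cell 0 of frame 1,
# i.e. all frame-2 cells within 2x the nearest centroid distance.
# candidates = generateLinks(features_t1, features_t2, 0, dist_multiplier=2)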
def ScoreMotherArea(motherArea, ProgArea_1, Prog_Area_2, threshold=0.95):
if (motherArea/(ProgArea_1 + Prog_Area_2)) > threshold :
return(0)
else:
return(10)
def ScoreMotherInt(motherCurrentInt, motherNextInt):
normInt = (motherCurrentInt/(motherNextInt + 10**-4))
if (normInt > 2):
return(-1)
elif (normInt <= 2)&(normInt > 1.4):
return(0)
else:
return(1)
def ScoreProjInt(projCurrentInt, projPrevInt):
normInt = (projCurrentInt/(projPrevInt + 10**-4))
if (normInt > 2):
return(-1)
elif (normInt <= 2)&(normInt > 1):
return(0)
else:
return(1)
def ScoreProjDiff(proj1Int, proj2Int):
return(np.abs(proj1Int - proj2Int)/2)
def ScoreDivisionTotal(motherFrameCurr, motherFrameNext,
projFrameCurr, projFramePrev,
motherCell,projCell_1, projCell_2):
motherArea = ScoreMotherArea(motherFrameCurr["area"][motherCell],
projFrameCurr["area"][projCell_1],
projFrameCurr["area"][projCell_2])
motherInt = ScoreMotherInt(motherFrameCurr["mean_intensity"][motherCell],
motherFrameNext["mean_intensity"][motherCell])
projInt = -1 + ScoreProjInt(projFrameCurr["mean_intensity"][projCell_1], projFramePrev["mean_intensity"][projCell_1]) +ScoreProjInt(projFrameCurr["mean_intensity"][projCell_2], projFramePrev["mean_intensity"][projCell_2])
projIntDiff = ScoreProjDiff(projFrameCurr["mean_intensity"][projCell_1],
projFrameCurr["mean_intensity"][projCell_2])
projAreaDiff = ScoreProjDiff(projFrameCurr["area"][projCell_1],
projFrameCurr["area"][projCell_2])
return(motherArea + motherInt + projInt + projIntDiff + projAreaDiff)
def DivisionCandidate(motherFrameCurr, motherFrameNext,
projFrameCurr, projFramePrev,
motherCell, projCell_1, projCell_2_candidates, threshold=3):
tru_vec=[]
for i in projCell_2_candidates:
if(ScoreDivisionTotal(motherFrameCurr,motherFrameNext,
projFrameCurr,projFramePrev,
motherCell,projCell_1,i) < threshold):
tru_vec=np.append(tru_vec,True)
else:
tru_vec=np.append(tru_vec,False)
return(np.any(tru_vec))
def buildConnections(filename_t0,greedy=False,openingCost=2, nnDist=3, DivScoreThreshold=12):
time0 = filename_t0.split("/")[-1].split("_")[0] ;
time1 = str(int(time0)+1) ;
tmp_filename_t1 = time1+"_"+filename_t0.split("/")[-1].split("_")[1] ;
dirs = filename_t0.split("/")[:-1] ;
filename_t1 = "/".join(dirs)+"/"+tmp_filename_t1 ;
ip0 = buildFeatureFrame(filename_t0,time0)
ip1 = buildFeatureFrame(filename_t1,time1)
fx0 = buildOffsetFrame(filename_t0,filename_t1)
fx1 = buildOffsetFrame(filename_t1,filename_t0)
num=0
arr = pd.DataFrame([]).T
for i in np.array(ip0.index):
candidates = generateLinks(ip0, ip1, i, dist_multiplier=nnDist)
for j in range(len(candidates)):
proj1 = candidates[j]
proj2pairs = np.delete(candidates,j)
if(len(proj2pairs)>0):
divscore = (DivisionCandidate(motherFrameCurr=ip0,motherFrameNext=fx0,projFrameCurr=ip1,projFramePrev=fx1,motherCell=i,projCell_1=candidates[j],projCell_2_candidates=proj2pairs,threshold=DivScoreThreshold))
else:
divscore = False
arr = arr.append(pd.DataFrame([num,i,proj1,linkEnergy(ip0, ip1, i, proj1),divscore]).T)
num=num+1
arr.columns = ['index','prev','next','score','divisionCandidate']
arr.index = arr['index']
arr.iloc[np.array(arr[arr['divisionCandidate']].index),3] = np.array(arr[arr['divisionCandidate']]['score']/2)
if(greedy ==True):
nextFeatList = np.unique(arr['next'])
nextCrop = pd.DataFrame()
for next in nextFeatList:
subarr = arr[arr['next'] == next]
new = subarr[subarr.score == subarr.score.min()]
nextCrop = nextCrop.append(new)
prevFeatList = np.unique(nextCrop['prev'])
prevCrop = pd.DataFrame()
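# Assumed continuation (not in the original snippet): mirror the greedy pass above so
# each previous-frame feature also keeps only its lowest-score link.
for prev in prevFeatList:
    subarr = nextCrop[nextCrop['prev'] == prev]
    new = subarr[subarr.score == subarr.score.min()]
    prevCrop = prevCrop.append(new)
# The remainder of buildConnections (e.g. its return value) is not shown here.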
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 14:28:27 2020
@author: <NAME>
"""
import difflib  # for computing string similarity
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import copy  # used for deep copies
import matplotlib.ticker as mtick  # used to change axis tick formats
plt.rcParams['font.sans-serif'] = ['FangSong']  # set the default font
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box in saved figures
# Function that builds the cross-tab summary
def pivot1(listn, version):
# csv_data[csv_data['area'].isna()]
subset = csv_data[csv_data['area'].isin(listn)]
subset['list_date_short'] = subset['list_date'].apply(str).str[0:4]
global result
result = pd.crosstab(subset.list_date_short, subset.industry, margins = True)
result.to_excel(r'D:\桌面的文件夹\实习\睿丛\output_%s.xls' %version)
return
# The three levels of aggregation
list1 = ['南京', '苏州', '无锡', '常州', '镇江', '扬州', '泰州', '南通', '淮安', '连云港', '盐城', '徐州', '宿迁', '杭州', '宁波', '温州', '绍兴', '湖州', '嘉兴', '金华', '衢州', '台州', '丽水', '舟山', '合肥 ', '马鞍山', '淮北', '宿州', '阜阳', '蚌埠', '淮南', '滁州', '六安', '巢湖', '芜湖', '亳州', '安庆', '池州', '铜陵', '宣城', '黄山', '上海', '江苏', '安徽', '浙江']
list2 = ['南京', '苏州', '无锡', '常州', '镇江', '扬州', '泰州', '南通', '淮安', '连云港', '盐城', '徐州', '宿迁', '杭州', '宁波', '温州', '绍兴', '湖州', '嘉兴', '金华', '衢州', '台州', '丽水', '舟山', '上海', '江苏', '浙江']
list3 = ['上海']
# Load the data
csv_file = r'D:\桌面的文件夹\实习\睿丛\分年份、分行业统计长三角地区当年上市数量\df_stock.csv'
csv_data = pd.read_csv(csv_file, low_memory = False)  # suppress the mixed-dtype warning
print(csv_data)
csv_data.info()
csv_data.head()
csv_data.describe()
csv_data.head(50)
# Run the cross-tab summary at the three levels
pivot1(list1,'list1')
pivot1(list2,'list2')
pivot1(list3,'list3')
result # inspect the cross-tab result
# Process the industry names
# Prepare the Shenwan (SWS) industry classification data
Tpye=pd.read_excel(r'D:\桌面的文件夹\实习\睿丛\分年份、分行业统计长三角地区当年上市数量\申银万国行业分类标准 .xlsx',sheet_name='处理', header=None) # import the industry classification
type1 = Tpye.sort_values(1, axis=0) # sort by industry code, ascending
type1=type1.drop_duplicates(subset=0, keep='first', inplace=False, ignore_index=False) # drop duplicate rows; some parent and child categories share a name, so keep only the parent
type1=type1.rename(columns={0:'industry'}) # name the industry-name column
type1=type1.rename(columns={1:'code'}) # name the industry-code column
type1 = type1.set_index("industry") # use the industry name as the index to ease the later merge
print(type1.index.is_unique) # confirm the index has no duplicates
type1
# Insert an empty column at the front to hold the matching results
test=result.T.iloc[0:79,:] # drop the "All" total from the industry types
col_name=test.columns.tolist() # pull all column names of the dataframe into a list
col_name.insert(0,'new') # insert a column named 'new' at position 0; it starts out empty, i.e. all NaN
test=test.reindex(columns=col_name) # DataFrame.reindex() rebuilds the row/column index
test
# Match the SWS classification onto the original categories
test.iloc[:,0] = test.index.map(lambda x: difflib.get_close_matches(x, type1.index, cutoff=0.3,n=1)[0]) # map() applies a function to every element of an iterable
test.head(60) # inspect the matching results
test.iloc[61:81,:] # inspect the matching results
test.to_excel(r'D:\桌面的文件夹\实习\睿丛\行业分类匹配结果.xls') # export the matches; incorrectly matched items are fixed by hand in Excel (11 needed manual adjustment)
# Convert industry names to the SWS naming scheme.
# Import and tidy
data=pd.read_excel(r'D:\桌面的文件夹\实习\睿丛\行业分类匹配结果_修改后.xls', index_col = 'industry') # re-import the industry summary with the corrected classification
data = data.groupby(data.index).sum() # sum duplicated industries, since concat requires a unique index; note that child and parent industries are still mixed at this point
# Merge
outcome = pd.concat([data, type1], axis=1, join='inner', ignore_index=False) # merge on the index (works for object dtypes); 'inner' keeps the intersection, 'outer' the union. data's index is a subset of type1's, so inner is fine. axis=1 merges horizontally.
# Adjust the industry codes
outcome['code'] = outcome['code'].apply(str).str[0:2].map(lambda x: x+'0000') # convert each industry code to its first-level code, i.e. zero out the last four digits
outcome['code'] = outcome['code'].astype('int64')
# Build a new index
outcome1 = outcome.set_index('code')
outcome1 = outcome1.groupby(outcome1.index).sum()
type2 = type1.reset_index().set_index('code') # restore 'industry' from the index back to an ordinary column
outcome2 = pd.concat([outcome1, type2], axis=1, join='inner', ignore_index=False)
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsman4s("tendulkar.csv", "<NAME>")
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
df['Runs'] = pd.to_numeric(df['Runs'])
df['4s'] = pd.to_numeric(df['4s'])
df1 = df[['Runs','4s']].sort_values(by=['Runs'])
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get number of 4s and runs scored
runs = pd.to_numeric(df1['Runs'])
x4s = pd.to_numeric(df1['4s'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# batsman6s("tendulkar.csv","<NAME>")
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computed the Average Runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanAvgRunsGround("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation="vertical",fontsize=8)
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanAvgRunsOpposition("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation="vertical",fontsize=8)
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost("tendulkarsp.csv","<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + "-" + "- Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
batsmanCumulativeAverageRuns("tendulkar.csv", "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate("tendulkar.csv", "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Aug 2019
# Function: batsmanMeanStrikeRate
# This function plot the Mean Strike Rate of the batsman against Runs scored as a continous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
batsmanMeanStrikeRate {cricketr} R Documentation
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= pd.to_numeric(batsman['Runs'])
# Create the histogram
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
SR=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Filter runs that are are between 2 bins
batsman['Runs']=pd.to_numeric(batsman['Runs'])
a=(batsman['Runs'] > bins[i-1]) & (batsman['Runs'] <= bins[i])
df=batsman[a]
SR.append(np.mean(pd.to_numeric(df['SR']))) # Changed 28-8-2019
atitle = name + "-" + "Strike rate in run ranges"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(midBin, SR, alpha=0.5)
plt.plot(midBin, SR,color="r", alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('Strike Rate')
plt.title(atitle)
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanMovingAverage
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
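# Example (illustrative values): smooth a short runs sequence with a 3-innings window.
# movingaverage([10, 54, 3, 122, 41], 3)  # -> approx. [21.3, 22.3, 59.7, 55.3, 54.3]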
def batsmanMovingAverage(file,name="A Squarecut") :
'''
Calculate and plot the Moving Average of the batsman in his career
Description
This function calculates and plots the Moving Average of the batsman in his career
Usage
batsmanMovingAverage(file,name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMovingAverage("tendulkar.csv","<NAME>")
'''
# Compute the moving average of the time series
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
date= pd.to_datetime(batsman['Start Date'])
atitle = name + "'s Moving average (Runs)"
# Plot the runs in grey colour
plt.plot(date,runs,"-",color = '0.75')
# Compute and plot moving average
y_av = movingaverage(runs, 50)
plt.xlabel('Date')
plt.ylabel('Runs')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanPerfBoxHist
# This function makes a box plot showing the mean, median and the 25th & 75th percentile runs. The
# histogram shows the frequency of scoring runs in different run ranges
#
###########################################################################################
# Plot the batting performance as a combined box plot and histogram
def batsmanPerfBoxHist(file, name="A Hitter"):
'''
Make a boxplot and a histogram of the runs scored by the batsman
Description
Make a boxplot and histogram of the runs scored by the batsman. Plot the Mean, Median, 25th and 75th quantile
Usage
batsmanPerfBoxHist(file, name="A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsman4s("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
plt.subplot(2,1,1)
sns.boxplot(batsman['Runs'])
plt.subplot(2,1,2);
atitle = name + "'s" + " - Runs Frequency vs Runs"
plt.hist(batsman['Runs'],bins=20, edgecolor='black')
plt.xlabel('Runs')
plt.ylabel('Frequency')
plt.title(atitle,size=16)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from statsmodels.tsa.arima_model import ARIMA
import pandas as pd
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: batsmanPerfForecast
# This function forecasts the batsmans performance based on past performance -
# To update
###########################################################################################
def batsmanPerfForecast(file, name="A Squarecut"):
'''
# To do: Currently ARIMA is used.
Forecast the batting performance based on past performances using Holt-Winters forecasting
Description
This function forecasts the performance of the batsman based on past performances using HoltWinters forecasting model
Usage
batsmanPerfForecast(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanPerfForecast("tendulkar.csv","<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=batsman['Runs'].astype('float')
# Fit a ARIMA model
date= pd.to_datetime(batsman['Start Date'])
df=pd.DataFrame({'date':date,'runs':runs})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
plt.gcf().clear()
print(residuals.describe())
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanPerfHomeAway
# This plots the batsman's performance in home versus abroad
#
###########################################################################################
def batsmanPerfHomeAway(file,name="A Hitter"):
'''
This function analyses the performance of the batsman at home and overseas
Description
This function plots the runs scored by the batsman at home and overseas
Usage
batsmanPerfHomeAway(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist bowlerContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarSp <-getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanPerfHomeAway("tendulkarsp.csv","<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create separate DFs for home and away
home = playersp[playersp['ha'] == 1]
away = playersp[playersp['ha']==2]
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Runs']= pd.to_numeric(df['Runs'])
atitle = name + "-" + "- - Runs-Home & overseas"
ax = sns.boxplot(x='venue',y='Runs',data=df)
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 30 Jun 2015
# Function: batsmanRunsFreqPerf
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Plot the performance of the batsman as a continous graph
# Create a performance plot between Runs and RunsFrequency
def batsmanRunsFreqPerf(file, name="A Hookshot"):
'''
Calculate and run frequencies in ranges of 10 runs and plot versus Runs the performance of the batsman
Description
This function calculates frequencies of runs in 10 run buckets and plots this percentage
Usage
batsmanRunsFreqPerf(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsFreqPerf("tendulkar.csv","<NAME>")
'''
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(df['Runs'])
# Plot histogram
runs.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Runs histogram"
plt.title(atitle)
plt.xlabel('Runs')
plt.grid(axis='y', alpha=0.75)
plt.text(180, 90,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanRunsLikelihood
# This function used K-Means to compute and plot the runs likelihood for the batsman
# To do - Include scatterplot
###########################################################################################
def batsmanRunsLikelihood(file, name="A Squarecut") :
'''
This function uses K-Means to determine the likelihood of the batsman to get runs
Description
This function used K-Means to get the likelihood of getting runs based on clusters of runs the batsman made in the past.It uses K-Means for this.
Usage
batsmanRunsLikelihood(file, name = "A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsLikelihood("tendulkar.csv","Sachin Tendulkar")
'''
batsman =clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
data = batsman[['Runs','BF','Mins']]
# Create 3 different clusters
kmeans = KMeans(n_clusters=3,max_iter=300)
# Compute the clusters
kmeans.fit(data)
y_kmeans = kmeans.predict(data)
# Get the cluster centroids
centers = kmeans.cluster_centers_
centers
# Add a title
atitle= name + '-' + "Runs Likelihood"
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Draw vertical line 1st centroid
x=[centers[0][0],centers[0][0]]
y=[centers[0][1],centers[0][1]]
z=[0,centers[0][2]]
ax.plot(x,y,z,'k-',color='r',alpha=0.8, linewidth=2)
# Draw vertical line 2nd centroid
x=[centers[1][0],centers[1][0]]
y=[centers[1][1],centers[1][1]]
z=[0,centers[1][2]]
ax.plot(x,y,z,'k-',color='b',alpha=0.8, linewidth=2)
# Draw vertical line 3rd centroid
x=[centers[2][0],centers[2][0]]
y=[centers[2][1],centers[2][1]]
z=[0,centers[2][2]]
ax.plot(x,y,z,'k-',color='k',alpha=0.8, linewidth=2)
ax.set_xlabel('BallsFaced')
ax.set_ylabel('Minutes')
ax.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
from sklearn.linear_model import LinearRegression
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanRunsPredict
# This function predicts the runs that will be scored by the batsman for a given numbers
# of balls faced and minutes at crease
#
###########################################################################################
def batsmanRunsPredict(file, newDF, name="A Coverdrive"):
'''
Predict the runs for the batsman given the Balls Faced and Minutes in crease
Description
Fit a linear regression plane between Runs scored and Minutes in Crease and Balls Faced. This will be used to predict the batsman runs for time in crease and balls faced
Usage
batsmanRunsPredict(file, name="A Coverdrive", newdataframe)
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
newdataframe
This is a data frame with 2 columns BF(Balls Faced) and Mins(Minutes)
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns a data frame with the predicted runs for the Balls Faced and Minutes at crease
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting",
# homeOrAway=c(1,2), result=c(1,2,4))
# Use a single value for BF and Mins
BF= 30
Mins= 20
BF = np.linspace( 10, 400,15)
Mins = np.linspace(30,220,15)
newDF= pd.DataFrame({'BF':BF,'Mins':Mins})
# retrieve the file path of a data file installed with cricketr
pathToFile <- system.file("data", "tendulkar.csv", package = "cricketr")
batsmanRunsPredict("tendulkar.csv",newDF, "<NAME>")
'''
batsman = clean(file)
df=batsman[['BF','Mins','Runs']]
df['BF']=pd.to_numeric(df['BF'])
df['Runs']=pd.to_numeric(df['Runs'])
xtrain=df.iloc[:,0:2]
ytrain=df.iloc[:,2]
linreg = LinearRegression().fit(xtrain, ytrain)
newDF['Runs']=linreg.predict(newDF)
return(newDF)
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanRunsRanges
# This plots the percentage runs in different run ranges
#
###########################################################################################
def batsmanRunsRanges(file, name= "A Hookshot") :
'''
Compute and plot a histogram of the runs scored in ranges of 10
Description
Compute and plot a histogram of the runs scored in ranges of 10
Usage
batsmanRunsRanges(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsRanges(pathToFile,"<NAME>")
'''
# Clean file
batsman = clean(file)
runs= pd.to_numeric(batsman['Runs'])
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Compute binWidth. Subtract '2' to separate the bars
binWidth=bins[1]-bins[0]-2
# Plot a barplot
plt.bar(midBin, hist, bins[1]-bins[0]-2, color="blue")
plt.xlabel('Run ranges')
plt.ylabel('Frequency')
# Add a title
atitle= name + '-' + "Runs % vs Run frequencies"
plt.title(atitle)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import LinearRegression
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: battingPerf3d
# This function creates a 3D scatter plot of Runs scored vs Balls Faced and Minutes in crease.
# A regression plane is fitted to this.
#
###########################################################################################
def battingPerf3d(file, name="A Hookshot") :
'''
Make a 3D scatter plot of the Runs scored versus the Balls Faced and Minutes at Crease.
Description
Make a 3D plot of the Runs scored by batsman vs Minutes in crease and Balls faced. Fit a linear regression plane
Usage
battingPerf3d(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar<- getPlayerData(35320,file="tendulkar.csv",type="batting",
#homeOrAway=[1,2],result=[1,2,4])
battingPerf3d("tendulkar.csv","<NAME>")
'''
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
batsman = clean(file)
# Make a 3 D plot and fit a regression plane
atitle = name + "- Runs vs BallsFaced & Minutes"
df2=batsman[['BF','Mins','Runs']]
df2['BF']=pd.to_numeric(df2['BF'])
df2['Mins']=pd.to_numeric(df2['Mins'])
df2['Runs']=pd.to_numeric(df2['Runs'])
X=df2.iloc[:,0:2]
Y=df2.iloc[:,2]
# Fit a regression plane
linreg = LinearRegression().fit(X,Y)
bf= np.linspace(0,400,20)
mins=np.linspace(0,620,20)
xx, yy = np.meshgrid(bf,mins)
xx1=xx.reshape(-1)
yy1=yy.reshape(-1)
test=pd.DataFrame({"BallsFaced": xx1, "Minutes":yy1})
predictedRuns=linreg.predict(test).reshape(20,20)
plt3d = plt.figure().gca(projection='3d')
plt3d.scatter(df2['BF'],df2['Mins'],df2['Runs'])
plt3d.plot_surface(xx.reshape(20,20),yy,predictedRuns, alpha=0.2)
plt3d.set_xlabel('BallsFaced')
plt3d.set_ylabel('Minutes')
plt3d.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
# To do - Append number of matches to Ground
###########################################################################################
def bowlerAvgWktsGround(file, name="A Chinaman"):
'''
This function computes and plot the average wickets in different ground
Description
This function computes the average wickets taken against different grounds by the bowler. It also shows the number innings at each venue
Usage
bowlerAvgWktsGround(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsGround("kumble.csv","<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Wkts','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "-" + "'s Average Wickets at Ground"
plt.xticks(rotation="vertical",fontsize=8)
plt.axhline(y=4, color='r', linestyle=':')
plt.title(atitle)
ax=sns.barplot(x='Ground', y="Wkts_mean", data=df1)
#plt.bar(df1['Ground'],df1['Wkts_mean'])
plt.text(15, 4,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsOpposition
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
# To do - Append no of matches in Opposition
###########################################################################################
def bowlerAvgWktsOpposition(file, name="A Chinaman"):
'''
This function computes and plot the average wickets against different oppositon
Description
This function computes the average wickets taken against different opposition by the bowler. It also shows the number innings against each opposition
Usage
bowlerAvgWktsOpposition(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerAvgWktsGround
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsOpposition("kumble.csv","Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Opposition','Wkts']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "-" + "'s Average Wickets vs Opposition"
plt.xticks(rotation="vertical",fontsize=8)
plt.axhline(y=3, color='r', linestyle=':')
ax=sns.barplot(x='Opposition', y="Wkts_mean", data=df1)
plt.title(atitle)
plt.text(2, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerContributionWonLost
# This plots the bowler's contribution to won and lost matches
#
###########################################################################################
def bowlerContributionWonLost(file,name="A Doosra"):
'''
Display the bowler's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the bowler in matches that were won and lost as box plots
Usage
bowlerContributionWonLost(file, name = "A Doosra")
Arguments
file
CSV file of bowler from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerContributionWonLost("kumblesp.csv","<NAME>")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create DFs for won and lost/drawn
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack DFs
df= pd.concat([won,lost])
df['Wkts']= pd.to_numeric(df['Wkts'])
ax = sns.boxplot(x='status',y='Wkts',data=df)
atitle = name + "- Wickets in games won/lost-drawn"
plt.xlabel('Status')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
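##########################################################################################
# Illustration: the label-and-concat pattern used in bowlerContributionWonLost above, on
# made-up data, showing how the 'status' column drives the seaborn boxplot grouping.
##########################################################################################
def _demoWonLostConcat():
    import pandas as pd
    won = pd.DataFrame({'Wkts': [4, 5, 6]})
    lost = pd.DataFrame({'Wkts': [1, 2]})
    won['status'] = "won"
    lost['status'] = "lost"
    # Long-form frame suitable for sns.boxplot(x='status', y='Wkts', data=...)
    return pd.concat([won, lost])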
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgEconRate
# This function computes and plots the cumulative average economy rate of a bowler
#
###########################################################################################
def bowlerCumulativeAvgEconRate(file,name="A Googly"):
'''
Bowler's cumulative average economy rate
Description
This function computes and plots the cumulative average economy rate of a bowler
Usage
bowlerCumulativeAvgEconRate(file,name)
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgWickets batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgEconRate("kumble.csv","<NAME>")
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
economyRate=pd.to_numeric(bowler['Econ'])
cumEconomyRate = economyRate.cumsum()/pd.Series(np.arange(1, len(economyRate)+1), economyRate.index)
atitle = name + "- Cumulative Economy Rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Economy Rate')
plt.title(atitle)
plt.plot(cumEconomyRate)
plt.text(150,3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
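##########################################################################################
# Illustration: the cumulative average used above is simply cumsum divided by the running
# innings count. A tiny sketch with made-up economy rates:
##########################################################################################
def _demoCumulativeAverage():
    import pandas as pd
    import numpy as np
    econ = pd.Series([2.0, 4.0, 3.0])
    cumAvg = econ.cumsum() / pd.Series(np.arange(1, len(econ) + 1), econ.index)
    return cumAvg   # 2.0, 3.0, 3.0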
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgWickets
# This function computes and plots the cumulative average wickets of a bowler
#
###########################################################################################
def bowlerCumulativeAvgWickets(file,name="A Googly"):
'''
Bowler's cumulative average wickets
Description
This function computes and plots the cumulative average wickets of a bowler
Usage
bowlerCumulativeAvgWickets(file,name)
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgWickets("kumble.csv","<NAME>")
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wktRate=pd.to_numeric(bowler['Wkts'])
cumWktRate = wktRate.cumsum()/pd.Series(np.arange(1, len(wktRate)+1), wktRate.index)
atitle = name + "- Cumulative Mean Wicket Rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Mean Wickets')
plt.title(atitle)
plt.plot(cumWktRate)
plt.text(150,3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerEconRate
# This function plots the Frequency percentage of wickets taken for the bowler
#
###########################################################################################
def bowlerEconRate(file, name="A Bowler") :
'''
Compute and plot the Mean Economy Rate versus wickets taken
Description
This function computes the mean economy rate for the wickets taken and plot this
Usage
bowlerEconRate(file, name = "A Bowler")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble <- getPlayerData(30176,dir=".", file="kumble.csv",type="batting",
# homeOrAway=[1,2],result=[1,2,4])
bowlerEconRate("kumble.csv","Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
bowler['Econ']=pd.to_numeric(bowler['Econ'])
atitle = name + "-" + "- Mean economy rate vs Wkts"
df=bowler[['Wkts','Econ']].groupby('Wkts').mean()
df = df.reset_index(inplace=False)
ax=plt.plot('Wkts','Econ',data=df)
plt.xlabel('Wickets')
plt.ylabel('Economy Rate')
plt.title(atitle)
plt.text(6, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerMovingAverage
# This function computes and plots the Moving Average of the Wickets taken for a bowler
# across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
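##########################################################################################
# Illustration: movingaverage() above uses a 'same'-mode convolution with a uniform
# window, so the ends of the series are averaged over fewer real points. A small sketch
# on made-up data:
##########################################################################################
def _demoMovingAverage():
    import numpy as np
    data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    smoothed = movingaverage(data, 3)
    # Middle values are true 3-point means (2.0, 3.0, 4.0); the end values are damped
    # because the window extends past the data and the missing samples count as zero.
    return smoothed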
def bowlerMovingAverage(file,name="A Doosra") :
'''
Compute and plot the moving average of the wickets taken for a bowler
Description
This function plots the wickets taken by a bowler as a time series and plots the moving average over the career
Usage
bowlerMovingAverage(file, name = "A Doosra")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble = getPlayerData(30176,file="kumble.csv",type="bowling", homeOrAway=[1,2],result=[1,2,4])
bowlerMovingAverage("kumble.csv","Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wkts=pd.to_numeric(bowler['Wkts'])
date= pd.to_datetime(bowler['Start Date'])
atitle = name + "'s Moving average (Wickets)"
# Plot the wickets in grey colour
plt.plot(date,wkts,"-",color = '0.75')
y_av = movingaverage(wkts, 50)
plt.xlabel('Date')
plt.ylabel('Wickets')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: bowlerPerfForecast
# This function forecasts the bowler's performance based on past performance
#
###########################################################################################
def bowlerPerfForecast(file, name="A Googly"):
'''
# To do- Currently based on ARIMA
Forecast the bowler performance based on past performances
Description
This function fits an ARIMA model to the bowler's wickets time series and displays the model summary and residual diagnostics (see the To do note above about moving to Holt-Winters forecasting)
Usage
bowlerPerfForecast(file, name = "A Googly")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerEconRate, bowlerMovingAverage, bowlerContributionWonLost
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble = getPlayerData(30176,file="kumble.csv",type="bowling", homeOrAway=[1,2],result=[1,2,4])
bowlerPerfForecast("kumble.csv","Anil Kumble")
'''
bowler= cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wkts=bowler['Wkts'].astype('float')
date= pd.to_datetime(bowler['Start Date'])
df=pd.DataFrame({'date':date,'Wickets':wkts})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
atitle=name+"-ARIMA plot"
plt.title(atitle)
plt.show()
residuals.plot(kind='kde')
atitle=name+"-ARIMA plot"
plt.title(atitle)
plt.show()
plt.gcf().clear()
print(residuals.describe())
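##########################################################################################
# Illustration: bowlerPerfForecast above only inspects residuals. A hedged sketch of
# producing an actual forecast from a fitted model is shown below; it assumes the legacy
# statsmodels.tsa.arima_model API used in this file (newer statsmodels versions moved
# ARIMA to statsmodels.tsa.arima.model and dropped the disp argument).
##########################################################################################
def _demoArimaForecast(df1, steps=5):
    from statsmodels.tsa.arima_model import ARIMA
    model = ARIMA(df1, order=(5, 1, 0))
    model_fit = model.fit(disp=0)
    # With the legacy API, forecast() returns (forecast, stderr, conf_int)
    forecast, stderr, conf_int = model_fit.forecast(steps=steps)
    return forecast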
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerPerfHomeAway
# This plots the bowler's performance home and abroad
#
###########################################################################################
def bowlerPerfHomeAway(file,name="A Googly") :
'''
This function analyses the performance of the bowler at home and overseas
Description
This function plots the wickets taken by the bowler at home and overseas
Usage
bowlerPerfHomeAway(file, name = "A Googly")
Arguments
file
CSV file of the bowler from ESPN Cricinfo (for e.g. Kumble's profile no:30176)
name
Name of bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm bowlerContributionWonLost
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumblesp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerPerfHomeAway("kumblesp.csv","Anil Kumble")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
#
home = playersp[playersp['ha'] == 1]
away = playersp[playersp['ha']==2]
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Wkts']= pd.to_numeric(df['Wkts'])
atitle = name + "- Wickets-Home & overseas"
ax = sns.boxplot(x='venue',y='Wkts',data=df)
plt.xlabel('Venue')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerWktsFreqPercent
# This function plots the Frequency percentage of wickets taken for the bowler
#
###########################################################################################
def bowlerWktsFreqPercent(file, name="A Bowler"):
'''
Plot the Wickets Frequency as a percentage against wickets taken
Description
This function calculates the wickets frequency as a percentage of total wickets taken and plots this against the wickets taken.
Usage
bowlerWktsFreqPercent(file, name="A Bowler")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a =getPlayerData(30176,file="kumble.csv",type="bowling", homeOrAway=[1,2],result=[1,2,4])
bowlerWktsFreqPercent("kumble.csv","Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a table of wickets
wkts = pd.to_numeric(bowler['Wkts'])
wkts.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Wickets histogram"
plt.title(atitle)
plt.xlabel('Wickets')
plt.grid(axis='y', alpha=0.75)
plt.text(5,10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerWktsRunsPlot
# This function makes a boxplot of wickets versus runs conceded
###########################################################################################
def bowlerWktsRunsPlot(file, name="A Googly"):
'''
Compute and plot the runs conceded versus the wickets taken
Description
This function creates boxplots on the runs conceded for wickets taken for the bowler
Usage
bowlerWktsRunsPlot(file, name = "A Googly")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerHistWickets
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble =getPlayerData(30176,file="kumble.csv",type="bowling", homeOrAway=[1,2],result=[1,2,4])
bowlerWktsRunsPlot("kumble.csv","Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
atitle = name + "- Wickets vs Runs conceded"
ax = sns.boxplot(x='Wkts', y='Runs', data=bowler)
plt.title(atitle)
plt.xlabel('Wickets')
plt.show()
plt.gcf().clear()
return
import pandas as pd
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Aug 2019
# Function : clean
# This function cleans the batsman's data file and returns the cleaned data frame for use in
# other functions
##########################################################################################
def clean(batsmanCSV):
'''
Create a batsman data frame given the batsman's CSV file
Description
The function removes rows from the batsman dataframe where the batsman did not bat (DNB) or the team did not bat (TDNB). Converts not outs '*' (97*, 128*) to 97,128 by stripping the '*' character. It picks all the complete cases and returns the data frame
Usage
clean(file)
Arguments
file
CSV file with the batsman data obtained with getPlayerData
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the cleaned batsman dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html https://gigadom.wordpress.com/
See Also
cleanBowlerData getPlayerData batsman4s batsmanMovingAverage
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
clean(pathToFile)
'''
df = pd.read_csv(batsmanCSV,dtype=str,na_values=['-'])
a = df['Runs'] != "DNB"
batsman = df[a]
# Remove rows with 'TDNB'
c =batsman['Runs'] != "TDNB"
batsman = batsman[c]
# Remove rows with absent
d = batsman['Runs'] != "absent"
batsman = batsman[d]
# Remove the "* indicating not out
batsman['Runs']= batsman['Runs'].str.replace(r"[*]","")
# Fix the Opposition column, remove "^ v"
batsman['Opposition'] =batsman['Opposition'].str.replace("v ","")
# Drop rows which have NA
batsman = batsman.dropna()
#Return the data frame
return(batsman)
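##########################################################################################
# Illustration: what clean() above does to the Runs and Opposition columns, shown on a
# tiny made-up frame rather than a real <batsman>.csv file.
##########################################################################################
def _demoCleanSteps():
    import pandas as pd
    df = pd.DataFrame({'Runs': ['97*', 'DNB', '12', 'TDNB', 'absent'],
                       'Opposition': ['v Australia'] * 5})
    df = df[df['Runs'] != 'DNB']
    df = df[df['Runs'] != 'TDNB']
    df = df[df['Runs'] != 'absent']
    # Strip the trailing '*' marking not-out (clean() above uses str.replace with a regex)
    df['Runs'] = df['Runs'].str.rstrip('*')              # '97*' -> '97'
    df['Opposition'] = df['Opposition'].str.replace("v ", "")  # 'v Australia' -> 'Australia'
    return df.dropna()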
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Aug 2019
# Function : cleanBowlerData
# This function cleans the bowler's data file and returns the cleaned data frame for use in
# other functions
##########################################################################################
def cleanBowlerData(file):
'''
Clean the bowlers data frame
Description
Clean the bowler's CSV file and remove rows DNB (Did not bowl) & TDNB (Team did not bowl). Also normalize any 8-ball overs to 6-ball overs for earlier bowlers
Usage
cleanBowlerData(file)
Arguments
file
The <bowler>.csv file
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
A cleaned bowler data frame with complete cases
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
clean
Examples
# Get bowling data and store in file for future
# kumble <- getPlayerData(30176,dir="./mytest", file="kumble.csv",type="bowling",
# homeOrAway=[1],result=[1,2])
cleanBowlerData(pathToFile)
'''
# Read the <bowler>.csv file
df = pd.read_csv(file,dtype=str,na_values=['-'])
# Remove rows with did not bowl
a = df['Overs']!= "DNB"
df = df[a]
# Remove rows with 'TDNB' - team did not bowl
c =df['Overs'] != "TDNB"
df = df[c]
# Fix the Opposition column, remove "^ v"
df['Opposition'] =df['Opposition'].str.replace("v ","")
# Get all complete cases
bowlerComplete = df.dropna(axis=1)
# Normalize overs bowled in matches that used 8 balls per over to their 6-ball equivalent
if bowlerComplete.columns[2] =="BPO":
bowlerComplete['Overs'] = pd.to_numeric(bowlerComplete['Overs']) *8/6
return(bowlerComplete)
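##########################################################################################
# Illustration: the BPO adjustment above rescales eight-ball overs into six-ball
# equivalents, e.g. 30 eight-ball overs contain 240 balls, i.e. 240/6 = 40 six-ball
# overs. A tiny sketch with a made-up 'Overs' column:
##########################################################################################
def _demoEightBallOvers():
    import pandas as pd
    overs_8ball = pd.Series(['30', '12.0'])
    return pd.to_numeric(overs_8ball) * 8 / 6   # 40.0 and 16.0 six-ball overs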
import pandas as pd
import os
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function : getPlayerData
# This function gets the data of batsman/bowler and returns the data frame. This data frame can
# stored for use in other functions
##########################################################################################
def getPlayerData(profile,opposition="",host="",dir="./data",file="player001.csv",type="batting",
homeOrAway=[1,2],result=[1,2,4],create=True) :
'''
Get the player data from ESPN Cricinfo based on specific inputs and store in a file in a given directory
Description
Get the player data given the profile of the batsman. The allowed inputs are home,away or both and won,lost or draw of matches. The data is stored in a <player>.csv file in a directory specified. This function also returns a data frame of the player
Usage
getPlayerData(profile,opposition="",host="",dir="./data",file="player001.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
Arguments
profile
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For <NAME> this turns out to be http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
opposition
The numerical value of the opposition country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,New Zealand:5,Pakistan:7,South Africa:3,Sri Lanka:8, West Indies:4, Zimbabwe:9
host
The numerical value of the host country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,New Zealand:5,Pakistan:7,South Africa:3,Sri Lanka:8, West Indies:4, Zimbabwe:9
dir
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
file
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
type
type of data required. This can be "batting" or "bowling"
homeOrAway
This is a list with either 1,2 or both. 1 is for home 2 is for away
result
This is a list that can take values 1,2,4. 1 - won match 2- lost match 4- draw
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerDataSp
Examples
## Not run:
# Both home and away. Result = won,lost and drawn
tendulkar = getPlayerData(35320,dir=".", file="tendulkar1.csv",
type="batting", homeOrAway=[1,2],result=[1,2,4])
# Only away. Get data only for won and lost innings
tendulkar = getPlayerData(35320,dir=".", file="tendulkar2.csv",
type="batting",homeOrAway=[2],result=[1,2])
# Get bowling data and store in file for future
kumble = getPlayerData(30176,dir=".",file="kumble1.csv",
type="bowling",homeOrAway=[1],result=[1,2])
#Get the Tendulkar's Performance against Australia in Australia
tendulkar = getPlayerData(35320, opposition = 2,host=2,dir=".",
file="tendulkarVsAusInAus.csv",type="batting")
'''
# Initial url to ""
url =""
suburl1 = "http://stats.espncricinfo.com/ci/engine/player/"
suburl2 ="?class=1;"
suburl3 = "template=results;"
suburl4 = "view=innings"
#Set opposition
theOpposition = "opposition=" + opposition + ";"
# Set host country
hostCountry = "host=" + host + ";"
# Create a profile.html with the profile number
player = str(profile) + ".html"
# Set the home or away
str1=str2=""
#print(len(homeOrAway))
for i in homeOrAway:
if i == 1:
str1 = str1 + "home_or_away=1;"
elif i == 2:
str1 = str1 + "home_or_away=2;"
elif i == 3:
str1 = str1 + "home_or_away=3;"
HA= str1
# Set the type batting or bowling
t = "type=" + type + ";"
# Set the result based on input
str2=""
for i in result:
if i == 1:
str2 = str2+ "result=1;"
elif i == 2:
str2 = str2 + "result=2;"
elif i == 4:
str2 = str2 + "result=4;"
result = str2
# Create composite URL
url = suburl1 + player + suburl2 + hostCountry + theOpposition + HA + result + suburl3 + t + suburl4
#print(url)
# Read the data from ESPN Cricinfo
dfList= pd.read_html(url)
# Choose appropriate table from list of returned tables
df=dfList[3]
colnames= df.columns
# Select columns based on batting or bowling
if type=="batting" :
# Select columns [1:9,11,12,13]
cols = list(range(0,9))
cols.extend([10,11,12])
elif type=="bowling":
# Check if there are the older version of 8 balls per over (BPO) column
# [1:8,10,11,12]
# Select BPO column for older bowlers
if colnames[1] =="BPO":
# [1:8,10,11,12]
cols = list(range(0,9))
cols.extend([10,11,12])
else:
# Select columns [1:7,9,10,11]
cols = list(range(0,8))
cols.extend([8,9,10])
#Subset the necessary columns
df1 = df.iloc[:, cols]
if not os.path.exists(dir):
os.mkdir(dir)
#print("Directory " , dir , " Created ")
else:
pass
#print("Directory " , dir , " already exists, writing to this folder")
# Create path
path= os.path.join(dir,file)
if create:
# Write to file
df1.to_csv(path)
# Return the data frame
return(df1)
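##########################################################################################
# Illustration: a reconstruction of the composite Statsguru URL that getPlayerData()
# builds, mirroring the concatenation above for the docstring's Tendulkar example
# (profile 35320, batting, homeOrAway=[1], result=[1], host/opposition left blank).
##########################################################################################
def _demoCompositeUrl():
    return ("http://stats.espncricinfo.com/ci/engine/player/35320.html?class=1;"
            "host=;opposition=;home_or_away=1;result=1;"
            "template=results;type=batting;view=innings")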
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: getPlayerDataSp
# This function is a specialized version of getPlayer Data. This function gets the players data
# along with details on matches' venue( home/abroad) and the result (won,lost,drawn) as
# 2 separate columns
#
###########################################################################################
def getPlayerDataSp(profileNo,tdir="./data",tfile="player001.csv",ttype="batting"):
'''
Get the player data along with venue and result status
Description
This function is a specialized version of getPlayer Data. This function gets the players data along with details on matches' venue (home/abroad) and the result of match(won,lost,drawn) as 2 separate columns (ha & result). The column ha has 1:home and 2: overseas. The column result has values 1:won , 2;lost and :drawn match
Usage
getPlayerDataSp(profileNo, tdir = "./data", tfile = "player001.csv",
ttype = "batting")
Arguments
profileNo
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For <NAME> this turns out to be http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
tdir
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./tdata"
tfile
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
ttype
type of data required. This can be "batting" or "bowling"
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe along with the homeAway and the result columns
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerData
Examples
## Not run:
# Only away. Get data only for won and lost innings
tendulkar = getPlayerDataSp(35320,tdir="..", tfile="tendulkarsp.csv",ttype="batting")
# Get bowling data and store in file for future
kumble = getPlayerDataSp(30176,tdir="..",tfile="kumblesp.csv",ttype="bowling")
## End(Not run)
'''
# Get the data for the player i
# Home & won
hw = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[1],type=ttype,create=False)
# Home & lost
hl = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[2],type=ttype,create=False)
# Home & drawn
hd = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[4],type=ttype,create=False)
# Away and won
aw = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[1],type=ttype,create=False)
#Away and lost
al = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[2],type=ttype,create=False)
# Away and drawn
ad = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[4],type=ttype,create=False)
# Set the values as follows
# ha := home = 1, away =2
# result= won = 1, lost = 2, drawn=4
hw['ha'] = 1
hw['result'] = 1
hl['ha'] = 1
hl['result'] = 2
hd['ha'] = 1
hd['result'] = 4
aw['ha'] = 2
aw['result'] = 1
al['ha'] = 2
al['result'] = 2
ad['ha'] = 2
ad['result'] = 4
if not os.path.exists(tdir):
os.mkdir(tdir)
#print("Directory " , dir , " Created ")
else:
pass
#print("Directory " , dir , " already exists, writing to this folder")
# Create path
path= os.path.join(tdir,tfile)
df= pd.concat([hw,hl,hd,aw,al,ad])
# Write to file
df.to_csv(path,index=False)
return(df)
import pandas as pd
import os
##########################################################################################
# Designed and developed by <NAME>
# Date : 7 Oct 2018
# Function : getPlayerDataOD
# This function gets the One Day data of batsman/bowler and returns the data frame. This data frame can
# stored for use in other functions
##########################################################################################
def getPlayerDataOD(profile,opposition="",host="",dir="./data",file="player001.csv",type="batting",
homeOrAway=[1,2,3],result=[1,2,3,5],create=True) :
'''
Get the One day player data from ESPN Cricinfo based on specific inputs and store in a file in a given directory
Description
Get the player data given the profile of the batsman. The allowed inputs are home,away or both and won,lost or draw of matches. The data is stored in a <player>.csv file in a directory specified. This function also returns a data frame of the player
Usage
getPlayerDataOD(profile, opposition="",host="",dir = "../", file = "player001.csv",
type = "batting", homeOrAway = c(1, 2, 3), result = c(1, 2, 3,5))
Arguments
profile
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For <NAME> this turns out to be http://www.espncricinfo.com/india/content/player/35263.html. Hence the profile for Sehwag is 35263
opposition
The numerical value of the opposition country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,Bermuda:12, England:1,Hong Kong:19,India:6,Ireland:29, Netherlands:15,New Zealand:5,Pakistan:7,Scotland:30,South Africa:3,Sri Lanka:8,United Arab Emirates:27, West Indies:4, Zimbabwe:9; Africa XI:405 Note: If no value is entered for opposition then all teams are considered
host
The numerical value of the host country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,Ireland:29,Malaysia:16,New Zealand:5,Pakistan:7, Scotland:30,South Africa:3,Sri Lanka:8,United Arab Emirates:27,West Indies:4, Zimbabwe:9 Note: If no value is entered for host then all host countries are considered
dir
Name of the directory to store the player data into. If not specified the data is stored in a default directory "../data". Default="../data"
file
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
type
type of data required. This can be "batting" or "bowling"
homeOrAway
This is vector with either or all 1,2, 3. 1 is for home 2 is for away, 3 is for neutral venue
result
This is a vector that can take values 1,2,3,5. 1 - won match 2- lost match 3-tied 5- no result
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerDataSp getPlayerData
Examples
## Not run:
# Both home and away. Result = won,lost and drawn
sehwag =getPlayerDataOD(35263,dir="../cricketr/data", file="sehwag1.csv",
type="batting", homeOrAway=[1,2],result=[1,2,3,4])
# Only away. Get data only for won and lost innings
sehwag = getPlayerDataOD(35263,dir="../cricketr/data", file="sehwag2.csv",
type="batting",homeOrAway=[2],result=[1,2])
# Get bowling data and store in file for future
malinga = getPlayerDataOD(49758,dir="../cricketr/data",file="malinga1.csv",
type="bowling")
# Get Dhoni's ODI record in Australia against Australia
dhoni = getPlayerDataOD(28081,opposition = 2,host=2,dir=".",
file="dhoniVsAusinAusOD",type="batting")
## End(Not run)
'''
# Initial url to ""
url =""
suburl1 = "http://stats.espncricinfo.com/ci/engine/player/"
suburl2 ="?class=2;"
suburl3 = "template=results;"
suburl4 = "view=innings"
#Set opposition
theOpposition = "opposition=" + opposition + ";"
# Set host country
hostCountry = "host=" + host + ";"
# Create a profile.html with the profile number
player = str(profile) + ".html"
# Set the home or away
str1=str2=""
#print(len(homeOrAway))
for i in homeOrAway:
if i == 1:
str1 = str1 + "home_or_away=1;"
elif i == 2:
str1 = str1 + "home_or_away=2;"
elif i == 3:
str1 = str1 + "home_or_away=3;"
HA= str1
# Set the type batting or bowling
t = "type=" + type + ";"
# Set the result based on input
str2=""
for i in result:
if i == 1:
str2 = str2+ "result=1;"
elif i == 2:
str2 = str2 + "result=2;"
elif i == 3:
str2 = str2 + "result=3;"
elif i == 5:
str2 = str2 + "result=5;"
result = str2
# Create composite URL
url = suburl1 + player + suburl2 + hostCountry + theOpposition + HA + result + suburl3 + t + suburl4
#print(url)
# Read the data from ESPN Cricinfo
dfList= pd.read_html(url)
# Choose appropriate table from list of returned tables
df=dfList[3]
colnames= df.columns
# Select columns based on batting or bowling
if type=="batting" :
# Select columns [1:9,11,12,13]
cols = list(range(0,9))
cols.extend([10,11,12])
elif type=="bowling":
# Check if there are the older version of 8 balls per over (BPO) column
# [1:8,10,11,12]
# Select BPO column for older bowlers
if colnames[1] =="BPO":
# [1:8,10,11,12]
cols = list(range(0,9))
cols.extend([10,11,12])
else:
# Select columns [1:7,9,10,11]
cols = list(range(0,8))
cols.extend([8,9,10])
#Subset the necessary columns
df1 = df.iloc[:, cols]
if not os.path.exists(dir):
os.mkdir(dir)
#print("Directory " , dir , " Created ")
else:
pass
#print("Directory " , dir , " already exists, writing to this folder")
# Create path
path= os.path.join(dir,file)
if create:
# Write to file
df1.to_csv(path)
# Return the data frame
return(df1)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBatsmanCumulativeAvgRuns
# This function computes and plots the relative cumulative average runs of batsmen
#
###########################################################################################
def relativeBatsmanCumulativeAvgRuns(filelist, names):
'''
Relative batsman's cumulative average runs
Description
This function computes and plots the relative cumulative average runs of batsmen
Usage
relativeBatsmanCumulativeAvgRuns(frames, names)
Arguments
frames
This is a list of <batsman>.csv files obtained with an initial getPlayerData()
names
A list of batsmen names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeStrikeRate relativeBowlerCumulativeAvgEconRate relativeBowlerCumulativeAvgWickets
Examples
batsmen=["tendulkar.csv","dravid.csv","ganguly.csv"]
names = ["Tendulkar","Dravid","Ganguly"]
relativeBatsmanCumulativeAvgRuns(batsmen,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
df=clean(file)
runs=pd.to_numeric(df['Runs'])
df1[names[idx]] = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Average Runs')
plt.title('Relative batsmen cumulative average runs')
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBatsmanCumulativeStrikeRate
# This function computes and plots the relative cumulative average strike rate of batsmen
#
###########################################################################################
def relativeBatsmanCumulativeStrikeRate (filelist, names):
'''
Relative batsmen cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of batsmen
Usage
relativeBatsmanCumulativeStrikeRate(frames, names)
Arguments
frames
This is a list of <batsman>.csv files obtained with an initial getPlayerData()
names
A list of batsmen names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgEconRate relativeBowlerCumulativeAvgWickets
Examples
batsmen=["tendulkar.csv","dravid.csv","ganguly.csv"]
names = ["Tendulkar","Dravid","Ganguly"]
relativeBatsmanCumulativeStrikeRate(batsmen,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
df=clean(file)
strikeRate=pd.to_numeric(df['SR'])
df1[names[idx]] = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title('Relative batsmen cumulative strike rate')
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBowlerCumulativeAvgEconRate
# This function computes and plots the relative cumulative average economy rate of bowlers
#
###########################################################################################
def relativeBowlerCumulativeAvgEconRate(filelist, names):
'''
Relative Bowler's cumulative average economy rate
Description
This function computes and plots the relative cumulative average economy rate of bowlers
Usage
relativeBowlerCumulativeAvgEconRate(frames, names)
Arguments
frames
This is a list of <bowler>.csv files obtained with an initial getPlayerData()
names
A list of bowler names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgWickets relativeBatsmanCumulativeStrikeRate
Examples
frames = ["kumble.csv","warne.csv","murali.csv"]
names = ["Kumble","Warne","Murali"]
relativeBowlerCumulativeAvgEconRate(frames,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
#print(idx)
#print(file)
bowler = cleanBowlerData(file)
economyRate=pd.to_numeric(bowler['Econ'])
df1[names[idx]]= economyRate.cumsum()/pd.Series(np.arange(1, len(economyRate)+1), economyRate.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Average Economy Rate')
plt.title('Relative Cumulative Average Economy Rate')
plt.text(150, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBowlerCumulativeAvgWickets
# This function computes and plots the relative cumulative average wickets of bowlers
#
###########################################################################################
def relativeBowlerCumulativeAvgWickets(filelist, names):
'''
Relative bowlers cumulative average wickets
Description
This function computes and plots the relative cumulative average wickets of a bowler
Usage
relativeBowlerCumulativeAvgWickets(frames, names)
Arguments
frames
This is a list of <bowler>.csv files obtained with an initial getPlayerData()
names
A list of bowler names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgEconRate relativeBatsmanCumulativeStrikeRate
Examples
## Not run:
# Retrieve the file path of a data file installed with cricketr
frames = ["kumble.csv","warne.csv","murali.csv"]
names = ["Kumble","Warne","Murali"]
relativeBowlerCumulativeAvgWickets(frames,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
bowler = cleanBowlerData(file)
wkts=pd.to_numeric(bowler['Wkts'])
df1[names[idx]]= wkts.cumsum()/pd.Series(np.arange(1, len(wkts)+1), wkts.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Average Wicket Rate')
plt.title('Relative Cumulative Average Wicket Rate')
plt.text(150, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBowlingER
# This function computes and plots the relative bowling Economy Rate of the bowlers
#
###########################################################################################
def relativeBowlingER(filelist, names):
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
bowler = cleanBowlerData(file)
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
bowler['Econ']=pd.to_numeric(bowler['Econ'])
df=bowler[['Wkts','Econ']].groupby('Wkts').mean()
df1[names[idx]]=df['Econ']
df1.plot()
plt.xlabel('Wickets')
plt.ylabel('Economy Rate')
plt.title("Relative Bowling Economy Rate vs Wickets")
plt.text(5, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
##########################################################################################
# Designed and developed by <NAME>
# Date : 10 Oct 2018
# Function: batsmanScoringRateODTT
# This function computes and plots the batsman scoring rate of a One Day batsman
# or a Twenty20 batsman
#
###########################################################################################
def batsmanScoringRateODTT(file, name="A Hookshot"):
'''
Compute and plot the predicted scoring rate for a One day batsman or Twenty20
Description
This function computes and plots a 2nd order polynomial between the balls faced and runs scored for ODI or Twenty20
Usage
batsmanScoringRateODTT(file, name = "A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerDataOD() or getPlayerTT()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s relativeBatsmanSRODTT relativeRunsFreqPerfODTT
Examples
# Get or use the <batsman>.csv obtained with getPlayerDataOD() or or getPlayerTT()
#sehwag =-getPlayerDataOD(35263,dir="./mytest", file="sehwag.csv",type="batting",
# homeOrAway=c(1,2,3),result=c(1,2,3,5))
# Retrieve the file path of a data file installed with cricketr
batsmanScoringRateODTT("sehwag.csv","Sehwag")
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
df['BF'] = pd.to_numeric(df['BF'])
df['Runs'] = pd.to_numeric(df['Runs'])
df1 = df[['BF','Runs']].sort_values(by=['BF'])
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get balls faced and runs scored
bf = pd.to_numeric(df1['BF'])
runs = pd.to_numeric(df1['Runs'])
atitle = name + "-" + "Balls Faced vs Runs scored"
# Plot balls faced vs runs and a 2nd order curve fit
plt.scatter(bf,runs, alpha=0.5)
plt.xlabel('Balls Faced')
plt.ylabel('Runs')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
bfPoly = poly.fit_transform(bf.values.reshape(-1,1))
linreg = LinearRegression().fit(bfPoly,runs)
plt.plot(bf,linreg.predict(bfPoly),'-r')
# Predict the number of runs for 50 balls faced
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of runs for 100 balls faced
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
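##########################################################################################
# Illustration: a self-contained sketch of the degree-2 fit used above, on synthetic
# balls-faced/runs values (made up), showing the 2-D reshaping that scikit-learn expects.
##########################################################################################
def _demoPolyFit():
    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures
    from sklearn.linear_model import LinearRegression
    bf = np.array([10, 20, 30, 40, 50], dtype=float)      # balls faced (made up)
    runs = np.array([8, 18, 30, 44, 60], dtype=float)     # runs scored (made up)
    poly = PolynomialFeatures(degree=2)
    bfPoly = poly.fit_transform(bf.reshape(-1, 1))        # 2-D design matrix
    linreg = LinearRegression().fit(bfPoly, runs)
    # Predicted runs for 100 balls faced
    return linreg.predict(poly.transform(np.array([[100.0]])))[0]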
##########################################################################################
# Designed and developed by <NAME>
# Date : 10 Nov 2018
# Function: batsman4s6s
# This function computes and plots the percent of 4s,6s in total runs
#
###########################################################################################
def batsman4s6s(frames, names) :
'''
Compute and plot a stacked barplot of runs,4s and 6s
Description
Compute and plot a stacked barplot of percentages of runs in (1s,2s and 3s),4s and 6s
Usage
batsman4s6s(frames, names)
Arguments
frames
List of batsman
names
Names of batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanScoringRateODTT, relativeRunsFreqPerfODTT, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerDataOD()
frames = ["./sehwag.csv","./devilliers.csv","./gayle.csv"]
names = ["Sehwag","<NAME>","Gayle"]
batsman4s6s(frames,names)
'''
df2=pd.DataFrame()
for file in frames:
df = clean(file)
runs = pd.to_numeric(df['Runs']).sum()
x4s = (pd.to_numeric(df['4s']) * 4).sum()
x6s = (pd.to_numeric(df['6s']) * 6).sum()
# Find numbers of runs from 1,2 and 3s
runs = runs - (x4s +x6s)
a=[runs,x4s,x6s]
df1= pd.DataFrame(a)
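##########################################################################################
# Illustration: the body of batsman4s6s is cut off above. Below is a hedged sketch of one
# way the percentage stacked bar could be completed; it is not necessarily the original
# implementation, and the column labels are assumptions.
##########################################################################################
def _demoStackedRunPercentages(frames, names):
    import pandas as pd
    import matplotlib.pyplot as plt
    rows = []
    for file in frames:
        df = clean(file)
        runs = pd.to_numeric(df['Runs']).sum()
        x4s = (pd.to_numeric(df['4s']) * 4).sum()
        x6s = (pd.to_numeric(df['6s']) * 6).sum()
        rows.append([runs - (x4s + x6s), x4s, x6s])
    pct = pd.DataFrame(rows, index=names, columns=['1s,2s,3s', '4s', '6s'])
    pct = pct.div(pct.sum(axis=1), axis=0) * 100          # convert to percentages
    pct.plot(kind='bar', stacked=True)
    plt.title('Percentage of runs in 1s/2s/3s, 4s and 6s')
    plt.show()
    plt.gcf().clear()
    return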
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = | DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7]) | pandas.DataFrame |
# Flask server
import sys
import os
import dateutil.relativedelta
from flask import Flask,request,Response
from multiprocessing import Process
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import json
from functools import wraps
import mpld3
# koapy
from koapy import KiwoomOpenApiPlusEntrypoint, KiwoomOpenApiPlusTrInfo
from pandas import Timestamp
import matplotlib.pyplot as plt
import pandas as pd
from exchange_calendars import get_calendar
# DB
from DataBase.SqliteDB import StockDB
# Custom
from datetime import datetime
import logging
# Telegram
import telepot
if not os.path.exists('log'):
os.mkdir('log')
fh = logging.FileHandler(filename=os.path.join('log', '{:%Y-%m-%d}.log'.format(datetime.now())),
encoding="utf-8")
format = '[%(asctime)s] I %(filename)s | %(name)s-%(funcName)s-%(lineno)04d I %(levelname)-8s > %(message)s'
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
logging.basicConfig(format=format, handlers=[fh, sh], level=logging.DEBUG)
########### init ###########
app = Flask(__name__)
server = Process()
stock_db = StockDB()
# 1. Create the entrypoint object
entrypoint = KiwoomOpenApiPlusEntrypoint()
# 2. Log in
print('Logging in...')
entrypoint.EnsureConnected()
logging.info('Logged in.')
base_account = entrypoint.GetAccountList()[0]
# 3. Save the KOSPI/KOSDAQ stock lists
# Check the stock list (basic function call example)
print('Getting stock codes and names...')
codes = entrypoint.GetKospiCodeList()
names = [entrypoint.GetMasterCodeName(code) for code in codes]
codes_by_names_dict_kospi = dict(zip(names, codes))
names_by_codes_dict_kospi = dict(zip(codes, names))
codes = entrypoint.GetKosdaqCodeList()
names = [entrypoint.GetMasterCodeName(code) for code in codes]
codes_by_names_dict_kosdaq = dict(zip(names, codes))
names_by_codes_dict_kosdaq = dict(zip(codes, names))
logging.info('End stock codes and names...')
# 6. Order handling
krx_calendar = get_calendar('XKRX')
# 7. Register the Telegram bot
def getToken():
f = open("telebot.txt")
token = f.readline().strip()
userid = f.readline().strip()
f.close()
return (token , userid)
(token, chat_id) = getToken()
bot = telepot.Bot(token)
def as_json(f):
@wraps(f)
def decorated_function(*args, **kwargs):
res = f(*args, **kwargs)
res = json.dumps(res, ensure_ascii=False, indent=4).encode('utf8')
return Response(res, content_type='application/json; charset=utf-8')
return decorated_function
@app.route('/')
def home():
    # Check the connection status (basic function call example)
    print('Checking connection status...')
    status = entrypoint.GetConnectState()
    print('Connection status: %s' % status)
return 'Kiwoom Bridge Made By Dotz'
@app.route('/disconnect', methods=['GET'])
def disconnect():
    # Release resources
entrypoint.close()
shutdown_server()
print('Server shutting down...')
@app.route('/myaccount', methods=['GET'])
def myaccount():
sAccNo = base_account
account = stock_db.load_account_table().to_html()
tname = 'account_detail_{}'.format(sAccNo)
account_detail = stock_db.load_account_detail_table(tname)
result = account + '</br></br>'
result += account_detail.to_html()
return result
@app.route('/stock_list/<kind>')
@as_json
def get_stock_list(kind):
if kind == 'kospi':
return names_by_codes_dict_kospi
elif kind == 'kosdaq':
return names_by_codes_dict_kosdaq
@app.route('/basic_info/<code>')
@as_json
def get_basic_info(code):  # to be updated
    print('Getting basic info of %s' % code)
info = entrypoint.GetStockBasicInfoAsDict(code)
print('Got basic info data (using GetStockBasicInfoAsDict):')
return info
@app.route('/index_stock_data/<name>')
def get_index_stock_data(name):
# date, open, high, low, close, volume
tname = stock_db.getTableName(name)
result = stock_db.load(tname)
if result is None:
return ('', 204)
html = "<div style=\"position: relative;\"><h1 align=\"center\">"+name+"지수 차트</h1>"
result = result.astype({'date': 'str', 'open': 'int', 'high': 'int', 'low': 'int', 'close': 'int', 'volume': 'int'})
result['open'] = result['open'].apply(lambda _: _ / 100 if _ > 0 else _)
result['high'] = result['high'].apply(lambda _: _ / 100 if _ > 0 else _)
result['low'] = result['low'].apply(lambda _: _ / 100 if _ > 0 else _)
result['close'] = result['close'].apply(lambda _: _ / 100 if _ > 0 else _)
dates = pd.to_datetime(result['date'], format='%Y%m%d')
closes = pd.to_numeric(result['close'])
f = plt.figure()
plt.plot(dates, closes)
html += mpld3.fig_to_html(f, figid='Index_Chart')
html += '</br></br>'
html += result.to_html()
return html
@app.route('/daily_stock_data/<code>')
def get_daily_stock_data(code):
parameter = request.args.to_dict()
startdate = ''
if len(parameter) > 0 and 'startdate' in parameter.keys():
startdate = parameter['startdate']
html = "<div style=\"position: relative;\"><h1 align=\"center\">"+get_name_by_code(code)+" 종목차트</h1>"
#date, open, high, low, close, volume
tname = stock_db.getTableName(code)
if validate(startdate):
result = stock_db.load_detail(tname, startdate)
else:
result = stock_db.load_detail(tname)
if result is None:
return ('', 204)
dates = | pd.to_datetime(result['date'], format='%Y%m%d') | pandas.to_datetime |
from multiprocessing import Pool, cpu_count, freeze_support
import numpy as np
import pandas as pd
import silicone.time_projectors as tp
import silicone.utils
"""
This script measures how accurate the different projectors are at recreating known data.
We remove the data after 2050 and infill to find it and compare the true and infilled
values. It normalises this difference by the total range of the data. It runs on
multiple cores and saves the resulting statistics to disk. It may run for either a list
of specified cases, or for all possible cases. Optionally it can also
plot graphs comparing the infilled and original data - this is best done with only a
small number of cases, otherwise a huge number of files will be created.
The options controlling how it works are listed below.
"""
def main():
freeze_support()
# __________________________________Input options___________________________________
# Where is the file stored for data used to fill in the sheet?
input_data = "./sr_15_complete.csv"
# A list of all projectors to investigate, here a reference to the actual projector
extenders_list = [
tp.ExtendLatestTimeQuantile
]
options_list = [{}]
# This list must agree with the above list, but is the name of the projectors
projectors_name_list = [
x.__name__ for x in extenders_list
]
# Leader is a single data class presented as a list.
leaders = ["Emissions|CO2"]
# Place to save the infilled data as a csv
save_file = "../Output/projectorResults/projectorComparisonLead_stdv_{}.csv".format(
leaders[0].split("|")[-1]
)
# Do we want to save plots? If not, leave as None, else the location to save them.
# Note that these are not filter-dependent and so only the results of the last
# filter will persist
save_plots = None # "../output/projectorResults/plots/"
# Do we want to run this for all possible filters? If so, choose none,
# otherwise specify the filter here as a list of tuples
to_compare_filter = None
"""[
("AIM/CGE 2.0", "SSP1-19"),
("AIM/CGE 2.1", "TERL_15D_NoTransportPolicy"),
]"""
years = range(2020, 2101, 10)
test_year = 2050
variables_investigated = ["Emissions|CO2"]
# Uncomment the below
# __________________________________end options_____________________________________
years_to_investigate = [year for year in years if year > test_year]
assert len(extenders_list) == len(projectors_name_list)
assert len(options_list) == len(projectors_name_list)
db_all = silicone.utils.download_or_load_sr15(
input_data, valid_model_ids="*"
).filter(region="World", year=years)
db_all.filter(variable=variables_investigated, inplace=True)
# This is the model/scenario combination to compare.
if to_compare_filter:
all_possible_filters = to_compare_filter
else:
all_possible_filters = (
db_all.data[["model", "scenario"]]
.groupby(["model", "scenario"])
.size()
.index.values
)
all_args = [
(
filter_instance,
db_all,
variables_investigated,
projectors_name_list,
extenders_list,
save_plots,
leaders,
years_to_investigate,
options_list,
)
for filter_instance in all_possible_filters
]
# Perform the loop
with Pool(cpu_count() - 1) as pool:
results_db = list(pool.map(_recalc_and_compare_results, all_args))
results_count = sum([result.notnull() for result in list(results_db)])
overall_results = sum([result.fillna(0) for result in list(results_db)])
overall_results = overall_results / results_count
overall_results.to_csv(save_file)
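    # Note (illustrative): summing the fillna(0) frames and dividing by the
    # count of non-null entries gives a NaN-aware element-wise mean across the
    # per-scenario result frames, e.g. the mean of [1, NaN, 3] -> (1 + 0 + 3) / 2 = 2.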
def _recalc_and_compare_results(args):
(
one_filter,
db_all,
vars_to_crunch,
projectors_name_list,
projectors_list,
save_plots,
leaders,
years_to_investigate,
options_list,
) = args
combo_filter = {"model": one_filter[0], "scenario": one_filter[1]}
investigated_scen_df = db_all.filter(**combo_filter)
input_to_fill = investigated_scen_df.filter(
year=years_to_investigate, keep=False
)
results_db = pd.DataFrame(index=vars_to_crunch, columns=projectors_name_list)
if leaders not in input_to_fill.variables(False).values:
print(
"No data for {} in model {}, scen {}".format(
leaders, one_filter[0], one_filter[1]
)
)
return results_db
# Remove all items that overlap directly with this
db_filter = db_all.filter(**combo_filter, keep=False)
# Set up normalisation
norm_factor = | pd.Series(index=years_to_investigate, dtype=float) | pandas.Series |
import pandas as pd
import numpy as np
import tensorflow as tf
import os, pickle
class Reader(object):
def read(self, data_path):
self.read_data()
self.merge_id()
self.add_reverse()
if self.args.reindex:
self.reindex_kb()
self.gen_t_label()
self._ent_num = self._entity_num
self._rel_num = self._relation_num
self._ent_mapping = | pd.DataFrame({'kb_1':{}, 'kb_2':{}}) | pandas.DataFrame |
from models import MotionDetection, ObjectDetection
import config
import pandas as pd
from datetime import datetime
import logging
from edgetpu.detection.engine import DetectionEngine
from database import Session
def get_obj_det_comps(model_file: str, labels_file: str) -> tuple:
"""Load object detection model file and labels"""
logging.info("Loading Edge TPU object detection model and labels")
model = DetectionEngine(model_file)
labels = {}
# loop over the class labels file
for row in open(labels_file):
# unpack the row and update the labels dictionary
(classID, curr_label) = row.strip().split(maxsplit=1)
labels[int(classID)] = curr_label.strip()
return model, labels
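# Labels-file format sketch (assumption): each row is "<classID> <label>", e.g.
#   0 person
#   1 bicycle
# which the loop above turns into {0: "person", 1: "bicycle"}.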
def save_detections(detections: list) -> None:
"""Save detections into DB"""
logging.debug(f'Saving {len(detections)} detection(s)')
try:
# stage detections for DB insert
Session.add_all(detections)
# flush inserts into DB
Session.commit()
except Exception as e:
logging.error(f'Detection(s) not saved: {str(e)}')
def get_max_obj_ids(now, db_conn) -> dict:
"""Get a dictionary of labels and max object IDs for current date"""
# calculate start of current hour
curr_dt_start = f'{str(now.date())} 00:00:00'
# fetch results from DB
df = pd.read_sql(f"""
SELECT label, COUNT (DISTINCT obj_id) as next_obj_id
FROM object_detections
WHERE create_ts >= :curr_dt_start
GROUP BY 1
""", params={'curr_dt_start': curr_dt_start}, con=db_conn)
# convert dataframe to a {label:count} dictionary
return {rec['label']: rec['next_obj_id'] for rec in df.to_dict(orient='records')}
def get_motion_analysis(db_conn) -> list:
"""Get means of motion detections by hour for last N-days and today"""
# fetch results from DB
df = pd.read_sql(f"""
SELECT id, create_ts, '1' as motion_count
FROM motion_detections
WHERE create_ts BETWEEN datetime('now', '-{config.USE_HISTORICAL_DAYS} days') AND datetime('now', 'localtime')
GROUP BY 1,2,3
""", con=db_conn)
# update data types
df.create_ts = pd.to_datetime(df.create_ts)
df.motion_count = df.motion_count.astype(int)
# resample data by hour
motion_det_df_resampled = df.set_index('create_ts').resample(
'H').count().reset_index()[['create_ts', 'motion_count']].fillna(0)
# split historical dates and today
td = datetime.now().date()
motion_det_df_resampled_hist = motion_det_df_resampled.loc[motion_det_df_resampled['create_ts'].dt.date != td]
motion_det_df_resampled_td = motion_det_df_resampled.loc[motion_det_df_resampled['create_ts'].dt.date == td]
# calculate avg hourly count for historical detections
motion_det_df_resampled_avg_hist = motion_det_df_resampled_hist.groupby(
motion_det_df_resampled_hist.create_ts.dt.hour)['motion_count'].mean()
hist = motion_det_df_resampled_avg_hist.reset_index()
hist.columns = ['Hour', 'Historical']
# calculate hourly count for today's detections
motion_det_df_resampled_avg_td = motion_det_df_resampled_td.groupby(
motion_det_df_resampled_td.create_ts.dt.hour)['motion_count'].sum()
today = motion_det_df_resampled_avg_td.reset_index()
today.columns = ['Hour', 'Today']
# return merged: historical and today's datasets
return hist.merge(today, how='left').fillna(0).to_dict(orient='records')
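# Illustrative sketch (assumption, not called by the app): the hourly resample
# used above simply counts events per hour, e.g.
def _hourly_count_example() -> pd.DataFrame:
    idx = pd.to_datetime(["2021-01-01 10:05", "2021-01-01 10:45", "2021-01-01 11:30"])
    demo = pd.DataFrame({"motion_count": 1}, index=idx)
    return demo.resample("H").count()  # -> 2 events in the 10:00 bucket, 1 in 11:00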
def get_objects_analysis(db_conn) -> dict:
"""Get means of object detections by hour for last N-days and today"""
# fetch detections from db
object_det_df = pd.read_sql(f"""
SELECT id, label, create_ts, obj_id
FROM object_detections
WHERE create_ts BETWEEN datetime('now', '-{config.USE_HISTORICAL_DAYS} days') AND datetime('now', 'localtime')
GROUP BY 1,2,3,4
""", con=db_conn)
# update data types
object_det_df.create_ts = | pd.to_datetime(object_det_df.create_ts) | pandas.to_datetime |
import gc
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from src.utility_functions import display, extract_num, reduce_mem_usage
def encode_categorical(df: pd.DataFrame, cols: list):
"""Encode categorical columns in _cols_ for _df_ whilst maintaining NaN values"""
for col in cols:
# Leave NaN as it is.
le = LabelEncoder()
not_null = df[col][df[col].notnull()]
df[col] = pd.Series(le.fit_transform(not_null), index=not_null.index)
return df
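# Usage sketch (illustrative assumption, not used by the pipeline): only the
# non-null values are passed to the LabelEncoder, so NaN cells survive intact.
def _encode_categorical_demo() -> pd.DataFrame:
    demo = pd.DataFrame({"event_name_1": ["SuperBowl", np.nan, "Easter"]})
    return encode_categorical(demo, ["event_name_1"])  # NaN row stays NaN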
def reshape_sales(
sales: pd.DataFrame,
submission: pd.DataFrame,
days_pred: int,
d_thresh: int = 0,
verbose: bool = True,
) -> pd.DataFrame:
"""Convert from wide to long data format"""
# melt sales data
id_columns = ["id", "item_id", "dept_id", "cat_id", "store_id", "state_id"]
product = sales[id_columns]
sales = sales.melt(id_vars=id_columns, var_name="d", value_name="demand")
sales = reduce_mem_usage(sales)
# separate test dataframes.
vals = submission[submission["id"].str.endswith("validation")]
evals = submission[submission["id"].str.endswith("evaluation")]
# change column names.
vals.columns = ["id"] + [f"d_{d}" for d in range(1914, 1914 + days_pred)]
evals.columns = ["id"] + [f"d_{d}" for d in range(1942, 1942 + days_pred)]
# merge with product table
evals["id"] = evals["id"].str.replace("_evaluation", "_validation")
vals = vals.merge(product, how="left", on="id")
evals = evals.merge(product, how="left", on="id")
evals["id"] = evals["id"].str.replace("_validation", "_evaluation")
if verbose:
print("validation")
display(vals)
print("evaluation")
display(evals)
vals = vals.melt(id_vars=id_columns, var_name="d", value_name="demand")
evals = evals.melt(id_vars=id_columns, var_name="d", value_name="demand")
sales["part"] = "train"
vals["part"] = "validation"
evals["part"] = "evaluation"
data = pd.concat([sales, vals, evals], axis=0)
del sales, vals, evals
data["d"] = extract_num(data["d"])
data = data[data["d"] >= d_thresh]
# delete evaluation for now.
data = data[data["part"] != "evaluation"]
gc.collect()
if verbose:
print("data")
display(data)
return data
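# Illustrative sketch (assumption): the wide-to-long melt above turns the day
# columns d_1, d_2, ... into rows, e.g.
def _melt_demo() -> pd.DataFrame:
    wide = pd.DataFrame({"id": ["FOODS_1_001_CA_1"], "d_1": [3], "d_2": [0]})
    return wide.melt(id_vars=["id"], var_name="d", value_name="demand")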
def merge_calendar(data: pd.DataFrame, calendar: pd.DataFrame) -> pd.DataFrame:
"""Merge _calendar_ into _data_"""
calendar = calendar.drop(["weekday", "wday", "month", "year"], axis=1)
return data.merge(calendar, how="left", on="d")
def merge_prices(data: pd.DataFrame, prices: pd.DataFrame) -> pd.DataFrame:
"""Merge _prices_ into _data_"""
return data.merge(prices, how="left", on=["store_id", "item_id", "wm_yr_wk"])
def make_base_dataset() -> pd.DataFrame:
"""Prepare, save and return base dataset for which features can be engineered"""
ROOT_DIR = Path(__file__).resolve().parent.parent.parent
DATA_DIR = ROOT_DIR.joinpath("data/")
print("Reading files...")
calendar = | pd.read_csv(f"{DATA_DIR}/raw/calendar.csv") | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.core.base import DataError
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, 8, 2, 6],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
],
],
)
@pytest.mark.parametrize(
"ties_method,ascending,pct,exp",
[
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,exp",
[
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
np.nan,
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
np.nan,
np.nan,
],
],
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,pct,exp",
[
(
"average",
True,
"keep",
False,
[2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
),
(
"average",
True,
"keep",
True,
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
),
(
"average",
False,
"keep",
False,
[4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
),
(
"average",
False,
"keep",
True,
[0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
),
("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
(
"min",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
(
"max",
False,
"keep",
False,
[5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
(
"first",
True,
"keep",
False,
[1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
),
(
"first",
True,
"keep",
True,
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
),
(
"first",
False,
"keep",
False,
[3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
(
"first",
False,
"keep",
True,
[0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
),
(
"dense",
True,
"keep",
False,
[1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
),
(
"dense",
True,
"keep",
True,
[
1.0 / 3.0,
1.0 / 3.0,
np.nan,
3.0 / 3.0,
1.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
(
"dense",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
(
"dense",
False,
"keep",
True,
[
3.0 / 3.0,
3.0 / 3.0,
np.nan,
1.0 / 3.0,
3.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
(
"average",
True,
"bottom",
True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
),
("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
(
"average",
False,
"bottom",
True,
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
),
("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
(
"min",
True,
"bottom",
True,
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
),
("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
(
"min",
False,
"bottom",
True,
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
),
("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
(
"max",
False,
"bottom",
True,
[0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
),
("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
(
"first",
True,
"bottom",
True,
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
),
("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
(
"first",
False,
"bottom",
True,
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
),
("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
],
)
def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = | DataFrame({"key": key, "val": vals}) | pandas.DataFrame |
from inspect import isclass
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Datetime
import featuretools as ft
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.primitives import (
Absolute,
AddNumeric,
AddNumericScalar,
Age,
Count,
Day,
Diff,
DivideByFeature,
DivideNumeric,
DivideNumericScalar,
Equal,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
Haversine,
Hour,
IsIn,
IsNull,
Latitude,
LessThanEqualToScalar,
LessThanScalar,
Longitude,
Minute,
Mode,
Month,
MultiplyNumeric,
MultiplyNumericScalar,
Not,
NotEqual,
NotEqualScalar,
NumCharacters,
NumWords,
Percentile,
ScalarSubtractNumericFeature,
Second,
SubtractNumeric,
SubtractNumericScalar,
Sum,
TimeSince,
TransformPrimitive,
Year,
get_transform_primitives
)
from featuretools.primitives.base import make_trans_primitive
from featuretools.primitives.utils import (
PrimitivesDeserializer,
serialize_primitive
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.tests.testing_utils import feature_with_name, to_pandas
from featuretools.utils.gen_utils import Library
from featuretools.utils.koalas_utils import pd_to_ks_clean
def test_init_and_name(es):
log = es['log']
rating = ft.Feature(ft.IdentityFeature(es["products"].ww["rating"]), "log")
log_features = [ft.Feature(es['log'].ww[col]) for col in log.columns] +\
[ft.Feature(rating, primitive=GreaterThanScalar(2.5)),
ft.Feature(rating, primitive=GreaterThanScalar(3.5))]
# Add Timedelta feature
# features.append(pd.Timestamp.now() - ft.Feature(log['datetime']))
customers_features = [ft.Feature(es["customers"].ww[col]) for col in es["customers"].columns]
# check all transform primitives have a name
for attribute_string in dir(ft.primitives):
attr = getattr(ft.primitives, attribute_string)
if isclass(attr):
if issubclass(attr, TransformPrimitive) and attr != TransformPrimitive:
assert getattr(attr, "name") is not None
trans_primitives = get_transform_primitives().values()
# If Dask EntitySet use only Dask compatible primitives
if es.dataframe_type == Library.DASK.value:
trans_primitives = [prim for prim in trans_primitives if Library.DASK in prim.compatibility]
if es.dataframe_type == Library.KOALAS.value:
trans_primitives = [prim for prim in trans_primitives if Library.KOALAS in prim.compatibility]
for transform_prim in trans_primitives:
        # skip automated testing in a few special cases
features_to_use = log_features
if transform_prim in [NotEqual, Equal]:
continue
if transform_prim in [Age]:
features_to_use = customers_features
# use the input_types matching function from DFS
input_types = transform_prim.input_types
if type(input_types[0]) == list:
matching_inputs = match(input_types[0], features_to_use)
else:
matching_inputs = match(input_types, features_to_use)
if len(matching_inputs) == 0:
raise Exception(
"Transform Primitive %s not tested" % transform_prim.name)
for prim in matching_inputs:
instance = ft.Feature(prim, primitive=transform_prim)
# try to get name and calculate
instance.get_name()
ft.calculate_feature_matrix([instance], entityset=es)
def test_relationship_path(es):
f = ft.TransformFeature(ft.Feature(es['log'].ww['datetime']), Hour)
assert len(f.relationship_path) == 0
def test_serialization(es):
value = ft.IdentityFeature(es['log'].ww['value'])
primitive = ft.primitives.MultiplyNumericScalar(value=2)
value_x2 = ft.TransformFeature(value, primitive)
dictionary = {
'name': None,
'base_features': [value.unique_name()],
'primitive': serialize_primitive(primitive),
}
assert dictionary == value_x2.get_arguments()
assert value_x2 == \
ft.TransformFeature.from_dictionary(dictionary, es,
{value.unique_name(): value},
PrimitivesDeserializer())
def test_make_trans_feat(es):
f = ft.Feature(es['log'].ww['datetime'], primitive=Hour)
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es, feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert v == 10
@pytest.fixture
def pd_simple_es():
df = pd.DataFrame({
'id': range(4),
'value': pd.Categorical(['a', 'c', 'b', 'd']),
'value2': pd.Categorical(['a', 'b', 'a', 'd']),
'object': ['time1', 'time2', 'time3', 'time4'],
'datetime': pd.Series([pd.Timestamp('2001-01-01'),
pd.Timestamp('2001-01-02'),
| pd.Timestamp('2001-01-03') | pandas.Timestamp |
import sys
import os
#handling the paths and the model
cwd = os.getcwd()
sys.path.append(cwd)
import pysd
from pathlib import Path
from pysd.py_backend.functions import Model
import matplotlib.pyplot as plt
import pandas as pd
import varcontrol
import time
start = time.time()
model = Model('corona_base_hackathon_treated.py')
path = Path.cwd()
out_path = path / 'output'
set_path = path / 'settings'
try:
file_lst = list(out_path.glob('*'))
for file in file_lst:
file.unlink()
except FileNotFoundError:
pass
out_path.mkdir(exist_ok=True)
#reading the settings
policy_df = | pd.read_csv(set_path / 'policy.csv',index_col=0) | pandas.read_csv |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_operation_capacity.py
@time: 2019-05-31
"""
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
import gc, six
import json
import numpy as np
import pandas as pd
from sklearn import linear_model
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
from pandas.io.json import json_normalize
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
@six.add_metaclass(Singleton)
class FactorEarning(object):
"""
盈利能力
"""
def __init__(self):
__str__ = 'factor_earning'
self.name = '财务指标'
self.factor_type1 = '财务指标'
self.factor_type2 = '盈利能力'
self.description = '财务指标的二级指标-盈利能力'
@staticmethod
def _Rev5YChg(tp_earning, factor_earning, dependencies=['operating_revenue',
'operating_revenue_pre_year_1',
'operating_revenue_pre_year_2',
'operating_revenue_pre_year_3',
'operating_revenue_pre_year_4']):
"""
        Five-year operating revenue growth rate
:name:
:desc:
:unit:
:view_dimension: 0.01
"""
regr = linear_model.LinearRegression()
        # Read five years of dates and net profit
historical_growth = tp_earning.loc[:, dependencies]
if len(historical_growth) <= 0:
return
def has_non(a):
tmp = 0
for i in a.tolist():
for j in i:
if j is None or j == 'nan':
tmp += 1
if tmp >= 1:
return True
else:
return False
def fun2(x):
aa = x[dependencies].fillna('nan').values.reshape(-1, 1)
if has_non(aa):
return None
else:
regr.fit(aa, range(0, 5))
return regr.coef_[-1]
historical_growth['coefficient'] = historical_growth.apply(fun2, axis=1)
historical_growth['mean'] = historical_growth[dependencies].fillna(method='ffill').mean(axis=1)
fun1 = lambda x: x[0] / abs(x[1]) if x[1] is not None and x[0] is not None and x[1] != 0 else None
historical_growth['Rev5YChg'] = historical_growth[['coefficient', 'mean']].apply(fun1, axis=1)
historical_growth = historical_growth[['Rev5YChg']]
factor_earning = pd.merge(factor_earning, historical_growth, on='security_code')
return factor_earning
@staticmethod
def _NetPft5YAvgChg(tp_earning, factor_earning,
dependencies=['net_profit', 'net_profit_pre_year_1', 'net_profit_pre_year_2',
'net_profit_pre_year_3', 'net_profit_pre_year_4']):
"""
        5-year earnings growth rate
:name:
:desc:
:unit:
:view_dimension: 0.01
"""
regr = linear_model.LinearRegression()
        # Read five years of dates and net profit
historical_growth = tp_earning.loc[:, dependencies]
if len(historical_growth) <= 0:
return
def has_non(a):
tmp = 0
for i in a.tolist():
for j in i:
if j is None or j == 'nan':
tmp += 1
if tmp >= 1:
return True
else:
return False
def fun2(x):
aa = x[dependencies].fillna('nan').values.reshape(-1, 1)
if has_non(aa):
return None
else:
regr.fit(aa, range(0, 5))
return regr.coef_[-1]
historical_growth['coefficient'] = historical_growth.apply(fun2, axis=1)
historical_growth['mean'] = historical_growth[dependencies].fillna(method='ffill').mean(axis=1)
fun1 = lambda x: x[0] / abs(x[1]) if x[1] != 0 and x[1] is not None and x[0] is not None else None
historical_growth['NetPft5YAvgChg'] = historical_growth[['coefficient', 'mean']].apply(fun1, axis=1)
# historical_growth = historical_growth.drop(
# columns=['net_profit', 'net_profit_pre_year_1', 'net_profit_pre_year_2', 'net_profit_pre_year_3',
# 'net_profit_pre_year_4', 'coefficient', 'mean'], axis=1)
historical_growth = historical_growth[['security_code', 'NetPft5YAvgChg']]
factor_earning = pd.merge(factor_earning, historical_growth, on='security_code')
return factor_earning
@staticmethod
def _DGPR(ttm_earning, ttm_earning_p1y, factor_earning, dependencies=['operating_revenue', 'operating_cost']):
"""
        Gross margin growth rate, compared with the same period last year
:name:
:desc:
:unit:
:view_dimension: 0.01
"""
earning = ttm_earning.loc[:, dependencies]
earning_p1y = ttm_earning_p1y.loc[:, dependencies]
earning['gross_income_ratio'] = np.where(
CalcTools.is_zero(earning.operating_revenue.values), 0,
(earning.operating_revenue.values -
earning.operating_cost.values)
/ earning.operating_revenue.values
)
earning_p1y['gross_income_ratio'] = np.where(
CalcTools.is_zero(earning_p1y.operating_revenue.values), 0,
(earning_p1y.operating_revenue.values -
earning_p1y.operating_cost.values)
/ earning_p1y.operating_revenue.values)
earning["DGPR"] = earning["gross_income_ratio"] - earning_p1y["gross_income_ratio"]
dependencies = dependencies + ['gross_income_ratio']
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
@staticmethod
def ROA5YChg(ttm_earning_5y, factor_earning, dependencies=['net_profit', 'total_assets']):
"""
        :name: 5-year return on assets
        :desc: Regression coefficient of the 5-year earnings regressed on time (years), divided by the absolute value of the 5-year mean earnings. For newly listed stocks it is computed from the 3 years of net profit disclosed before listing; 4- and 5-year figures follow as new annual reports are disclosed. Updated once a year, on the annual report disclosure date.
:unit:
:view_dimension: 0.01
"""
earning = ttm_earning_5y.loc[:, dependencies]
earning['ROA5YChg'] = np.where(
CalcTools.is_zero(earning.total_assets.values), 0,
earning.net_profit.values / earning.total_assets.values / 4)
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
@staticmethod
def ROE5Y(ttm_earning_5y, factor_earning, dependencies=['net_profit', 'total_owner_equities']):
"""
        :name: 5-year average return on equity
        :desc: AVG(net profit * 2 / (current-year shareholders' equity (MRQ) + prior year-end shareholders' equity (MRQ))), restricted to annual reports from the past five years
:unit:
:view_dimension: 0.01
"""
earning = ttm_earning_5y.loc[:, dependencies]
earning['ROE5Y'] = np.where(
CalcTools.is_zero(earning.total_owner_equities.values), 0,
earning.net_profit.values / earning.total_owner_equities.values / 4)
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
# @staticmethod
# def NPCutToNP(tp_earning, factor_earning, dependencies=['adjusted_profit', 'net_profit']):
# """
# :name: 扣除非经常损益后的净利润/净利润
# :desc: 扣除非经常损益后的净利润/净利润
# :unit:
# :view_dimension: 0.01
# """
# earning = tp_earning.loc[:, dependencies]
# earning['NPCutToNP'] = np.where(
# CalcTools.is_zero(earning.net_profit.values), 0,
# earning.adjusted_profit.values
# / earning.net_profit.values)
# earning = earning.drop(dependencies, axis=1)
# factor_earning = pd.merge(factor_earning, earning, on="security_code")
# return factor_earning
@staticmethod
def ROE(tp_earning, factor_earning,
dependencies=['np_parent_company_owners', 'equities_parent_company_owners']):
"""
        :name: Return on equity (diluted)
        :desc: Net profit attributable to owners of the parent company / period-end equity attributable to owners of the parent company
:unit:
:view_dimension: 0.01
"""
earning = tp_earning.loc[:, dependencies]
earning['ROE'] = np.where(
CalcTools.is_zero(earning.equities_parent_company_owners.values), 0,
earning.np_parent_company_owners.values /
earning.equities_parent_company_owners.values / 4)
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
@staticmethod
def ROEAvg(tp_earning, factor_earning,
dependencies=['np_parent_company_owners', 'equities_parent_company_owners']):
"""
        :name: Return on equity (average)
        :desc: ROE (average) = net profit attributable to owners of the parent * 2 / (period-end equity attributable to owners of the parent + period-beginning equity attributable to owners of the parent)
:unit:
:view_dimension: 0.01
"""
earning = tp_earning.loc[:, dependencies]
earning['ROEAvg'] = np.where(
CalcTools.is_zero(earning.equities_parent_company_owners.values), 0,
earning.np_parent_company_owners.values /
earning.equities_parent_company_owners.values / 4)
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
@staticmethod
def ROEcut(tp_earning, factor_earning, dependencies=['adjusted_profit', 'equities_parent_company_owners']):
"""
        :name: Return on equity (deducted, diluted)
        :desc: Return on equity (deducted, diluted)
:unit:
:view_dimension: 0.01
"""
earning = tp_earning.loc[:, dependencies]
earning['ROEcut'] = np.where(
CalcTools.is_zero(earning.equities_parent_company_owners.values), 0,
earning.adjusted_profit.values /
earning.equities_parent_company_owners.values / 4)
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
@staticmethod
def _invest_r_associates_to_tp_latest(tp_earning, factor_earning, dependencies=['invest_income_associates', 'total_profit']):
"""
对联营和营公司投资收益/利润总额
:name:
:desc:
:unit:
:view_dimension: 0.01
"""
earning = tp_earning.loc[:, dependencies]
earning['invest_r_associates_to_tp_latest'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
earning.invest_income_associates.values
/ earning.total_profit.values)
earning = earning.drop(dependencies, axis=1)
factor_earning = pd.merge(factor_earning, earning, on="security_code")
return factor_earning
@staticmethod
def NetPft5YAvgChgTTM(ttm_earning, factor_earning,
dependencies=['net_profit', 'net_profit_pre_year_1', 'net_profit_pre_year_2',
'net_profit_pre_year_3', 'net_profit_pre_year_4']):
"""
        :name: 5-year earnings growth rate (TTM)
        :desc: Regression coefficient of the 5-year earnings regressed on time (years), divided by the absolute value of the 5-year mean earnings. For newly listed stocks it is computed from the 3 years of net profit disclosed before listing; 4- and 5-year figures follow as new annual reports are disclosed. Updated once a year, on the annual report disclosure date.
:unit:
:view_dimension: 0.01
"""
regr = linear_model.LinearRegression()
        # Read five years of dates and net profit
historical_growth = ttm_earning.loc[:, dependencies]
if len(historical_growth) <= 0:
return
def has_non(a):
tmp = 0
for i in a.tolist():
for j in i:
if j is None or j == 'nan':
tmp += 1
if tmp >= 1:
return True
else:
return False
def fun2(x):
aa = x[dependencies].fillna('nan').values.reshape(-1, 1)
if has_non(aa):
return None
else:
regr.fit(aa, range(0, 5))
return regr.coef_[-1]
historical_growth['coefficient'] = historical_growth.apply(fun2, axis=1)
historical_growth['mean'] = historical_growth[dependencies].fillna(method='ffill').mean(axis=1)
fun1 = lambda x: x[0] / abs(x[1]) if x[1] != 0 and x[1] is not None and x[0] is not None else None
historical_growth['NetPft5YAvgChgTTM'] = historical_growth[['coefficient', 'mean']].apply(fun1, axis=1)
dependencies = dependencies + ['coefficient', 'mean']
# historical_growth = historical_growth[['security_code', 'NetPft5YAvgChgTTM']]
historical_growth = historical_growth.drop(dependencies, axis=1)
factor_earning = | pd.merge(factor_earning, historical_growth, how='outer', on='security_code') | pandas.merge |
import os
import sys
import time
import yaml
import json
import pickle
import string
import pathlib
import warnings
import requests
import threading
import numpy as np
import pandas as pd
# Suppress warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
from efficient_apriori import apriori
# Workaround for Keras issue #1406
# "Using X backend." always printed to stdout #1406
# https://github.com/keras-team/keras/issues/1406
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
from keras import backend as kerasbackend
sys.stderr = stderr
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import _utils as utils
import ServerSideExtension_pb2 as SSE
# Add Generated folder to module path
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
class CommonFunction:
"""
A class to implement common data science functions for Qlik.
"""
# Counter used to name log files for instances of the class
log_no = 0
# A locking mechanism to allow tensorflow graphs to be setup uninterrupted by multithreaded requests to the same model
thread_lock = None
def __init__(self, request, context, path="../models/"):
"""
Class initializer.
:param request: an iterable sequence of RowData
:param context:
:param path: a directory path to look for persistent models
:Sets up the model parameters based on the request
"""
# Set the request, context and path variables for this object instance
self.request = request
self.context = context
self.path = path
self.logfile = None
def association_rules(self):
"""
Use an apriori algorithm to uncover association rules between items.
"""
# Interpret the request data based on the expected row and column structure
self._initiate(row_template = ['strData', 'strData', 'strData'], col_headers = ['group', 'item', 'kwargs'])
# Create a list of items for each group
transactions = []
# Iterate over each group and add a tuple of items to the list
for group in self.request_df['group'].unique():
transactions.append(tuple(self.request_df.item[self.request_df.group == group]))
# Get the item sets and association rules from the apriori algorithm
_, rules = apriori(transactions, **self.pass_on_kwargs)
# Prepare the response
response = []
# for each rule get the left hand side and right hand side together with support, confidence and lift
for rule in sorted(rules, key=lambda rule: rule.lift, reverse=True):
lhs = ", ".join(map(str, rule.lhs))
rhs = ", ".join(map(str, rule.rhs))
desc = "{0} -> {1}".format(lhs, rhs)
response.append((desc, lhs, rhs, rule.support, rule.confidence, rule.lift))
# if no association rules were found the parameters may need to be adjusted
if len(response) == 0:
err = "No association rules could be found. You may get results by lowering the limits imposed by " + \
"the min_support and min_confidence parameters.\ne.g. by passing min_support=0.2|float in the arguments."
raise Exception(err)
self.response_df = | pd.DataFrame(response, columns=['rule', 'rule_lhs', 'rule_rhs', 'support', 'confidence', 'lift']) | pandas.DataFrame |
from logging import error, warning
from unittest.loader import VALID_MODULE_NAME
from matplotlib.patches import Polygon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#from shapely.geometry import Polygon
from scipy.interpolate import make_interp_spline, BSpline
# parameters to assemble the directory of the videos
main_folder = 'D:\\Vida\\Trabajo de titulo\\Python_code\\'
vid_names = ["d4_perfil_S_liso","d4_perfil_S_rugoso","d3_perfil_S",
"d2_perfil_S","cola_S_frontal","cola_S_lateral", "d4_4f_2f",
"d4_S_ortogonal_frente","d4_S_ortogonal_lateral", "d4_diagonal",
"d4_diagonal_n", "d4_4f_2f"]
#directory of a document with the vacuum measurement of each video. The headers of the document must have the same name as the folder
vacuum_file_name = "presiones_tesis.csv"
vacuum_dir = main_folder + vacuum_file_name
#Calculates the coordintares of the middle point
def calc_half(coord1, coord2):
x1 = coord1[0]
y1 = coord1[1]
x2 = coord1[0]
y2 = coord1[1]
xm = (x2-x1)/2
ym = (y2-y1)/2
half_coord = (xm,ym)
return half_coord
#Calculate the distance between 2 points
def calc_distance(coord1, coord2):
x1 = coord1[0]
y1 = coord1[1]
x2 = coord2[0]
y2 = coord2[1]
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
return d
#3rd degree polinome
def fit_func_poli(x,a,b,c,d):
return a + (b*x) + (c*x**2) + (d*x**3)
#first derivative
def fit_func_poli_deriv(x,b,c,d):
return b + (2*c*x) + (3*d*x**2)
#second derivative
def fit_func_poli_2deriv(x,c,d):
return 2*c + 6*d*x
#sine equation
def fit_func_sin(x, freq, amplitude, phase, offset):
return np.sin(x * freq + phase) * amplitude + offset
#first derivative
def fit_func_sin_deriv(x, freq, amplitude, phase):
    return freq*amplitude*np.cos(x*freq+phase)
#second derivative
def fit_func_sin_2deriv(x, freq, amplitude, phase):
return -(freq**2)*amplitude*np.sin(x*freq+phase)
#Calculate the theoretical curvature
def calc_curvature_t(x,b,c,d):
numer = np.abs(fit_func_poli_2deriv(x,c,d))
denom = (1+(fit_func_poli_deriv(x,b,c,d))**2)**(3/2)
return numer/denom
#calculate the theoretical deflection using design parameters to estimate the curvature radius angle
def calc_deflection_t(R,side_l,shortening):
#print(np.pi,side_l,shortening)
beta = np.pi - 2 * np.arctan(2*side_l/shortening)
deflect = R * (1-np.cos(beta/2))
return deflect
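# Illustrative sanity check (assumption, not used elsewhere): for the parabola
# x = y**2 (b = 0, c = 1, d = 0) the curvature at y = 0 is |2c| / 1 = 2, so the
# curvature radius there is 0.5.
def _curvature_sanity_check():
    return calc_curvature_t(0.0, b=0.0, c=1.0, d=0.0)  # expected 2.0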
# Returns the slope and intercept of the line through two reference coordinates
def point_slope(coord1, coord2):
x1 = coord1[0]
y1 = coord1[1]
x2 = coord2[0]
y2 = coord2[1]
m, b = np.polyfit([x1,x2],[y1,y2],1)
return m, b
def analize_deflection_t(x,y,a,b,c,d):
#Designs parameters passed as global variables
l = 30.62
dh = 2.12*4
deflections_t = []
x_tm = []
y_tm = []
m1, b1 = point_slope((x[0],y[0]), (x[-1],y[-1]))
    # use only lines perpendicular to the chord through the first and last data points
m2 = -1/m1
for i in range(1, len(x)-1,1):
y_t = y[i]
x_t = a + b*y_t + c*y_t**2 + d*y_t**3
b_t = y_t - x_t*m2
x_tm.append((b_t-b1)/(m1-m2))
y_tm.append(m1*x_tm[-1]+b1)
d_t = calc_distance((x_t,y_t),(x_tm[-1],y_tm[-1]))
side_t = np.sign(x_tm[-1]-x_t)
deflections_t.append(side_t*d_t)
index_t_der = deflections_t.index(min(deflections_t))
index_t_izq = deflections_t.index(max(deflections_t))
deflect_t_der = deflections_t[index_t_der]
deflect_t_izq = deflections_t[index_t_izq]
x_td = a + b*y[index_t_der+1] + c*y[index_t_der+1]**2 + d*y[index_t_der+1]**3
y_td = y[index_t_der+1]
x_ti = a + b*y[index_t_izq+1] + c*y[index_t_izq+1]**2 + d*y[index_t_izq+1]**3
y_ti = y[index_t_izq+1]
coord_ti = (x_ti,y_ti)
coord_td = (x_td,y_td)
coord_tmi = (x_tm[index_t_izq],y_tm[index_t_izq])
coord_tmd = (x_tm[index_t_der],y_tm[index_t_der])
curv_radius_ci = 1 / calc_curvature_t(y_ti,b,c,d)
curv_radius_cd = 1 / calc_curvature_t(y_td,b,c,d)
deflect_c_izq = calc_deflection_t(curv_radius_ci,l,dh)
deflect_c_der = calc_deflection_t(curv_radius_cd,l,dh)
deflect_c = (deflect_c_der,deflect_c_izq)
deflect_t = (-deflect_t_der,deflect_t_izq)
print(deflect_c, deflect_t)
return deflect_c, deflect_t, coord_td, coord_ti, coord_tmd, coord_tmi
def analize_deflection_e(x,y):
deflections_e = []
x_em = []
y_em = []
m1, b1 = point_slope((x[0],y[0]), (x[-1],y[-1]))
#uses only the lines perpendicular to the line from the first and last data
m2 = -1/m1
theta_inclination = np.arctan(m2) * 180 / np.pi
for i in range(1, len(x)-1,1):
b_e = y[i] - x[i]*m2
x_em.append((b_e-b1)/(m1-m2))
y_em.append(m1*x_em[-1]+b1)
d_e = calc_distance((x[i],y[i]),(x_em[-1],y_em[-1]))
side_e = np.sign(x_em[-1]-x[i])
deflections_e.append(side_e*d_e)
index_e_der = deflections_e.index(min(deflections_e))
index_e_izq = deflections_e.index(max(deflections_e))
deflect_e_der = deflections_e[index_e_der]
deflect_e_izq = deflections_e[index_e_izq]
coord_emi = (x_em[index_e_izq],y_em[index_e_izq])
coord_ei = (x[index_e_izq+1], y[index_e_izq+1])
coord_emd = (x_em[index_e_der],y_em[index_e_der])
coord_ed = (x[index_e_der+1], y[index_e_der+1])
deflect_e = (-deflect_e_der,deflect_e_izq)
return deflect_e, coord_emd, coord_emi, coord_ed, coord_ei, theta_inclination
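# Geometry note (illustrative): both deflection routines measure the signed
# distance from each sampled point to the chord through the first and last
# points, along the perpendicular direction. The foot of the perpendicular is
# the intersection of y = m1*x + b1 (chord) and y = m2*x + b_e (perpendicular,
# m2 = -1/m1), i.e. x = (b_e - b1) / (m1 - m2).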
# given the directory, sorts the data and returns it
def handle_dataset(main_dir, vid_name, ds, file_name):
coord_dict = {}
file_loc = main_dir + vid_name + file_name + str(ds) + ".csv"
data = pd.read_csv(file_loc, header=None)
data = data.reset_index()
for index, row in data.iterrows():
coord_dict[index] = (row[0],(1080*ds)-row[1])
x, y = zip(*coord_dict.values())
order = np.argsort(y)
x_n = np.array(x)[order]
y_n = np.array(y)[order]
return x_n, y_n
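# Note (illustrative): image pixel rows grow downwards, so subtracting each row
# value from the frame height (1080 * ds) in handle_dataset flips the y axis
# into conventional plot coordinates before the points are sorted bottom-up.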
def calc_area(coords):
polygon = Polygon(coords)
return polygon.area
# given the data and the fitted data, calculate the residual sum of squares and the residual standard error
def calc_residual_stats(x,fitted_x):
residuals = []
sqrd_residuals = []
for i in range(len(x)):
residuals.append(x[i]-fitted_x[i])
sqrd_residuals.append((x[i]-fitted_x[i])**2)
RSS = sum(sqrd_residuals)
RSE = np.sqrt(RSS/(len(x)-2))
print("RSS:", RSS, "RSE:", RSE)
return RSS, RSE
# Finds the polynomial parameters that fit the data
def fit_to_poli(x,y):
popt, pcov = curve_fit(fit_func_poli, y, x)
fitted_x = []
for item in y:
fitted_x.append(fit_func_poli(item, *popt))
return fitted_x, popt, pcov
# Finds the sine parameters that fit the data
def fit_to_sine(x,y):
#initial guesses
initial_freq = 0.025
initial_amplitude = 3*np.std(x)/(2**0.5)
initial_phase = 0.025
initial_offset = np.mean(x)
p0=[initial_freq, initial_amplitude,initial_phase, initial_offset]
popt, pcov = curve_fit(fit_func_sin, y, x, p0=p0)
fitted_x = []
for item in y:
        fitted_x.append(fit_func_sin(item, *popt))
return fitted_x, popt, pcov
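# Initial-guess note (illustrative): for a pure sine wave the standard
# deviation equals amplitude / sqrt(2), so std(x) * sqrt(2) recovers the
# amplitude; the extra factor used above appears to widen the starting guess
# for curve_fit.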
# Calculate the closest shape of the appendix using a 3rd-degree polynomial
def graph_dataset(main_dir, vid_name, ds, file_name,x,fitted_x,y,popt,coord_td, coord_ti, coord_tmd, coord_tmi, coord_emd, coord_emi, coord_ed, coord_ei):
#print(x.shape )
#xnew = np.linspace(min(np.array(x)), max(np.array(x)), 300)
#spl = make_interp_spline(np.array(x), np.array(y), k=3) # type: BSpline
#power_smooth = spl(xnew)
file_path = main_dir + vid_name + file_name + str(ds) + ".png"
plt.rc('axes', titlesize=36) # fontsize of the axes title
plt.rc('axes', labelsize=32) # fontsize of the x and y labels
fig, ax1 = plt.subplots(1,1)
fig.set_size_inches(19.2,10.8)
ax1.set_title("Discretización Eje Neutro del Apéndice")
ax1.scatter(x,y,color="blue", alpha=0.3)
ax1.plot(x,y,color="blue")
ax1.plot((coord_ed[0],coord_emd[0]),(coord_ed[1],coord_emd[1]),color="blue", alpha=0.8)
ax1.plot((coord_ei[0],coord_emi[0]),(coord_ei[1],coord_emi[1]),color="blue", alpha=0.8, label = "Curva discreta")
ax1.plot((x[0],x[-1]),(y[0],y[-1]),color="c", label = "Línea media")
ax1.plot((coord_td[0],coord_tmd[0]),(coord_td[1],coord_tmd[1]),color="green", alpha=0.8)
ax1.plot((coord_ti[0],coord_tmi[0]),(coord_ti[1],coord_tmi[1]),color="green", alpha=0.8, label = "Curva teórica")
#ax1.plot(xnew,power_smooth,color="red")
ax1.plot(fitted_x, y,'g--', label='fit:a=%5.3f, b=%5.3f, c=%5.3f, d=%5.3f' % tuple(popt))
ax1.set_xlim(0,1920)
ax1.set_ylim(0,1080)
ax1.set_ylabel("Pixeles eje y")
ax1.set_xlabel("Pixeles eje x")
ax1.legend(prop={'size': 22})
ax1.grid()
figManager = plt.get_current_fig_manager()
figManager.set_window_title("Análisis de curvatura " + vid_name + " " + file_name[1:] + str(ds))
figManager.window.showMaximized()
plt.tight_layout()
plt.savefig(file_path, dpi = 100)
#plt.show()
plt.close()
def graph_parameters(main_dir,vid_name,inclinations,vacuum,apparent_length,file_name="_Grafico_parametros"):
file_path = main_dir + vid_name + file_name + ".png"
plt.rc('axes', titlesize=12) # fontsize of the axes title
plt.rc('axes', labelsize=12) # fontsize of the x and y labels
fig, (ax1, ax2) = plt.subplots(2,1)
ax1.set_title("Movimiento general del apéndice")
ax1.plot(vacuum,inclinations,color="blue",label="Inclinación respecto a la horizontal")
ax1.set_ylabel("Inclinación" + ' ['+ u'\N{DEGREE SIGN}' + ']')
ax1.set_xlabel("Vacío [kPa]")
ax1.legend(prop={'size': 7})
ax1.grid()
ax2.set_title("Largo aparente del apéndice")
ax2.plot(vacuum,apparent_length,color="blue",label="Largo línea media")
ax2.set_ylabel("Largo aparente [px]")
ax2.set_xlabel("Vacío [kPa]")
ax2.legend(prop={'size': 7})
ax2.grid()
figManager = plt.get_current_fig_manager()
figManager.set_window_title(file_name + vid_name)
figManager.window.showMaximized()
plt.tight_layout()
plt.savefig(file_path, dpi = 100)
#plt.show()
plt.close()
def graph_deflections(main_dir,vid_name,vacuum,list_deflect_de,list_deflect_ie,list_deflect_dt,list_deflect_it,list_deflect_c_dt,list_deflect_c_it,file_name="_deflexiones"):
    # Plot the graphs
file_path = main_dir + vid_name + file_name + ".png"
fig, (ax1, ax2) = plt.subplots(2, 1)
ylim = max([max(list_deflect_de),max(list_deflect_ie),max(list_deflect_dt),max(list_deflect_it)])
ax1.set_title("Deflexión aparente experimental")
ax1.set_ylabel("Deflexión aparente [px]")
ax1.set_xlabel("Vacío [kPa]")
ax1.set_ylim(0, ylim)
ax1.plot(vacuum, list_deflect_ie, color="orange", label="Deflexión izquierda")
ax1.plot(vacuum, list_deflect_de, color="blue", label="Deflexión derecha")
ax1.legend()
ax2.set_title("Deflexión polyfit")
ax2.set_ylabel("Deflexión [px]")
ax2.set_xlabel("Vacío [kPa]")
ax2.set_ylim(0, ylim)
ax2.plot(vacuum, list_deflect_it, color="green", label="Deflexión izquierda")
ax2.plot(vacuum, list_deflect_dt, color="magenta", label="Deflexión derecha")
ax2.legend()
#ax3.set_title("Deflexión calculada por parametros ")
#ax3.set_xlabel("Deflexión [px]")
#ax3.set_ylabel("Vacío[kPa]")
#ax3.plot(list_deflect_c_it, vacuum, color="c", label="deflexión izquierda")
#ax3.plot(list_deflect_c_dt, vacuum, color="y", label="deflexión derecha")
#ax3.legend()
#ax = plt.subplot(111)
#ax.plot(diseño1_p, diseño1_fi, lw=2, color='orange')
ax1.fill_between(vacuum, 0, list_deflect_ie, alpha=0.1, color='orange')
ax1.fill_between(vacuum, 0, list_deflect_de, alpha=0.1, color='blue')
ax2.fill_between(vacuum, 0, list_deflect_it, alpha=0.1, color='green')
ax2.fill_between(vacuum, 0, list_deflect_dt, alpha=0.1, color='magenta')
#ax3.fill_between(list_deflect_c_it, 0, vacuum, alpha=0.1, color='c')
#ax3.fill_between(list_deflect_c_dt, 0, vacuum, alpha=0.1, color='y')
ax1.grid()
ax2.grid()
#ax3.grid()
figManager = plt.get_current_fig_manager()
figManager.set_window_title(file_name + " " + vid_name)
figManager.window.showMaximized()
plt.tight_layout()
plt.savefig(file_path, dpi = 100)
#plt.show()
plt.close()
def calc_vid_stats(list_RSE, list_deflect_de, list_deflect_dt, list_deflect_ie, list_deflect_it):
residuals_d = []
residuals_i = []
video_RSE = np.mean(list_RSE)
for i in range(len(list_deflect_de)):
residuals_d.append(np.abs(list_deflect_de[i]-list_deflect_dt[i]))
residuals_i.append(np.abs(list_deflect_ie[i]-list_deflect_it[i]))
RSE_deflec_d = sum(residuals_d)/(len(list_deflect_de)-2)
RSE_deflec_i = sum(residuals_i)/(len(list_deflect_ie)-2)
return RSE_deflec_d, RSE_deflec_i, video_RSE
def save_report(RSE_deflec_d, RSE_deflec_i, video_RSE, main_dir, vid_name, file_name="_Estadisticas_dataset_"):
#Create a dataframe to store the relevant information from the datasets
file_loc = main_dir + vid_name + file_name + ".csv"
print(file_loc,type(file_loc))
data = {"RSE deflexion derecha": RSE_deflec_d,"RSE deflexion izquierda":RSE_deflec_i,"RSE Video":video_RSE}
df = | pd.DataFrame(data,index=[0]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0]),
strict=False
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_is_namedtuple(self):
assert checks.is_namedtuple(namedtuple('Hello', ['world'])(*range(1)))
assert not checks.is_namedtuple((0,))
def test_method_accepts_argument(self):
def test(a, *args, b=2, **kwargs):
pass
assert checks.method_accepts_argument(test, 'a')
assert not checks.method_accepts_argument(test, 'args')
assert checks.method_accepts_argument(test, '*args')
assert checks.method_accepts_argument(test, 'b')
assert not checks.method_accepts_argument(test, 'kwargs')
assert checks.method_accepts_argument(test, '**kwargs')
assert not checks.method_accepts_argument(test, 'c')
def test_assert_in(self):
checks.assert_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type(pd.Series([1, 2, 3]), (np.ndarray, pd.Series))
with pytest.raises(Exception) as e_info:
checks.assert_type(pd.DataFrame([1, 2, 3]), (np.ndarray, pd.Series))
def test_assert_subclass(self):
class A:
pass
class B(A):
pass
class C(B):
pass
checks.assert_subclass(B, A)
checks.assert_subclass(C, B)
checks.assert_subclass(C, A)
with pytest.raises(Exception) as e_info:
checks.assert_subclass(A, B)
def test_assert_type_equal(self):
checks.assert_type_equal(0, 1)
checks.assert_type_equal(np.zeros(1), np.empty(1))
with pytest.raises(Exception) as e_info:
checks.assert_type(0, np.zeros(1))
def test_assert_dtype(self):
checks.assert_dtype(np.zeros(1), np.float)
checks.assert_dtype(pd.Series([1, 2, 3]), np.int)
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), np.int)
with pytest.raises(Exception) as e_info:
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.int)
def test_assert_subdtype(self):
checks.assert_subdtype([0], np.number)
checks.assert_subdtype(np.array([1, 2, 3]), np.number)
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.number)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(np.array([1, 2, 3]), np.float)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.float)
def test_assert_dtype_equal(self):
checks.assert_dtype_equal([1], [1, 1, 1])
checks.assert_dtype_equal(pd.Series([1, 2, 3]), pd.DataFrame([[1, 2, 3]]))
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3.]]), pd.DataFrame([[1, 2, 3.]]))
with pytest.raises(Exception) as e_info:
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3.]]))
def test_assert_ndim(self):
checks.assert_ndim(0, 0)
checks.assert_ndim(np.zeros(1), 1)
checks.assert_ndim(pd.Series([1, 2, 3]), (1, 2))
checks.assert_ndim(pd.DataFrame([1, 2, 3]), (1, 2))
with pytest.raises(Exception) as e_info:
checks.assert_ndim(np.zeros((3, 3, 3)), (1, 2))
def test_assert_len_equal(self):
checks.assert_len_equal([[1]], [[2]])
checks.assert_len_equal([[1]], [[2, 3]])
with pytest.raises(Exception) as e_info:
checks.assert_len_equal([[1]], [[2], [3]])
def test_assert_shape_equal(self):
checks.assert_shape_equal(0, 1)
checks.assert_shape_equal([1, 2, 3], np.asarray([1, 2, 3]))
checks.assert_shape_equal([1, 2, 3], pd.Series([1, 2, 3]))
checks.assert_shape_equal(np.zeros((3, 3)), pd.Series([1, 2, 3]), axis=0)
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(1, 0))
with pytest.raises(Exception) as e_info:
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(0, 1))
def test_assert_index_equal(self):
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([2, 3, 4]))
def test_assert_meta_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_meta_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
        checks.assert_meta_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
import datetime
from pandas.api.types import is_numeric_dtype
import timeserio.ini as ini
from timeserio.data.mock import mock_fit_data
from timeserio.preprocessing import PandasDateTimeFeaturizer
from timeserio.preprocessing.datetime import (
get_fractional_day_from_series, get_fractional_hour_from_series,
get_fractional_year_from_series, truncate_series,
get_zero_indexed_month_from_series, get_time_is_in_interval_from_series,
get_is_holiday_from_series
)
datetime_column = ini.Columns.datetime
seq_column = f'seq_{ini.Columns.datetime}'
usage_column = ini.Columns.target
@pytest.fixture
def df():
return mock_fit_data(start_date=datetime.datetime(2017, 1, 1, 1, 0))
@pytest.fixture
def featurizer():
return PandasDateTimeFeaturizer()
def test_get_fractional_hour_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='0.5H', periods=48)
)
fractionalhour = get_fractional_hour_from_series(series)
expected = pd.Series(np.linspace(0, 23.5, 48))
pdt.assert_series_equal(fractionalhour, expected)
def test_get_fractional_day_from_series():
series = pd.Series(pd.date_range(start='2000-01-01', freq='6H', periods=5))
fractional_day = get_fractional_day_from_series(series)
expected = pd.Series([0, 0.25, 0.5, 0.75, 0])
pdt.assert_series_equal(fractional_day, expected)
def test_get_fractional_year_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='31D', periods=5)
)
fractional_year = get_fractional_year_from_series(series)
expected = pd.Series([0, 1, 2, 3, 4]) * 31 / 365.
pdt.assert_series_equal(fractional_year, expected)
def test_get_is_holiday_from_series():
series = pd.Series(pd.date_range(start='2000-01-01', freq='D', periods=5))
is_holiday = get_is_holiday_from_series(series)
expected = pd.Series([1, 1, 1, 1, 0])
pdt.assert_series_equal(is_holiday, expected)
@pytest.mark.parametrize(
"country, expected",
[("England", [1, 0, 0, 1]), ("Scotland", [1, 1, 1, 0])]
)
def test_get_is_holiday_from_series_with_country(country, expected):
dates = ["2020-01-01", "2020-01-02", "2020-08-03", "2020-08-31"]
series = pd.to_datetime(pd.Series(dates))
is_holiday = get_is_holiday_from_series(series, country=country)
pdt.assert_series_equal(is_holiday, pd.Series(expected))
def test_get_zero_indexed_month_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='1M', periods=12)
)
month0 = get_zero_indexed_month_from_series(series)
expected = pd.Series(range(12))
pdt.assert_series_equal(month0, expected)
@pytest.mark.parametrize(
'series_data, truncation_period, expected_data',
[
([pd.Timestamp(2019, 1, 1, 1, 9)], 'H', [pd.Timestamp(2019, 1, 1, 1)]),
([pd.Timestamp(2019, 1, 2, 1)], 'd', [pd.Timestamp(2019, 1, 2)]),
([pd.Timestamp(2019, 1, 1)], 'W', [pd.Timestamp(2018, 12, 31)]),
([pd.Timestamp(2019, 1, 1)], 'W-FRI', [pd.Timestamp(2018, 12, 29)]),
([pd.Timestamp(2019, 1, 1)], 'W-TUE', [pd.Timestamp(2018, 12, 26)]),
([pd.Timestamp(2019, 2, 8)], 'm', [pd.Timestamp(2019, 2, 1)]),
([pd.Timestamp(2019, 3, 4)], 'Y', [pd.Timestamp(2019, 1, 1)]),
(
[pd.Timestamp(2019, 1, 1, 1, 30), pd.Timestamp(2019, 1, 1, 2, 30)],
'H',
[pd.Timestamp(2019, 1, 1, 1), pd.Timestamp(2019, 1, 1, 2)],
),
]
)
def test_truncate_series(series_data, truncation_period, expected_data):
out = truncate_series(pd.Series(series_data), truncation_period)
expected = pd.Series(expected_data)
pdt.assert_series_equal(out, expected)
def test_set_get_params(featurizer):
# FixMe: move to generic test_transformer or sth (IG)
featurizer.set_params(column='wrong_column')
params = featurizer.get_params()
assert 'attributes' in params
assert 'column' in params
assert params['column'] == 'wrong_column'
assert params['column'] == featurizer.column
assert params['attributes'] == featurizer.attributes
def test_with_unknown_attribute(df, featurizer):
featurizer.set_params(attributes='unknown_attribute')
with pytest.raises(KeyError):
featurizer.transform(df)
def test_with_unknown_column(df, featurizer):
featurizer.set_params(column='unknown_column')
with pytest.raises(KeyError):
featurizer.transform(df)
def test_with_non_dt_column(df, featurizer):
featurizer.set_params(column=ini.Columns.target)
with pytest.raises(AttributeError):
featurizer.transform(df)
def test_featurizer(df, featurizer):
df = featurizer.transform(df)
assert len(featurizer.attributes)
for attr in featurizer.attributes:
assert attr in df
        assert is_numeric_dtype(df[attr])
# import pandas as pd
import logging
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
logger = logging.getLogger(__name__)
def subtract_shuffled_null_model_from_panel(panel, shift_null_model=False):
"""Standardize a panel dataset by subtracting the expected value in a null
model.
Given a panel dataset of export values, with items as countries,
time as major_axis, and products as minor_axis, create a new panel in which
an expected value in a null model is subtracted from the data. The null
model assumes that a country's exports are allocated to different products
in the same proportions as those products' total exports are compared to
the total exports of all products.
Parameters
----------
panel : pandas Panel
A panel dataset with `items` being the names of different trajectories
(people, countries, etc.), time as `major_axis`, and features as
`minor_axis`.
shift_null_model : bool, optional, default: False
Whether to shift the null model by one time step so that data is not
normalized by data that depends on itself.
Returns
-------
panel_normalized_null_model : pandas Panel
A normalized panel in which an expected value is subtracted from each
entry in `panel`. The new normalized panel is essentially
`panel - (panel.sum(axis='minor_axis') * panel.sum('items') /
panel.sum('items').sum(axis=1)).
"""
panel_normalized_null_model = panel.copy()
sum_across_items = panel.sum('items')
sum_across_items_and_features = panel.sum('items').sum(axis=1)
share_of_each_feature = (
sum_across_items.div(
sum_across_items_and_features, axis='index')
.shift(int(shift_null_model)))
for item in panel.items:
sum_across_features = panel.loc[item].sum(axis=1)
expected = (
share_of_each_feature).mul(
sum_across_features, axis='index')
panel_normalized_null_model.loc[item] -= expected
return panel_normalized_null_model
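# Minimal usage sketch for the null-model normalization above. The numbers are random
# placeholders, and pd.Panel was removed in pandas 0.25, so this (like the function
# itself) assumes an older pandas version.
def _example_null_model_normalization():
    countries = ['A', 'B']
    years = ['2000', '2001', '2002']
    products = ['p1', 'p2', 'p3', 'p4']
    exports = pd.Panel(
        np.random.rand(len(countries), len(years), len(products)),
        items=countries, major_axis=years, minor_axis=products)
    normalized = subtract_shuffled_null_model_from_panel(exports)
    # Each entry now holds the observed export value minus the value expected if the
    # country allocated exports across products in the world-average proportions.
    return normalized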
class IteratedLog1p(BaseEstimator, TransformerMixin):
"""Transforms features by applying log1p a certain number of times.
Parameters
----------
n : int, default: 1
The number of times to apply numpy.log1p to the data
"""
def __init__(self, n=1, pseudolog=False):
if n < 0:
raise ValueError('`n` must be positive; got {}'.format(n))
self.n = n
def _transformed_filename(self, filename):
if self.n == 1:
return 'log1p_{}'.format(filename)
else:
return 'log1p_applied_{}_times_to_{}'.format(self.n, filename)
def _transformed_name(self, name):
if self.n == 1:
return 'log(1 + {})'.format(name)
else:
return r'log1p^{' + str(self.n) + '}' + '(' + name + ')'
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
"""Apply `numpy.log1p` to `X` `n` times."""
result = X.copy()
for __ in range(self.n):
result = np.log1p(result)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
if hasattr(X, 'name'):
result.name = self._transformed_name(X.name)
if hasattr(X, 'filename'):
result.filename = self._transformed_filename(X.filename)
return result
def inverse_transform(self, X):
"""Apply `np.exp(X) - 1` `n` times."""
result = X.copy()
for __ in range(self.n):
            result = np.exp(result) - 1.0
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
return result
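# Quick round-trip check for IteratedLog1p (illustrative only): inverse_transform
# should undo transform for any n, since exp(.) - 1 is applied once per log1p.
def _example_iterated_log1p_roundtrip():
    X = pd.DataFrame({'a': [0.0, 1.0, 10.0], 'b': [2.0, 3.0, 4.0]})
    tf = IteratedLog1p(n=2)
    X_trans = tf.fit_transform(X)            # log1p applied twice
    X_back = tf.inverse_transform(X_trans)   # exp(.) - 1 applied twice
    assert np.allclose(X_back.values, X.values)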
class PseudoLog(BaseEstimator, TransformerMixin):
"""Transforms features by applying arcsinh(x / 2).
"""
def __init__(self):
pass
def _transformed_filename(self, filename):
return 'pseudolog_{}'.format(filename)
def _transformed_name(self, name):
return 'pseudolog(1 + {})'.format(name)
def _transformed_math_name(self, name):
return 'arcsinh({} / 2)'.format(name)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
"""Apply `arcsinh(x / 2)` to `X`."""
result = np.arcsinh(X / 2.0)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
if hasattr(X, 'name'):
result.name = self._transformed_name(X.name)
if hasattr(X, 'filename'):
result.filename = self._transformed_filename(X.filename)
return result
def inverse_transform(self, X):
"""Apply `np.exp(X) - 1` `n` times."""
result = 2.0 * np.sinh(X)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
return result
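# Analogous sanity check for PseudoLog (illustrative only): arcsinh(x / 2) grows like
# log(x) for large x but is also defined at 0 and for negative values.
def _example_pseudolog_roundtrip():
    X = pd.DataFrame({'a': [-5.0, 0.0, 5.0], 'b': [100.0, 1000.0, 10000.0]})
    tf = PseudoLog()
    X_trans = tf.fit_transform(X)
    X_back = tf.inverse_transform(X_trans)   # 2 * sinh(.) recovers the input
    assert np.allclose(X_back.values, X.values)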
class ScaledLogPositiveData(BaseEstimator, TransformerMixin):
"""Applies x -> (log(x) + min(x[x > 0])) / min(x[x > 0]) to x[x > 0].
"""
def __init__(self, X_min_pos=None):
if X_min_pos == 1.:
raise ValueError('X_min_pos cannot be 1.')
self.X_min_pos = X_min_pos
self.X_min_pos_computed_from_data = False
def _transformed_filename(self, filename):
return 'scaled_log_positive_data_{}'.format(filename)
def _transformed_name(self, name):
return '1 + log({X}) / log({Xminpos}) ({Xminpos} - 1)'.format(
X=name, Xminpos=self._min_pos_repr(name))
def _min_pos_repr(self, name):
return_number = (self.X_min_pos is not None and
not self.X_min_pos_computed_from_data)
if return_number:
return '{:.1f}'.format(self.X_min_pos)
return '{}_{{min. pos.}}'.format(name)
def _transformed_math_name(self, name):
t = r'1 + \log \left ({X} \right ) / \log ({Xminpos}) ({Xminpos} - 1)'
return t.format(X=name, Xminpos=self._min_pos_repr(name))
def fit(self, X, y=None):
"""Fit the scaler by computing the smallest positive value or by using
X_min_pos if specified.
"""
X_array = np.array(X)
if self.X_min_pos is None:
self.X_min_pos_computed_from_data = True
self.X_min_pos = X_array[X_array > 0].min()
logger.info('Computed X_min_pos = {}'.format(self.X_min_pos))
self._is_fitted = True
return self
def _check_fitted(self):
        if not getattr(self, '_is_fitted', False):
raise ValueError('Transformer not yet fitted. Call `fit` first.')
def transform(self, X, y=None, fillna=0.0):
"""Apply (log(Xp) + m) / m, where m = log(X_min_pos) / (X_min_pos - 1)
and X_min_pos is the smallest positive value in X, applied to the
positive data Xp.
"""
self._check_fitted()
X_tr = np.array(X.fillna(fillna) if hasattr(X, 'fillna') else X)
positive = X_tr > 0
m = np.log(self.X_min_pos) / (self.X_min_pos - 1.0)
logger.info('m = {:.6f}'.format(m))
X_tr[positive] = (np.log(X_tr[positive]) + m) / m
if isinstance(X, pd.DataFrame):
X_tr = pd.DataFrame(X_tr, index=X.index, columns=X.columns)
if isinstance(X, pd.Panel):
X_tr = pd.Panel(X_tr, items=X.items, major_axis=X.major_axis,
minor_axis=X.minor_axis)
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Panel):
if hasattr(X, 'name'):
X_tr.name = self._transformed_name(X.name)
if hasattr(X, 'filename'):
X_tr.filename = self._transformed_filename(X.filename)
return X_tr
    def inverse_transform(self, X):
        """Invert the transform on positive entries: exp(m * (X - 1)),
        where m = log(X_min_pos) / (X_min_pos - 1)."""
        self._check_fitted()
        X_inv = np.array(X)
        positive = (X_inv > 0)
        m = np.log(self.X_min_pos) / (self.X_min_pos - 1.0)
        X_inv[positive] = np.exp(X_inv[positive] * m - m)
if isinstance(X, pd.DataFrame):
            X_inv = pd.DataFrame(X_inv, index=X.index, columns=X.columns)
        return X_inv
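# Illustrative check for ScaledLogPositiveData: zeros are left untouched and the
# inverse undoes the transform on positive entries. Note that transform() references
# pd.Panel, so running this as-is assumes the older pandas this module was written for.
def _example_scaled_log_roundtrip():
    X = pd.DataFrame({'a': [0.0, 2.0, 8.0], 'b': [0.0, 4.0, 16.0]})
    tf = ScaledLogPositiveData()
    X_trans = tf.fit_transform(X)            # X_min_pos is computed as 2.0 here
    X_back = tf.inverse_transform(X_trans)
    assert np.allclose(np.array(X_back), X.values)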
import datetime as dt
import pandas as pd
import pytest
import intake
from intake_google_analytics.source import GoogleAnalyticsAPI
from pandas.api.types import (is_datetime64_any_dtype, is_float_dtype,
is_integer_dtype)
from pandas.testing import assert_frame_equal
def test_parse_fields_wrong_style():
with pytest.raises(ValueError):
GoogleAnalyticsAPI._parse_fields(['ga:users'], style='nope')
def test_parse_metrics():
metrics = ['ga:users']
parsed = GoogleAnalyticsAPI._parse_fields(metrics, style='metrics')
assert parsed == [{'expression': 'ga:users'}]
metrics = ['ga:users', 'ga:session']
parsed = GoogleAnalyticsAPI._parse_fields(metrics, style='metrics')
assert parsed == [{'expression': 'ga:users'}, {'expression': 'ga:session'}]
metrics = ['ga:users', {"expression": 'ga:session', 'alias': 'Session'}]
parsed = GoogleAnalyticsAPI._parse_fields(metrics, style='metrics')
assert parsed == [
{'expression': 'ga:users'},
{"expression": 'ga:session', 'alias': 'Session'}
]
metrics = [{"expression": 'ga:session'}]
parsed = GoogleAnalyticsAPI._parse_fields(metrics, style='metrics')
assert parsed == metrics
metrics = [{"expression": 'ga:session', 'alias': 'Session'}]
parsed = GoogleAnalyticsAPI._parse_fields(metrics, style='metrics')
assert parsed == metrics
with pytest.raises(ValueError):
metrics = [{"espresso": 'ga:session', 'alias': 'Session'}]
GoogleAnalyticsAPI._parse_fields(metrics, style='metrics')
with pytest.raises(ValueError):
GoogleAnalyticsAPI._parse_fields([1], style='metrics')
def test_parse_dimensions():
dimensions = ['ga:userType']
parsed = GoogleAnalyticsAPI._parse_fields(dimensions, style='dimensions')
assert parsed == [{'name': 'ga:userType'}]
dimensions = ['ga:userType', 'ga:date']
parsed = GoogleAnalyticsAPI._parse_fields(dimensions, style='dimensions')
assert parsed == [{'name': 'ga:userType'}, {'name': 'ga:date'}]
dimensions = ['ga:userType', {'name': 'ga:date'}]
parsed = GoogleAnalyticsAPI._parse_fields(dimensions, style='dimensions')
assert parsed == [{'name': 'ga:userType'}, {'name': 'ga:date'}]
dimensions = [{'name': 'ga:date'}]
parsed = GoogleAnalyticsAPI._parse_fields(dimensions, style='dimensions')
assert parsed == dimensions
with pytest.raises(ValueError):
dimensions = [{"nom": 'ga:date'}]
GoogleAnalyticsAPI._parse_fields(dimensions, style='dimensions')
with pytest.raises(ValueError):
GoogleAnalyticsAPI._parse_fields([1], style='dimensions')
def test_parse_date_objects():
assert GoogleAnalyticsAPI._parse_date('2020-03-19') == '2020-03-19'
assert GoogleAnalyticsAPI._parse_date(dt.date(2020, 3, 19)) == '2020-03-19'
assert GoogleAnalyticsAPI._parse_date(dt.datetime(2020, 3, 19, 16, 20, 0)) == '2020-03-19'
assert GoogleAnalyticsAPI._parse_date(pd.to_datetime('2020-03-19 16:20:00')) == '2020-03-19'
assert GoogleAnalyticsAPI._parse_date(pd.Timestamp(2020, 3, 19, 16, 20, 0)) == '2020-03-19'
with pytest.raises(TypeError):
GoogleAnalyticsAPI._parse_date(dt.timedelta(days=2))
def test_parse_date_strings():
assert GoogleAnalyticsAPI._parse_date('yesterday') == 'yesterday'
assert GoogleAnalyticsAPI._parse_date('today') == 'today'
assert GoogleAnalyticsAPI._parse_date('1000DaysAgo') == '1000DaysAgo'
with pytest.raises(ValueError):
GoogleAnalyticsAPI._parse_date('tomorrow')
with pytest.raises(ValueError):
GoogleAnalyticsAPI._parse_date('πDaysAgo')
def test_query_body(monkeypatch):
monkeypatch.setattr(GoogleAnalyticsAPI, 'create_client', lambda x: None)
inputs = {
'view_id': 'VIEWID',
'start_date': '5DaysAgo', 'end_date': 'yesterday',
'metrics': ['ga:users']
}
expected_body = {'reportRequests': [
{'dateRanges': [{'endDate': 'yesterday', 'startDate': '5DaysAgo'}],
'hideTotals': True,
'hideValueRanges': True,
'includeEmptyRows': True,
'metrics': [{'expression': 'ga:users'}],
'viewId': 'VIEWID'}
]}
client = GoogleAnalyticsAPI(None)
body = client._build_body(**inputs)
assert body == expected_body
def test_query_body_with_dimensions(monkeypatch):
monkeypatch.setattr(GoogleAnalyticsAPI, 'create_client', lambda x: None)
inputs = {
'view_id': 'VIEWID',
'start_date': '5DaysAgo', 'end_date': 'yesterday',
'metrics': ['ga:users'],
'dimensions': ['ga:userType']
}
expected_body = {'reportRequests': [
{'dateRanges': [{'endDate': 'yesterday', 'startDate': '5DaysAgo'}],
'hideTotals': True,
'hideValueRanges': True,
'includeEmptyRows': True,
'metrics': [{'expression': 'ga:users'}],
'dimensions': [{'name': 'ga:userType'}],
'viewId': 'VIEWID'}
]}
client = GoogleAnalyticsAPI(None)
body = client._build_body(**inputs)
assert body == expected_body
def test_dataframe_empty_report():
report = {
'columnHeader':
{'metricHeader': {'metricHeaderEntries': [{'name': 'ga:users', 'type': 'INTEGER'}]}},
'data': {}
}
df = GoogleAnalyticsAPI._to_dataframe(report)
assert df.empty
datetime_dimensions = [
('ga:yearMonth', '202003'),
('ga:date', '20200319'),
('ga:dateHour', '2020031916'),
('ga:dateHourMinute', '202003191620'),
]
@pytest.mark.parametrize('dimension', datetime_dimensions, ids=[p[0] for p in datetime_dimensions])
def test_dataframe_datetime_dimensions(dimension):
dim, value = dimension
report = {
'columnHeader':
{'dimensions': [dim],
'metricHeader': {'metricHeaderEntries': [{'name': 'ga:users', 'type': 'INTEGER'}]}},
'data': {
'rowCount': 1,
'rows': [{'dimensions': [value],
'metrics': [{'values': ['1']}]}]
}
}
df = GoogleAnalyticsAPI._to_dataframe(report)
assert is_datetime64_any_dtype(df[dim])
def test_dataframe_multiple_datetime_dimensions():
multi_column = {
'columnHeader':
{'dimensions': ['ga:date', 'ga:dateHourMinute'],
'metricHeader': {'metricHeaderEntries': [{'name': 'ga:users', 'type': 'INTEGER'}]}},
'data': {
'rowCount': 1,
'rows': [{'dimensions': ['20200319', '202003191620'],
'metrics': [{'values': ['1']}]}]
}
}
df = GoogleAnalyticsAPI._to_dataframe(multi_column)
assert is_datetime64_any_dtype(df['ga:dateHourMinute'])
assert is_datetime64_any_dtype(df['ga:date'])
metric_dtypes = [
('INTEGER', "ga:users", '1', is_integer_dtype),
('TIME', 'ga:sessionDuration', '1.1', is_float_dtype),
('PERCENT', 'ga:percentNewSessions', '1.1', is_float_dtype),
('CURRENCY', 'ga:goalValueAll', '1.1', is_float_dtype),
('FLOAT', 'ga:pageviewsPerSession', '1.1', is_float_dtype)
]
@pytest.mark.parametrize('metric', metric_dtypes, ids=[p[0] for p in metric_dtypes])
def test_dataframe_metric_dtype(metric):
ga_type, column, value, test_func = metric
report = {
'columnHeader':
{'metricHeader': {'metricHeaderEntries':
[{'name': column, 'type': ga_type}]}},
'data': {
'rowCount': 1,
'rows': [{'metrics': [{'values': [value]}]}]
}
}
df = GoogleAnalyticsAPI._to_dataframe(report)
assert test_func(df[column])
class MockGAClient():
def __init__(self, credentials_path):
pass
def batchGet(self, body):
return MockGABatch(body)
class MockGABatch():
def __init__(self, body):
self.body = body
def execute(self):
pass
def test_query_to_dataframe(monkeypatch):
monkeypatch.setattr(MockGABatch, 'execute', lambda body: {
'reports': [
{'columnHeader': {'metricHeader': {'metricHeaderEntries': [{'name': 'ga:users',
'type': 'INTEGER'}]}},
'data': {'rowCount': 1, 'rows': [{'metrics': [{'values': ['1']}]}]}}
]
}
)
monkeypatch.setattr(GoogleAnalyticsAPI, 'create_client', lambda x: MockGAClient(x))
ga_api = GoogleAnalyticsAPI(None)
df = ga_api.query(
'VIEWID',
start_date='5DaysAgo', end_date='yesterday',
metrics=['ga:user']
)
    assert_frame_equal(df, pd.DataFrame([{'ga:users': 1}]))
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automated code, needs some hand work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
2. each room has different window, door, ac, indoor, outdoor info
3. I processed buildings A to F by hand, then figured out that I can rename the files first, then use code to process
4. rename the files by type and number, such as window1, indoor1, ac1, door1, etc.
5. code automated G, H, I
6. the folder has multiple types of data, csv and xlsx; figure out the file type, then read into pandas
7. concat the outdoor datetime and temperature with ac data, then judge if the ac is on or off
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files))  # filter out the outdoor files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manually processed data '''
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
print(f'Reading the data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
# combine
indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
    # read and combine the files under this folder
if indoor_files: # make sure it is not empty
indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
else:
pass
if window_files:
window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
# print(combined_hvac.isnull().sum())
# print(index)
else:
pass
if door_files:
door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
# print(combined_door.isnull().sum())
# print(index)
else:
pass
''' automated process by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
'''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
name, extension = os.path.splitext(window_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
name, extension = os.path.splitext(door_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
else:
temp_df = pd.read_excel(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status']
temp_df['Door_ID'] = int(name.split('_')[0][4:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_door = pd.concat([combined_door, temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
for hvac_name in hvac_files:
name, extension = os.path.splitext(hvac_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
else:
temp_df = pd.read_excel(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
temp_df['HVAC_Zone_ID'] = int(name.split('_')[0][2:]) # get the number of ac
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_hvac = pd.concat([combined_hvac, temp_df], ignore_index=True) # concat the data
else:
pass
# drop na rows when specific column is null
combined_indoor = combined_indoor[combined_indoor['Date_Time'].notnull()]
combined_outdoor = combined_outdoor[combined_outdoor['Date_Time'].notnull()]
combined_window = combined_window[combined_window['Date_Time'].notnull()]
combined_door = combined_door[combined_door['Date_Time'].notnull()]
combined_hvac = combined_hvac[combined_hvac['Date_Time'].notnull()]
# process windows, door open/close data
combined_door['Door_Status'] = combined_door['Door_Status'].replace([0, 1, 2], [1, 0, 0])
combined_window['Window_Status'] = combined_window['Window_Status'].replace([0, 1, 2], [1, 0, 0])
# format datetime
print("Formatting datetime!")
combined_indoor['Date_Time'] = pd.to_datetime(combined_indoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_outdoor['Date_Time'] = pd.to_datetime(combined_outdoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_window['Date_Time'] = pd.to_datetime(combined_window['Date_Time'], infer_datetime_format=True)
combined_door['Date_Time'] = pd.to_datetime(combined_door['Date_Time'], infer_datetime_format=True)
combined_hvac['Date_Time'] = pd.to_datetime(combined_hvac['Date_Time'], infer_datetime_format=True)
# format data type
print(combined_indoor.dtypes)
print(combined_outdoor.dtypes)
print(combined_window.dtypes)
print(combined_door.dtypes)
print(combined_hvac.dtypes)
combined_indoor['Building_ID'] = combined_indoor['Building_ID'].astype(int)
combined_indoor['Room_ID'] = combined_indoor['Room_ID'].astype(int)
combined_outdoor['Building_ID'] = combined_outdoor['Building_ID'].astype(int)
combined_window['Building_ID'] = combined_window['Building_ID'].astype(int)
combined_window['Room_ID'] = combined_window['Room_ID'].astype(int)
combined_window['Window_ID'] = combined_window['Window_ID'].astype(int)
combined_door['Building_ID'] = combined_door['Building_ID'].astype(int)
combined_door['Room_ID'] = combined_door['Room_ID'].astype(int)
combined_door['Door_ID'] = combined_door['Door_ID'].astype(int)
combined_hvac['Building_ID'] = combined_hvac['Building_ID'].astype(int)
combined_hvac['Room_ID'] = combined_hvac['Room_ID'].astype(int)
combined_hvac['HVAC_Zone_ID'] = combined_hvac['HVAC_Zone_ID'].astype(int)
# replace null with empty
# # check combined data
# print('check null values')
# print(combined_window.isnull().sum())
# print(combined_door.isnull().sum())
# print(combined_hvac.isnull().sum())
#
# # check the unique IDs
# print(combined_window.Window_ID.unique())
# print(combined_door.Door_ID.unique())
# print(combined_hvac.HVAC_Zone_ID.unique())
#
# print(combined_hvac.Building_ID.unique())
# print(combined_window.Building_ID.unique())
# print(combined_door.Building_ID.unique())
# save data
combined_indoor.to_csv(save_path + 'combined_indoor.csv', index=False)
combined_outdoor.to_csv(save_path + 'combined_outdoor.csv', index=False)
combined_window.to_csv(save_path + 'combined_window.csv', index=False)
combined_door.to_csv(save_path + 'combined_door.csv', index=False)
combined_hvac.to_csv(save_path + 'combined_hvac.csv', index=False)
''' read templates and save data into the standard templates '''
# data
combined_indoor = pd.read_csv(save_path + 'combined_indoor.csv')
combined_outdoor = pd.read_csv(save_path + 'combined_outdoor.csv')
combined_window = pd.read_csv(save_path + 'combined_window.csv')
combined_door = pd.read_csv(save_path + 'combined_door.csv')
combined_hvac = pd.read_csv(save_path + 'combined_hvac.csv')
# templates
# read templates into pandas
template_window = pd.read_csv(template_path + 'Window_Status.csv')
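
# The original script stops here after loading the first template; the remaining template
# reads and the step that writes the combined data into the standard templates are not
# shown. The function below is a hypothetical sketch of that mapping. It assumes the
# Window_Status template is a wide table with a 'Date_Time' column plus one column per
# window ID -- an assumption about the template layout, not something taken from the file.
def fill_window_template(template_df, window_df):
    """Pivot the long combined window data into a wide, template-shaped table (sketch)."""
    wide = (window_df.drop_duplicates(subset=['Date_Time', 'Window_ID'])
                     .pivot(index='Date_Time', columns='Window_ID', values='Window_Status'))
    wide.columns = ['Window_' + str(col) for col in wide.columns]
    wide = wide.reset_index()
    # keep only the columns defined by the template, in the template's column order
    return wide.reindex(columns=template_df.columns)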
import pandas as pd
from math import ceil
import pyfpgrowth
def aggregate_frequent_itemsets(frequent_itemset_dict):
result = pd.DataFrame(columns=['errorCode', 'numberWindowsTotal', 'itemset', 'support', 'supportRel'])
for err_code, df in frequent_itemset_dict.items():
        result = pd.concat([result, df], axis=0)
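
# pyfpgrowth and ceil are imported above, but the mining step that produces
# frequent_itemset_dict is not shown. The function below is a hypothetical sketch of that
# step: 'transactions_by_error' (a dict of error code -> list of transactions) and the
# relative support threshold of 0.1 are illustrative assumptions, not values taken from
# the original pipeline.
def mine_frequent_itemsets(transactions_by_error, min_support_rel=0.1):
    frequent_itemset_dict = {}
    for err_code, transactions in transactions_by_error.items():
        n_windows = len(transactions)
        threshold = max(1, ceil(min_support_rel * n_windows))
        # pyfpgrowth returns a dict mapping each frequent itemset (tuple) to its count
        patterns = pyfpgrowth.find_frequent_patterns(transactions, threshold)
        frequent_itemset_dict[err_code] = pd.DataFrame({
            'errorCode': err_code,
            'numberWindowsTotal': n_windows,
            'itemset': list(patterns.keys()),
            'support': list(patterns.values()),
            'supportRel': [count / n_windows for count in patterns.values()],
        })
    return frequent_itemset_dict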
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
def resample_timeseries_dataframe(df,
dt_col,
interval,
start_time,
end_time,
merge_mode="mean"):
'''
resample and return a dataframe with a new time interval.
:param df: input dataframe.
:param dt_col: name of datetime column.
:param interval: pandas offset aliases, indicating time interval of the output dataframe
:param start_time: start time of the output dataframe
:param end_time: end time of the output dataframe
:param merge_mode: if current interval is smaller than output interval,
we need to merge the values in a mode. "max", "min", "mean"
or "sum" are supported for now.
'''
assert dt_col in df.columns, f"dt_col {dt_col} can not be found in df."
assert pd.isna(df[dt_col]).sum() == 0, "There is N/A in datetime col"
assert pd.Timestamp(start_time) <= pd.Timestamp(
end_time), "end time must be later than start time."
assert merge_mode in ["max", "min", "mean", "sum"],\
f"merge_mode should be one of [\"max\", \"min\", \"mean\", \"sum\"]," \
f" but found {merge_mode}."
start_time_stamp = pd.Timestamp(start_time)
end_time_stamp = pd.Timestamp(end_time)
zero_time_stamp = pd.Timestamp(0, unit='ms')
res_df = df.copy()
res_df[dt_col] = df.apply(
lambda row: resample_helper(
row[dt_col],
interval,
start_time_stamp,
end_time_stamp,
zero_time_stamp),
axis=1)
res_df = res_df[~res_df[dt_col].isin([None])]
if merge_mode == "max":
res_df = res_df.groupby([dt_col]).max()
if merge_mode == "min":
res_df = res_df.groupby([dt_col]).min()
if merge_mode == "mean":
res_df = res_df.groupby([dt_col]).mean()
if merge_mode == "sum":
res_df = res_df.groupby([dt_col]).sum()
new_start = start_time_stamp + \
(interval - divmod(start_time_stamp - zero_time_stamp, pd.Timedelta(interval))[1])
new_end = end_time_stamp - \
divmod(end_time_stamp - zero_time_stamp, pd.Timedelta(interval))[1]
new_end = new_start if new_start > new_end else new_end
new_index = pd.date_range(start=new_start, end=new_end, freq=interval)
res_df = res_df.reindex(new_index)
res_df.index.name = dt_col
res_df = res_df.reset_index()
return res_df
def resample_helper(curr_time,
interval,
start_time_stamp,
end_time_stamp,
zero_time_stamp):
    offset = divmod((curr_time - zero_time_stamp), pd.Timedelta(interval))
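
# resample_helper() is cut off above. Assuming it is completed as in the original
# Analytics Zoo source (snapping each timestamp onto the interval grid and returning
# None for rows outside [start_time, end_time]), a minimal usage sketch of the public
# function looks like this; the column names, interval and time range are illustrative.
if __name__ == "__main__":
    raw = pd.DataFrame({
        "datetime": pd.date_range("2020-01-01 00:00", periods=8, freq="10min"),
        "value": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
    })
    resampled = resample_timeseries_dataframe(raw,
                                              dt_col="datetime",
                                              interval="30min",
                                              start_time="2020-01-01 00:00",
                                              end_time="2020-01-01 01:10",
                                              merge_mode="mean")
    print(resampled)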
"""Tool for generating random events on the sky."""
import argparse
from configparser import ConfigParser
import code
import logging
import numpy as np
import pandas as pd
max_iter_npoints = 1000000
min_iter_npoints = 100
info = logging.info
def num_events(rate, volume, uptime, nyears):
"""Randomly determine a number of everts.
Args:
rate - events per Gpc^3 per year
volume - detectable volume, in Gpc^3
uptime - fraction of detector uptime
nyears - the number of years
"""
expected_events = rate*volume*uptime*nyears
n_events = np.random.poisson(expected_events)
return n_events
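
# Worked example (illustrative numbers only): with rate = 1000 events per Gpc^3 per
# year, volume = 0.1 Gpc^3, uptime = 0.7 and nyears = 2, the expected count is
# 1000 * 0.1 * 0.7 * 2 = 140, and num_events() returns a Poisson draw around that
# mean (standard deviation ~ sqrt(140) ~= 12), e.g.:
#
#     n = num_events(rate=1000, volume=0.1, uptime=0.7, nyears=2)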
def sample_sphere(n, truncate=True):
"""Randomly generate pointings on a sphere.
Use a conceptually straightforward (but inefficient) way to
generate random points on a sphere: generate random poinds in a
cube, throw away points outside a sphere contained entirely in the
cube, and project onto the sphere.
Args:
n - the number of points to generate
truncate - if the algorithm ends up with more points than
requested, return only the number requested, not all
generated points.
Returns:
a pandas.DataFrame of the randomly generated points
"""
point_dfs = []
accumulated_samples = 0
while accumulated_samples < n:
# (2*r)^3 / (4/3 pi r^3) = 6/pi
iter_npoints = min(int(np.round((n-accumulated_samples)*6/np.pi)),
max_iter_npoints)
# do 3-sigma more
        iter_npoints = iter_npoints + int(3 * np.sqrt(iter_npoints))
iter_npoints = max(iter_npoints, min_iter_npoints)
x = np.random.uniform(-1, 1, iter_npoints)
y = np.random.uniform(-1, 1, iter_npoints)
z = np.random.uniform(-1, 1, iter_npoints)
r = np.sqrt(x*x+y*y+z*z)
in_sphere = r < 1.0
r = r[in_sphere]
x = x[in_sphere]/r
y = y[in_sphere]/r
z = z[in_sphere]/r
theta = np.arccos(z)
phi = np.arctan2(y, x)
ra = (np.degrees(phi) + 360) % 360
decl = 90.0-np.degrees(theta)
        new_df = pd.DataFrame({'ra': ra, 'decl': decl})
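
# Note on the oversampling factor above: the unit sphere fills pi/6 ~= 52.4% of its
# bounding cube ((4/3)*pi*r^3 versus (2*r)^3), so only about half of the uniform cube
# points survive the r < 1 cut; drawing roughly (n_remaining * 6/pi) points plus a
# 3-sigma margin per iteration therefore usually completes the request in one pass.
# Quick illustrative check of the acceptance rate (not part of the original tool):
#
#     pts = np.random.uniform(-1, 1, (100000, 3))
#     accept = (np.linalg.norm(pts, axis=1) < 1).mean()   # ~= np.pi / 6 ~= 0.524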
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
        # this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
        tup = Timedelta('-1 days 1 us')