| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
#!/usr/bin/env python3
import pandas as pd
import sys
import json
with open("data/TALB_2018.geojson") as f:
geojson = json.load(f)
for i in range(len(geojson["features"])):
geojson["features"][i]["properties"]["cancer"] = {}
def find_talb(name):
if pd.isna(name):
return
for i,f in enumerate(geojson["features"]):
if f["properties"]["TALB2018_1"].startswith(name.replace(".", "")):
return i
print(name + " not found")
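# Illustrative note (added comment, not in the original script): find_talb maps a
# territorial-authority/local-board name from the spreadsheet to the index of the matching
# GeoJSON feature; e.g. a hypothetical call find_talb("Tauranga City") returns the index i
# such that geojson["features"][i]["properties"]["TALB2018_1"] starts with "Tauranga City",
# or prints a warning and returns None if no feature matches.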
year_bands = ["2010-2012", "2013-2015", "2016-2018"]
cancer_types = ["breast", "prostate", "lung"]
maori = ["maori", "non-maori", "total"]
keys = ["TALB"]
for c in cancer_types:
for m in maori:
for y in year_bands:
keys.append(c + m + y)
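# For illustration (added comment, not in the original script): the nested loops above
# build column names by plain concatenation, so the first few entries of `keys` are
# "TALB", "breastmaori2010-2012", "breastmaori2013-2015", "breastmaori2016-2018",
# "breastnon-maori2010-2012", ...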
numerator = pd.read_excel("misc/annual_counts_OUTPUT - Checked.xlsx", sheet_name="TALB", skiprows=10, nrows=88, names=keys)
import argparse
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import scipy.cluster
import scipy.spatial.distance
import sklearn.cluster
import sklearn.feature_extraction
import sklearn.manifold
import sklearn.metrics.pairwise
if True:
p = argparse.ArgumentParser()
p.add_argument("--tag", required=True)
args = p.parse_args()
tag = args.tag
else:
tag = "Hemoglobin_CTEP Trials_072018"
#tag = "Platelets_CTEP Trials_072018"
#tag = "WBC_CTEP Trials_072018"
#tag = "HIV_CTEPTrials_072018"
input_tsv = "../nci_data/dataset1-trials/" + tag + ".tsv"
output_pdf = "./" + tag + ".clustering.pdf"
features_csv = "./" + tag + ".features.csv"
linkage_matrix_csv = "./" + tag + ".linkage_matrix.csv"
# Load data.
tb = pd.read_table(input_tsv)
num_rows_excluded = sum(pd.isnull(tb["Boolean"]))
num_rows_orig = tb.shape[0]
tb = tb.loc[~pd.isnull(tb["Boolean"]),:]
tb = tb.reset_index(drop=True)
num_rows = tb.shape[0]
print("Excluding %d of %d rows" % (num_rows_excluded, num_rows_orig))
print("After exclusion, %d rows remain" % num_rows)
# Parse boolean.
def f(b):
b = re.sub(r"[()]", "", b)
operators = [w for w in b.split() if w in ("OR", "AND")]
as_ops = b.replace("OR", "OP").replace("AND", "OP")
triples = [tuple(re.split(r'(>=|<=|>|<|==|=)', t.strip(), maxsplit=1)) for t in as_ops.split("OP")]
triples = [tuple(ti.strip() for ti in t) for t in triples]
for i, t in enumerate(triples):
print(t)
if len(t) == 2:
new_triple = (t[0], t[1], "?")
print("Warning: {} is not of length 3, replacing with {}".format(t, new_triple))
triples[i] = new_triple
if len(t) == 1:
new_triple = (t[0], "?", "?")
print("Warning: {} is not of length 3, replacing with {}".format(t, new_triple))
triples[i] = new_triple
return {"triples": triples, "operators": operators}
def g(b):
if pd.isnull(b):
return b
else:
return f(b)
tb["parsed"] = [g(b) for b in tb["Boolean"]]
triples = [x["triples"] for x in tb["parsed"] if x]
operators = [x["operators"] for x in tb["parsed"]]
# Make features.
feat = [collections.defaultdict(float) for i in range(tb.shape[0])]
for i, triple_list in enumerate(triples):
for l, c, r in triple_list:
# Add count of each element alone within each triple.
feat[i]["l_count_%s" % l] += 1
feat[i]["c_count_%s" % c] += 1
feat[i]["r_count_%s" % r] += 1
# Add count of each pair of elements within each triple.
feat[i]["lc_count_(%s, %s)" % (l, c)] += 1
feat[i]["lr_count_(%s, %s)" % (l, r)] += 1
feat[i]["cr_count_(%s, %s)" % (c, r)] += 1
# Add count of each triple.
t1 = (l, c, r)
feat[i]["triple_count_%s" % str(t1)] += 1
# Add count of each pair of triples.
for t2 in triple_list:
feat[i]["triple_pair_count_%s_%s" % (str(t1), str(t2))] += 1
for i, operator_list in enumerate(operators):
for o1 in operator_list:
# Add count for each operator.
feat[i]["operator_count_%s" % o1] += 1
# Add count for each pair of operators.
for o2 in operator_list:
feat[i]["operator_pair_count_%s_%s" % (o1, o2)] += 1
# Make feature matrix.
feature_vectorizer = sklearn.feature_extraction.DictVectorizer(sparse=False)
X = feature_vectorizer.fit_transform(feat)
# Carry out hierarchical clustering.
#hc_linkage = scipy.cluster.hierarchy.linkage(X, method="ward", metric="euclidean")
hc_linkage = scipy.cluster.hierarchy.linkage(X, method="complete", metric="cosine")
#hc_linkage = scipy.cluster.hierarchy.linkage(X, method="average", metric="cosine")
# Plot clustering.
h = 25.0 * tb.shape[0] / 174
fig = plt.figure(figsize=(25, h))
leaf_labels = [x for x in tb["Boolean"]]
dn = scipy.cluster.hierarchy.dendrogram(hc_linkage, labels=leaf_labels, orientation="left")
plt.title("Hierarchical clustering of %s " % tag)
plt.axis('tight')
plt.subplots_adjust(right=0.45)
plt.savefig(output_pdf)
plt.close(fig)
# Save features used for clustering.
feature_colnames = ["feature_%s" % x for x in feature_vectorizer.get_feature_names()]
feature_tb = pd.DataFrame(X, index=tb.index, columns=feature_colnames)
feature_with_orig_tb = pd.concat((tb, feature_tb), axis=1)
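# The snippet ends here in the source. A plausible (hypothetical) final step, using the
# output paths already defined above, would be to persist the feature table and the
# linkage matrix, e.g.:
# feature_with_orig_tb.to_csv(features_csv, index=False)
# pd.DataFrame(hc_linkage).to_csv(linkage_matrix_csv, index=False)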
import os
import pandas as pd
import numpy as np
from itertools import chain
from codifyComplexes.CodifyComplexException import CodifyComplexException
from .DataLoaderClass import DataLoader
#TODO: REWRITE PROTOCOL CLASSES TO REDUCE COUPLING. E.g. pairwise aggregation should be generic for both seq and struct, just the neighbours differ
FEATURES_MISMATH_TOLERANCE=0.1 #Fraction of missing residues between different features that will trigger error
class AbstractProtocol(DataLoader):
'''
This class is a template for codification protocols
'''
DESIRED_ORDER=["L", "R", "P"]
def __init__(self, dataRootPath, cMapPath, prevStepPaths, singleChainfeatsToInclude,
pairfeatsToInclude=None, verbose=False):
'''
:param dataRootPath: str. A path to computedFeatures directory that contains needed features. Example:
computedFeatures/
common/
contactMaps/
seqStep/
conservation/
...
structStep/
PSAIA/
VORONOI/
...
:param cMapPath: str. A path to a dir that contains the contact map of the protein complex
:param prevStepPaths: str or str[]. A path to previous results files directory. If it is None, contactMaps files will be used
to define which residue pairs are in contact. Can also be a str[] if multiple feedback_path's
wanted
:param singleChainfeatsToInclude: List that contains the paths where single chain features needed for complex
codification are located. Must have the following format:
[("featName", (relativePath_from_dataRootPath, listOfColumnNumbers)), ...]
:param pairfeatsToInclude: List that contains the paths where pair features needed for complex
codification are located. Must have the following format:
[("featName", (relativePath_from_dataRootPath, listOfColumnNumbers)), ...]
:param verbose: bool.
'''
if not hasattr(self, "dataRootPath"):
DataLoader.__init__(self, dataRootPath, verbose)
self.cMapPath= cMapPath
self.singleChainfeatsToInclude= singleChainfeatsToInclude
self.pairfeatsToInclude= pairfeatsToInclude
if prevStepPaths is None:
self.prevFnamesList=None
else:
self.prevFnamesList= prevStepPaths if isinstance(prevStepPaths,list) else [prevStepPaths]
self.prevFnamesList= [os.path.join(onePath, fname) for onePath in self.prevFnamesList for fname in os.listdir(onePath)
if fname.endswith(".res.tab.gz")]
if self.pairfeatsToInclude and not self.useCorrMut:
self.pairfeatsToInclude = [ elem for elem in self.pairfeatsToInclude if elem[0]!="corrMut"]
if len(self.pairfeatsToInclude)==0:
self.pairfeatsToInclude = None
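# Illustrative, hypothetical configuration (added comment; actual feature names and
# relative paths depend on the concrete pipeline), following the format described in
# the class docstring:
# singleChainfeatsToInclude = [("psaia", ("structStep/PSAIA/", [4, 5, 6])),
#                              ("conservation", ("seqStep/conservation/", [3, 4]))]
# pairfeatsToInclude = [("corrMut", ("seqStep/conservation/", [3]))]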
def applyProtocol( self, prefixComplex):
'''
This method is the basic skeleton for applyProtocol of subclasses
Given a prefix that identifies the complex and prefixes that identifies
the ligand and the receptor, this method integrates the information that
is contained in self.dataRootPath and is described in self.singleChainfeatsToInclude
:param prefixComplex: str. A prefix that identifies a complex
:return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (L to R).
Column names are:
'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'
[propertiesP .... propertiesL .... propertiesR]
'''
#load contact maps or previous results to define pairs of residues ligand to receptor
raw_prefix= prefixComplex.split("@")[0].split("#")[0]
if self.prevFnamesList is None:
cmapNameList= list(self.getFullNamesIterInPath( raw_prefix, self.cMapPath))
if len(cmapNameList)>1:
raise ValueError("There are more than 1 Contact map for %s in %s path"%(prefixComplex,self.cMapPath))
allPairsCodified= self.loadDataFile(cmapNameList)
else:
allPairsCodified= self.loadPreviousResults(prefixComplex)
if not self.pairfeatsToInclude is None: #add pairwise features if there are available
allPairsCodified= self.addPairFeatures(raw_prefix, allPairsCodified)
lFeats= self.loadSingleChainFeatures( raw_prefix, chainType="l")
rFeats= self.loadSingleChainFeatures( raw_prefix, chainType="r")
#add single chain features to contact map (or pairwise features)
allPairsCodified= self.combinePairwiseAndSingleChainFeats(allPairsCodified,lFeats, rFeats)
assert allPairsCodified.shape[0]>1, "Error, %s dataset is empty"%prefixComplex
#Reorder columns to DESIRED_ORDER order
allPairsCodified= self.reorderColumns(allPairsCodified)
return allPairsCodified
def reorderColumns(self, allPairsCodified):
colNames= list(allPairsCodified.columns)
categIndex= colNames.index("categ")# categ is the last non-feature column. All previous columns are ids
lFeatNames= [elem for elem in colNames[(categIndex+1):] if elem[-1]=="L"]
rFeatNames= [elem for elem in colNames[(categIndex+1):] if elem[-1]=="R"]
pairwiseFeatNames= [elem for elem in colNames[(categIndex+1):] if elem.endswith("_P")]
colOrder= list(colNames[:(categIndex+1)]) #first columns are pair ids and label
for featType in AbstractProtocol.DESIRED_ORDER:
if featType=="L":
colOrder+=lFeatNames
elif featType=="R":
colOrder+=rFeatNames
elif featType=="P":
colOrder+=pairwiseFeatNames
else:
raise ValueError("just L, R or P allowed in AbstractProtocol.DESIRED_ORDER")
allPairsCodified= allPairsCodified[ colOrder ]
allPairsCodified.columns= colOrder
return allPairsCodified
def loadPreviousResults(self, prefixComplex):
'''
Loads previous results. Returns a pandas.DataFrame that contains in each row
the scores of previous steps for a given pair of amino acids.
:param prefixComplex: str. A prefix (a complex id) that identifies the receptor or ligand
:return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
one amino acid
Column names are:
'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR',
'categ', [prev_step_scores]
'''
previousResultsList= [fname for fname in self.prevFnamesList if fname.endswith(prefixComplex+".res.tab.gz")]
assert len(previousResultsList)>0, "No previous results"
for fname in previousResultsList:
if self.verbose: print("loading previous resuls for %s"%(prefixComplex))
prevResults= self.loadDataFile( iter([fname]) )
prevResults.loc[:,"prediction_norm"]= (prevResults["prediction"] - np.mean(prevResults["prediction"])) / np.std(prevResults["prediction"])
break # break to add just one type of previous predictions
if len(previousResultsList)>1:
for fname in previousResultsList[1:]: #Load the remaining previous predictions
if self.verbose: print("loading previous resuls for %s"%(prefixComplex))
prevNrow= prevResults.shape[0]
newData= self.loadDataFile( iter([fname] ))
newData.loc[:,"prediction_norm"]= (newData["prediction"] - np.mean(newData["prediction"])) / np.std(newData["prediction"])
merge_on_cols= ["chainIdL", "resIdL","resNameL","chainIdR", "resIdR","resNameR", "categ"]
prevResults= pd.merge(prevResults, newData, how='inner', on=merge_on_cols)
curNrow= prevResults.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous results in %s. There are 0 rows "+
"in previous features")%(prefixComplex ))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > FEATURES_MISMATH_TOLERANCE:
raise CodifyComplexException(("Error merging previous results in %s. There are a different number of residues "+
"compared to previous file\nNrows previously/now %d/%d %s")%(prefixComplex,
prevNrow, curNrow,fname))
return prevResults
def loadSingleChainFeatures(self, prefix, chainType):
'''
Loads all features files computed for ligand or receptor chains. Returns a pandas.DataFrame
that contains in each row all features from all files for each amino acid. Just amino acids
that appear in each file will be included; the others will be ruled out (intersection)
:param prefix: str. A prefixOneChainType that identifies the receptor or ligand. e.g. 1A2K
:param chainType: str. "l" for ligand and "r" for receptor
:return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
one amino acid
Column names are:
'chainId%s', 'resId%s', 'resName%s', [properties]
%s is L if chainType=="l" and R if chainType=="r"
'''
assert chainType=="l" or chainType=="r"
prefixOneChainType= prefix+"_"+chainType
# print(prefixOneChainType); raw_input("enter")
oneChainTypeFeats= None
featName, params= self.getParamsForLoadingFile(prefixOneChainType, self.singleChainfeatsToInclude[0]) #Load just first single chain feature
if self.verbose: print("loading %s for %s"%(featName,prefixOneChainType))
oneChainTypeFeats= self.loadDataFile(*params)
if len(self.singleChainfeatsToInclude)>1:
for featTuple in self.singleChainfeatsToInclude[1:]: #Load the remaining single chain features
featName, params= self.getParamsForLoadingFile( prefixOneChainType, featTuple)
if self.verbose: print("loading %s for %s"%(featName, prefixOneChainType))
prevNrow= oneChainTypeFeats.shape[0]
# params= ( list(params[0]), params[1] )
# print(featTuple, list(params[0]))
newData= self.loadDataFile(*params)
oneChainTypeFeats_prev= oneChainTypeFeats
oneChainTypeFeats= pd.merge(oneChainTypeFeats, newData, how='inner', on=["chainId", "resId","resName"])
curNrow= oneChainTypeFeats.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous single chain feature %s in %s. There are 0 rows "+
"in previous feature to %s")%(featName, prefixOneChainType, featName))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > FEATURES_MISMATH_TOLERANCE:
if prevNrow>curNrow:
df_diff= getDifRows_pd(oneChainTypeFeats_prev, oneChainTypeFeats, isSingleChain=True)
else:
df_diff= getDifRows_pd(oneChainTypeFeats, oneChainTypeFeats_prev, isSingleChain=True)
# print(df_diff);raw_input()
# print(oneChainTypeFeats_prev);raw_input()
# print(oneChainTypeFeats);raw_input()
errorMsg= str(df_diff)+"\n%s Nrows previously/now %d/%d %s"%(prefixOneChainType, prevNrow, curNrow,featName)
raise CodifyComplexException((errorMsg+"\nError merging single chain feature %s in %s. There are a different number of residues "+
"in %s compared to previous features")%(featName, prefixOneChainType, featName))
chainType= chainType.upper()
oneChainTypeFeats.rename(columns={elem:elem+chainType for elem in list(oneChainTypeFeats.columns.values)}, inplace=True)
return oneChainTypeFeats
def addPairFeatures(self, prefix, allPairs):
'''
Loads all pairwise features files and adds them to the pairs of residues contained in allPairs df
Returns a pandas.DataFrame that contains in each row all pairwise features from all files for each pair of amino acids.
Just amino acid pairs that appear in each file will be included. Others will be ruled out (intersection)
:param prefix: str. A prefix that identifies the complex
:param allPairs: pandas.DataFrame. A pandas.Dataframe in which each row represents one amino acid pair
Column names are:
"chainIdL", "resIdL","resNameL","chainIdR", "resIdR","resNameR", "categ", [previousScores]
:return df: pandas.DataFrame. A pandas.Dataframe in which each row represents one amino acid pair
Column names are:
"chainIdL", "resIdL","resNameL","chainIdR", "resIdR","resNameR", "categ", [previousScores] [pairFeats]
'''
pairTypeFeats= None
featName, params= self.getParamsForLoadingFile( prefix, self.pairfeatsToInclude[0])
if self.verbose: print("loading %s for %s"%(featName,prefix))
pairTypeFeats= self.loadDataFile(*params)
if len(self.pairfeatsToInclude)>1:
for featTuple in self.pairfeatsToInclude[1:]: #Load the remaining single chain feature
featName, params= self.getParamsForLoadingFile( prefix, featTuple)
if self.verbose: print("loading %s for %s"%(featName, prefix))
prevNrow= pairTypeFeats.shape[0]
pairTypeFeats_prev= pairTypeFeats
newData= self.loadDataFile(*params)
pairTypeFeats= pd.merge(pairTypeFeats, newData, how='inner', on=["chainIdL", "resIdL","resNameL",
"chainIdR", "resIdR","resNameR"])
curNrow= pairTypeFeats.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous pair feature %s in %s. There are 0 rows "+
"in previous feature to %s")%(featName, prefix, featName))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > FEATURES_MISMATH_TOLERANCE**2:
df_diff= getDifRows_pd(pairTypeFeats_prev, pairTypeFeats, isSingleChain=False)
errorMsg= str(df_diff)+"\n%s Nrows previously/now %d/%d %s"%(prefix, prevNrow, curNrow,featName)
raise CodifyComplexException((errorMsg+"\nError merging pair feature %s in %s. There are a different number of residues "+
"in %s compared to previous features")%(featName, prefix, featName))
#check if _P has already been assigned as mark to allPairs pairwise features
nPs= sum( (1 for elem in pairTypeFeats.columns.values if elem.endswith("_P")))
if nPs>0: #if so add _P to pairwise features
pairTypeFeats.rename(columns={elem:elem+"_P" for elem in set(pairTypeFeats.columns.values
).difference(AbstractProtocol.ARE_STR_TYPE_COLUMNS)}, inplace=True)
prevNrow= allPairs.shape[0]
allPairs_prev= allPairs
allPairs= pd.merge(allPairs, pairTypeFeats, how='inner', on=["chainIdL", "resIdL","resNameL",
"chainIdR", "resIdR","resNameR"])
curNrow= allPairs.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous pair feature %s in %s. There are 0 rows "+
"in previous feature to %s")%(featName, prefix, "allPairs"))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > FEATURES_MISMATH_TOLERANCE**2:
df_diff= getDifRows_pd(allPairs_prev, allPairs, isSingleChain=False)
errorMsg= str(df_diff)+"\n%s Nrows previously/now %d/%d %s"%(prefix, prevNrow, curNrow, "allPairs")
raise CodifyComplexException((errorMsg+"\nError merging pair feature %s in %s. There are a different number of residues "+
"in %s compared to previous features")%(featName, prefix, "allPairs"))
return allPairs
def combinePairwiseAndSingleChainFeats(self, pairFeats, singleFeatsL, singleFeatsR):
'''
Merges pairFeats pandas.DataFrame with singleFeatsL and singleFeatsR dataFrames.
singleFeatsL has n rows (as many as ligand residues) and singleFeatsR has m rows
(as many as receptor residues), and pairFeats has ~ n*m rows
(some amino acid pairs might not be considered for several reasons).
:param pairFeats: pandas.DataFrame. A pandas.Dataframe in which each row represents a pair of
amino acids (ligand residue, receptor residue)
Column names are:
'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'
[propertiesP]
:param singleFeatsL: pandas.DataFrame. A pandas.Dataframe in which each row represents the properties of
a residue of the ligand
Column names are:
'chainIdL', 'resIdL', 'resNameL',[propertiesL]
:param singleFeatsR: pandas.DataFrame. A pandas.Dataframe in which each row represents the properties of
a residue of the receptor
Column names are:
'chainIdR', 'resIdR', 'resNameR',[propertiesR]
:return directPairs: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (L to R).
Column names are:
'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'
[ propertiesL .... propertiesR .... properties_P]
'''
if singleFeatsL.shape[1]!= singleFeatsR.shape[1]:
print( "singleFeatsL and singleFeatsR have different number of variables")
featsLNo_chain= set( (elem[:-1] for elem in singleFeatsL.columns))
featsRNo_chain= set( (elem[:-1] for elem in singleFeatsR.columns))
errorMsg= "\n".join(["L %d R %d"%(len(singleFeatsL.columns),len(singleFeatsR.columns)) ,"L dif R",
str(sorted(featsLNo_chain.difference(featsRNo_chain))), "R diff L",
str(sorted(featsRNo_chain.difference(featsLNo_chain))) ])
raise ValueError( errorMsg+"\nsingleFeatsL and singleFeatsR have different number of variables")
otherPairColumns= set(pairFeats.columns.values).difference(set(AbstractProtocol.ARE_STR_TYPE_COLUMNS+["categ"]))
if len(otherPairColumns)>0: #add _P suffix to all pairwise features
pairFeats.rename(columns={elem:elem+"_P" for elem in otherPairColumns}, inplace=True)
directPairs= pd.merge(pairFeats, singleFeatsL, how='inner', on=None)
directPairs= pd.merge(directPairs, singleFeatsR, how='inner', on=None)
return directPairs
def prepareDataForPairwiseAggregat(self, df):
'''
abstract method
:param df: the Pandas.DataFrame of features before aggregation
:return pairwiseDf, ids2RowL, ids2RowR, neigsids2rowL, neigsids2rowR
pairwiseDf: the Pandas.DataFrame of pairwise features that will be aggregated
ids2RowL: a dict that maps from resIdL to all rows of df that contain resIdL
ids2RowR: a dict that maps from resIdR to all rows of df that contain resIdR
neigsids2rowL: a dict that maps from resIdL to all rows of df that contain a neighbour of resIdL
neigsids2rowR: a dict that maps from resIdR to all rows of df that contain a neighbour of resIdR
'''
raise ValueError("Not implemented")
def computeNumericAggr(self, df, selectedRows):
return computeNumericAggr( df, selectedRows)
def addPairwiseAggregation(self, df):
'''
Adds environment pairwise features to a df that contains pairwise features (named featName_P)
:param df: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (ligand, receptor)
Column names are:
'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'
[ propertiesL .... propertiesR .... properties_P] #no defined order for properties
:return newDf: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (L to R). New columns will have been added, all then
named %sAggr%d%s(numAggr/factorAggr, numberOfNewFeature, ligand/receptor)
Column names are:
'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'
[ propertiesL propertiesR properties_P ] #no defined order for properties
'''
pairwiseDf, ids2RowL, ids2RowR, neigsids2rowL, neigsids2rowR = self.prepareDataForPairwiseAggregat(df)
nElems= df.shape[0]
l_2_r_neigs= [[] for i in range(df.shape[0])]
r_2_l_neigs= [[] for i in range(df.shape[0])]
l_neigs_2_r_neigs= [[] for i in range(df.shape[0])]
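# Added descriptive comments (not in the original code): for each ligand/receptor pair
# (one row of df identified by (idL, idR)), the loops below record
#   l_2_r_neigs[row]       -> rows pairing idL with neighbours of idR,
#   r_2_l_neigs[row]       -> rows pairing idR with neighbours of idL,
#   l_neigs_2_r_neigs[row] -> rows pairing a neighbour of idL with a neighbour of idR,
# and these row sets are then aggregated with computeNumericAggr below.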
for idL in ids2RowL:
rowsInvolvingL= ids2RowL[idL]
rowsNeigsL= neigsids2rowL[idL]
for idR in ids2RowR:
rowsInvolvingR= ids2RowR[idR]
rowsNeigsR= neigsids2rowR[idR]
row_Index= tuple(rowsInvolvingL.intersection(rowsInvolvingR))[0]
l_2_r_neigs[row_Index]= tuple(rowsInvolvingL.intersection(rowsNeigsR))
r_2_l_neigs[row_Index]= tuple(rowsInvolvingR.intersection(rowsNeigsL))
l_neigs_2_r_neigs[row_Index]= tuple(rowsNeigsR.intersection(rowsNeigsL))
numericAggreL2r= self.computeNumericAggr(pairwiseDf, l_2_r_neigs)
numericAggreR2l= self.computeNumericAggr(pairwiseDf, r_2_l_neigs)
numericAggreN2N= self.computeNumericAggr(pairwiseDf, l_neigs_2_r_neigs)
numericAggreL2r.columns= [ "l2r-pair"+elem for elem in numericAggreL2r.columns]
numericAggreR2l.columns= [ "r2l-pair"+elem for elem in numericAggreR2l.columns]
numericAggreN2N.columns= [ "n2n-pair"+elem for elem in numericAggreN2N.columns]
df= pd.concat([df, numericAggreL2r, numericAggreR2l, numericAggreN2N ], axis=1)
return df
def computeNumericAggr(df, selectedRows):
'''
Compute aggregation functions (min, max, mean and sum) for the rows of data. Each row is aggregated
over the rows included in selectedRows
:param df: pandas.DataFrame(nrow, nfeatures). The numeric data to aggregate. Each row i is aggregated
(mean, max, min, sum) over the rows listed in selectedRows[i]
:param selectedRows: [[]]: int (nrow, variableLength). Each row i is aggregated with the rows included in
selectedRows[i]
:return pandas.DataFrame (nrow, 4*nfeatures)
'''
data= df.values
nVars= data.shape[1]
aggregatedResults= -(2**10)* np.ones( (data.shape[0], nVars* 4))
splitPoint2= 2*nVars
splitPoint3= 3*nVars
splitPoint4= 4*nVars
for i in range(data.shape[0]):
dataToAggregate= data[selectedRows[i], :]
if dataToAggregate.shape[0]>0:
aggregatedResults[i, 0:nVars]= np.mean( dataToAggregate, axis=0)
aggregatedResults[i, nVars:splitPoint2]= np.max( dataToAggregate, axis=0)
aggregatedResults[i, splitPoint2:splitPoint3]= np.min( dataToAggregate, axis=0)
aggregatedResults[i, splitPoint3:splitPoint4]= np.sum( dataToAggregate, axis=0)
aggregatedResults= pd.DataFrame(aggregatedResults)
aggregatedResults.columns= list(chain.from_iterable([[ "numAggr-"+oper+"-"+name for name in df.columns] for
oper in ["mean", "max", "min", "sum"] ]))
return aggregatedResults
def computeFactorAggr(df, selectedRows):
'''
Compute aggregation function (sum) for the rows of data. Each row is aggregated
over the rows included in selectedRows. This is equivalent to counting the number of neighbours of each category
:param df: pandas.DataFrame(nrow, nfeatures). The numeric data to aggregate. Each row i is summed
over the rows listed in selectedRows[i]
:param selectedRows: [[]]: int (nrow, variableLength). Each row i is added with the rows included in
selectedRows[i]
:return pandas.DataFrame (nrow, nfeatures)
'''
data= df.values
aggregatedResults= -(2**10)* np.ones( (data.shape[0], data.shape[1]))
for i in range(data.shape[0]):
dataToAggregate= data[selectedRows[i],:]
if dataToAggregate.shape[0]>0:
aggregatedResults[i, :]= np.sum( dataToAggregate, axis=0)
aggregatedResults= pd.DataFrame(aggregatedResults)
#!/usr/bin/env python
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
import pandas as pd
import seaborn as sns
from itertools import groupby
def load_result(fn, label):
'''fn is a file name, label is an arbitrary string;
returns a pandas.DataFrame with columns: union, err, Method, ulabel, jaccard, constant
'''
with open(fn, 'r') as f:
x, y, z, je = np.loadtxt(f, usecols=(0, 1, 2, 4), unpack=True)
u = x + y - z # union cardinality
j = z / u # true jaccard
err = abs(je - j) / j
ulabel_int = log(u) / log(2)
ulabel_int = ulabel_int.astype(int)
ulabel_str = [r"$\mathregular{2}^{\mathregular{" + str(x) + "}}$" for x in ulabel_int]
d = {'union': pd.Series(u),
'err': pd.Series(err),
'Method': pd.Series(label, index=range(len(u))),
'ulabel': pd.Series(ulabel_str, index=range(len(u))),
'jaccard': pd.Series(j),
'constant': pd.Series(1, index=range(len(u)))}
return pd.DataFrame(d)
if __name__ == "__main__":
file1 = load_result('full_results-6-4-4-false.txt', "HyperMinHash: 64 buckets of 4+4 bits")
file2 = load_result('full_results-6-0-8-false.txt', "MinHash: 64 buckets of 8 bits")
file3 = load_result('full_results-5-0-16-false.txt', "MinHash: 32 buckets of 16 bits")
fig = plt.figure()
for jaccard, subplot in [(0.1, 221), (1 / 3, 222), (0.5, 223), (0.9, 224)]:
err1 = file1.loc[file1['jaccard'] == jaccard]
err2 = file2.loc[file2['jaccard'] == jaccard]
err3 = file3.loc[file3['jaccard'] == jaccard]
data = pd.concat([err1, err2, err3])
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
# this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
# this is allowed but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
# test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
# non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
df = DataFrame([[123,'as<PASSWORD>'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
# non-existence
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
# deleted number (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
# non - empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
# selecting non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-Index(date3))
tm.assert_panel_equal(result, expected)
# corners
self.store.put('wp4', wp, table=True)
n = self.store.remove('wp4', where=[Term('major_axis','>',wp.major_axis[-1])])
result = self.store.select('wp4')
tm.assert_panel_equal(result, wp)
def test_terms(self):
wp = tm.makePanel()
p4d = tm.makePanel4D()
self.store.put('wp', wp, table=True)
self.store.put('p4d', p4d, table=True)
# some invalid terms
terms = [
[ 'minor', ['A','B'] ],
[ 'index', ['20121114'] ],
[ 'index', ['20121114', '20121114'] ],
]
for t in terms:
self.assertRaises(Exception, self.store.select, 'wp', t)
self.assertRaises(Exception, Term.__init__)
self.assertRaises(Exception, Term.__init__, 'blah')
self.assertRaises(Exception, Term.__init__, 'index')
self.assertRaises(Exception, Term.__init__, 'index', '==')
self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
# panel
result = self.store.select('wp',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']) ])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = self.store.select('p4d',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']), Term('items', '=', ['ItemA','ItemB']) ])
expected = p4d.truncate(after='20000108').reindex(minor=['A', 'B'],items=['ItemA','ItemB'])
tm.assert_panel4d_equal(result, expected)
# valid terms
terms = [
dict(field = 'major_axis', op = '>', value = '20121114'),
('major_axis', '20121114'),
('major_axis', '>', '20121114'),
(('major_axis', ['20121114','20121114']),),
('major_axis', datetime(2012,11,14)),
'major_axis>20121114',
'major_axis>20121114',
'major_axis>20121114',
(('minor_axis', ['A','B']),),
(('minor_axis', ['A','B']),),
((('minor_axis', ['A','B']),),),
(('items', ['ItemA','ItemB']),),
('items=ItemA'),
]
for t in terms:
self.store.select('wp', t)
self.store.select('p4d', t)
# valid for p4d only
terms = [
(('labels', '=', ['l1','l2']),),
Term('labels', '=', ['l1','l2']),
]
for t in terms:
self.store.select('p4d', t)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0.,1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r : tm.assert_series_equal(l, r, True, True, True)
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
from datetime import date
ser = Series(values, [date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime(2012, 1, 1), datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failure on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
# not consolidated
df['foo'] = np.random.randn(len(df))
self.store['df'] = df
recons = self.store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
finally:
store.close()
os.remove(self.scratchpath)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
finally:
store.close()
os.remove(self.scratchpath)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ['foo', 'bar'])
finally:
store.close()
os.remove(self.scratchpath)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
try:
store = HDFStore(self.scratchpath)
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
finally:
store.close()
os.remove(self.scratchpath)
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
try:
store = HDFStore(self.scratchpath)
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
finally:
store.close()
os.remove(self.scratchpath)
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
self.store['obj'] = df1
tm.assert_frame_equal(self.store['obj'], df1)
self.store['obj'] = df2
tm.assert_frame_equal(self.store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, tm.assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, tm.assert_panel_equal)
def test_wide_table_dups(self):
wp = tm.makePanel()
try:
store = HDFStore(self.scratchpath)
store._quiet = True
store.put('panel', wp, table=True)
store.put('panel', wp, table=True, append=True)
recons = store['panel']
tm.assert_panel_equal(recons, wp)
finally:
store.close()
os.remove(self.scratchpath)
def test_long(self):
def _check(left, right):
tm.assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
self.store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
self.store['a'] = ts
tm.assert_series_equal(self.store['a'], ts)
def test_select(self):
wp = tm.makePanel()
# put/select ok
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.store.select('wp')
# non-table ok (where = None)
self.store.remove('wp')
self.store.put('wp2', wp, table=False)
self.store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(np.random.randn(100, 100, 100), items = [ 'Item%03d' % i for i in xrange(100) ],
major_axis=date_range('1/1/2000', periods=100), minor_axis = [ 'E%03d' % i for i in xrange(100) ])
self.store.remove('wp')
self.store.append('wp', wp)
items = [ 'Item%03d' % i for i in xrange(80) ]
result = self.store.select('wp', Term('items', items))
expected = wp.reindex(items = items)
tm.assert_panel_equal(expected, result)  # api: pandas.util.testing.assert_panel_equal
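# A minimal roundtrip sketch (illustrative only, not part of the test suite above): write an
# object to an HDFStore, read it back, and compare. The helper name `simple_roundtrip` and the
# scratch path are assumptions; pytables must be installed for this to run.
import os
import numpy as np
import pandas as pd

def simple_roundtrip(obj, scratch_path='__scratch__.h5'):
    # write the object under a key, read it back, then clean up the scratch file
    store = pd.HDFStore(scratch_path)
    try:
        store['obj'] = obj
        reloaded = store['obj']
    finally:
        store.close()
        os.remove(scratch_path)
    return reloaded

# e.g. a random frame should survive the roundtrip unchanged:
# frame = pd.DataFrame(np.random.randn(5, 3), columns=['A', 'B', 'C'])
# assert simple_roundtrip(frame).equals(frame)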
# Author: <NAME> (http://falexwolf.de)
# T. Callies
"""Rank genes according to differential expression.
"""
import numpy as np
import pandas as pd
from math import sqrt, floor
from scipy.sparse import issparse
from .. import utils
from .. import settings
from .. import logging as logg
from ..preprocessing import simple
def rank_genes_groups(
adata,
group_by,
use_raw=True,
groups='all',
reference='rest',
n_genes=100,
compute_distribution=False,
only_positive=True,
copy=False,
test_type='t-test_overestim_var',
correction_factors=None):
"""Rank genes according to differential expression [Wolf17]_.
Rank genes by differential expression. By default, a t-test-like ranking is
used, in which means are normalized with variances.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
group_by : `str`
The key of the sample grouping to consider.
use_raw : `bool`, optional (default: `True`)
Use `raw` attribute of `adata` if present.
groups : `str`, `list`, optional (default: `'all'`)
Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall
be restricted. If not passed, a ranking will be generated for all
groups.
reference : `str`, optional (default: `'rest'`)
If `'rest'`, compare each group to the union of the rest of the group. If
a group identifier, compare with respect to this group.
n_genes : `int`, optional (default: 100)
The number of genes that appear in the returned tables.
test_type : {'t-test_overestim_var', 't-test', 'wilcoxon', 't-test_double_overestim_var',
't-test_correction_factors'}, optional (default: 't-test_overestim_var')
If 't-test', use t-test to calculate test statistics. If 'wilcoxon', use
Wilcoxon-Rank-Sum to calculate test statistic. If
't-test_overestim_var', overestimate variance.
't-test_double_overestim_var', additionally, underestimate variance of the rest
't-test_correction_factors', define correction factors manually
only_positive : bool, optional (default: `True`)
Only consider positive differences.
correction_factors: [a,b], optional (default: None)
Only for the test-type 't-test_correction_factors'. Then, a determines correction factor for group variance,
b determines correction factor for variance of the comparison group
Returns
-------
rank_genes_groups_gene_scores : structured `np.ndarray` (adata.uns)
Structured array to be indexed by group id of shape storing the zscore
for each gene for each group.
rank_genes_groups_gene_names : structured `np.ndarray` (adata.uns)
Structured array to be indexed by group id for storing the gene names.
"""
logg.info('rank differentially expressed genes', r=True)
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
if compute_distribution:
logg.warn('`compute_distribution` is deprecated, as it requires storing '
'a shifted and rescaled distribution for each gene. '
'You can now run `sc.pl.rank_genes_groups_violin` without it, '
'which will show the original distribution of the gene.')
# for clarity, rename variable
groups_order = groups
if isinstance(groups_order, list) and isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if reference != 'rest' and reference not in set(groups_order):
groups_order += [reference]
if (reference != 'rest'
and reference not in set(adata.obs[group_by].cat.categories)):
raise ValueError('reference = {} needs to be one of group_by = {}.'
.format(reference,
adata.obs[group_by].cat.categories.tolist()))
groups_order, groups_masks = utils.select_groups(
adata, groups_order, group_by)
adata.uns['rank_genes_groups_params'] = np.array(
(group_by, reference, test_type, use_raw),
dtype=[('group_by', 'U50'), ('reference', 'U50'), ('test_type', 'U50'), ('use_raw', np.bool_)])
# adata_comp mocks an AnnData object if use_raw is True
# otherwise it's just the AnnData object
adata_comp = adata
if adata.raw is not None and use_raw:
adata_comp = adata.raw
X = adata_comp.X
# for clarity, rename variable
n_genes_user = n_genes
# make sure indices are not OoB in case there are less genes than n_genes
if n_genes_user > X.shape[1]:
n_genes_user = X.shape[1]
# in the following, n_genes is simply another name for the total number of genes
n_genes = X.shape[1]
rankings_gene_zscores = []
rankings_gene_names = []
n_groups = groups_masks.shape[0]
ns = np.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
ns[imask] = np.where(mask)[0].size
logg.info(' consider \'{}\':'.format(group_by), groups_order,
'with sample numbers', ns)
if reference != 'rest':
ireference = np.where(groups_order == reference)[0][0]
reference_indices = np.arange(adata_comp.n_vars, dtype=int)
avail_tests = {'t-test', 't-test_overestim_var', 'wilcoxon', 't-test_double_overestim_var',
't-test_correction_factors'}
if test_type not in avail_tests:
raise ValueError('test_type should be one of {}; '
'the default is "t-test_overestim_var".'
.format(avail_tests))
if test_type == 't-test_correction_factors':
if correction_factors is None:
raise ValueError('For this test type, you need to enter correction factors manually.')
if len(correction_factors) != 2:
raise ValueError('We need exactly 2 correction factors, accessible via correction_factors[i], i=0,1')
if correction_factors[0]<0 or correction_factors[1]<0:
raise ValueError('Correction factors need to be positive numbers!')
if test_type in {'t-test', 't-test_overestim_var', 't-test_double_overestim_var',
't-test_correction_factors'}:
# loop over all masks and compute means, variances and sample numbers
means = np.zeros((n_groups, n_genes))
vars = np.zeros((n_groups, n_genes))
for imask, mask in enumerate(groups_masks):
means[imask], vars[imask] = simple._get_mean_var(X[mask])
# test each either against the union of all other groups or against a
# specific group
for igroup in range(n_groups):
if reference == 'rest':
mask_rest = ~groups_masks[igroup]
else:
if igroup == ireference: continue
else: mask_rest = groups_masks[ireference]
mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
if test_type == 't-test':
ns_rest = np.where(mask_rest)[0].size
elif test_type == 't-test_correction_factors':
# The tendency is as follows: for the comparison group (rest), overestimate the variance --> smaller ns_rest
ns_rest = np.where(mask_rest)[0].size/correction_factors[1]
else: # hack for overestimating the variance
ns_rest = ns[igroup]
if test_type in {'t-test', 't-test_overestim_var'}:
ns_group=ns[igroup]
elif test_type == 't-test_correction_factors':
# We underestimate group variance by increasing denominator, i.e. ns_group
ns_group=ns[igroup]*correction_factors[0]
else:
# We do the opposite of t-test_overestim_var
ns_group=np.where(mask_rest)[0].size
denominator = np.sqrt(vars[igroup]/ns_group + var_rest/ns_rest)
denominator[np.flatnonzero(denominator == 0)] = np.nan
zscores = (means[igroup] - mean_rest) / denominator
zscores[np.isnan(zscores)] = 0
zscores = zscores if only_positive else np.abs(zscores)
partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
mask = groups_masks[igroup]
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[igroup],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
elif test_type == 'wilcoxon':
# Wilcoxon-rank-sum test is usually more powerful in detecting marker genes
# Limit maximal RAM that is required by the calculation. Currently set fixed to roughly 100 MByte
CONST_MAX_SIZE = 10000000
ns_rest = np.zeros(n_groups, dtype=int)
# initialize space for z-scores
zscores = np.zeros(n_genes)
# First loop: Loop over all genes
if reference != 'rest':
for imask, mask in enumerate(groups_masks):
if imask == ireference: continue
else: mask_rest = groups_masks[ireference]
ns_rest[imask] = np.where(mask_rest)[0].size
if ns_rest[imask] <= 25 or ns[imask] <= 25:
logg.hint('Few observations in a group for '
'normal approximation (<=25). Lower test accuracy.')
n_active = ns[imask]
m_active = ns_rest[imask]
# Now calculate gene expression ranking in chunks:
chunk = []
# Calculate chunk frames
n_genes_max_chunk = floor(CONST_MAX_SIZE / (n_active + m_active))
if n_genes_max_chunk < n_genes - 1:
chunk_index = n_genes_max_chunk
while chunk_index < n_genes - 1:
chunk.append(chunk_index)
chunk_index = chunk_index + n_genes_max_chunk
chunk.append(n_genes - 1)
else:
chunk.append(n_genes - 1)
left = 0
# Calculate rank sums for each chunk for the current mask
for chunk_index, right in enumerate(chunk):
# Check if issparse is true: AnnData objects are currently sparse.csr or ndarray.
if issparse(X):
df1 = pd.DataFrame(data=X[mask, left:right].todense())
df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(),
index=np.arange(start=n_active, stop=n_active + m_active))
else:
df1 = pd.DataFrame(data=X[mask, left:right])  # api: pandas.DataFrame
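# A standalone sketch of the t-test style scoring used above (illustrative only; the helper
# name `tstat_scores` is an assumption): the score per gene is the difference of group means
# scaled by the pooled standard error, and the top genes are selected with argpartition just
# like in the loop above.
import numpy as np

def tstat_scores(X_group, X_rest, n_top=10):
    mean_g, var_g = X_group.mean(axis=0), X_group.var(axis=0)
    mean_r, var_r = X_rest.mean(axis=0), X_rest.var(axis=0)
    denom = np.sqrt(var_g / X_group.shape[0] + var_r / X_rest.shape[0])
    denom[denom == 0] = np.nan            # avoid division by zero
    z = (mean_g - mean_r) / denom
    z[np.isnan(z)] = 0
    top = np.argpartition(z, -n_top)[-n_top:]
    # return indices of the top genes (descending by score) plus all scores
    return top[np.argsort(z[top])[::-1]], z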
from json import load
from matplotlib.pyplot import title
from database.database import DbClient
from discord import Embed
import pandas as pd
from util.data import load_data
class Analytics:
def __init__(self, server_id: str, db):
self.server_id = server_id
self.db = db
@staticmethod
def no_data_embed(topic: str) -> Embed:
"""CREATE AN EMBED IF NO DATA WAS COLLECTED"""
embed = Embed(title="SORRY", description=f"Sorry, but there were no `{topic}` data collected on this server!")
return embed
async def analyze_message(self):
"""ANALYZE THE MESSAGE DATA"""
data = await load_data(self.db, self.server_id)
data = data["message"]
if len(data) == 0:
return self.no_data_embed("message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Message counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_delete(self):
"""ANALYZE MESSAGE DELETE"""
data = await load_data(self.db, self.server_id)
data = data["message_delete"]
if len(data) == 0:
return self.no_data_embed("message delete")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message delete ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message delete counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message delete from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message delete counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message delete counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_edit(self):
"""ANALYZE MESSAGE EDIT"""
data = await load_data(self.db, self.server_id)
data = data["message_edit"]
if len(data) == 0:
return self.no_data_embed("message edit")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message edit ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message edits counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message edits from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message edits counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message edits counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_reaction(self):
"""ANALYZE THE REACTION DATA"""
data = await load_data(self.db, self.server_id)
data = data["reaction"]
if len(data) == 0:
return self.no_data_embed("reaction")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["reactionname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Reaction ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed reaction data"),
Embed(title=embed_title, description="Reaction counted by name:\n"f"```{name_count}```"),
Embed(title=embed_title, description="Reaction counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Reaction send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Reaction counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Reaction counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_botrequests(self):
"""ANALYZE THE BOT-REQUESTS DATA"""
data = await load_data(self.db, self.server_id)
data = data["bot_requests"]
if len(data) == 0:
return self.no_data_embed("bot-requests")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["cmdname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
embed_title = "Bot-Requests ~ Analytics"
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed bot-requests data"),
Embed(title=embed_title, description="Executed CMD-names counted:\n"f"```{name_count}```"),
Embed(title=embed_title, description="Bot-Requests messages counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Bot-Requests messages send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Bot-Requests counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Bot-Requests counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_botmsg(self):
"""ANALYZE THE BOT MSG DATA"""
data = await load_data(self.db, self.server_id)
data = data["bot_msg"]
if len(data) == 0:
return self.no_data_embed("bot-message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Bot-Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed bot-message data"),
Embed(title=embed_title, description=f"Total bot messages: {len(data)}"),
Embed(title=embed_title, description="BotMessages counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="BotMessages send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="BotMessages send in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="BotMessages send on which day:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_users(self): # TODO show last 10 users at timestamp
data = await load_data(self.db, self.server_id)
data = data["users"]
if len(data) == 0:
return self.no_data_embed("users")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
result = df.head(10)
#df["timestamp"] = pd.to_datetime(df["timestamp"])
#df["hours"] = df["timestamp"].dt.hour
#df["weekday"] = df["timestamp"].dt.day_name()
#hours_count = pd.value_counts(df["hours"])
#weekday_count = pd.value_counts(df["weekday"])
embed_title = "Users ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed users data"),
Embed(title=embed_title, description=f"```{result}```")
#Embed(title=embed_title, description="Users counted in which hours:\n"f"```{hours_count}```"),
#Embed(title=embed_title, description="Users counted in which hours:\n"f"```{hours_count}```"),
#Embed(title=embed_title, description="Users counted on which weekdays:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_userjoin(self):
data = await load_data(self.db, self.server_id)
data = data["userjoins"]
if len(data) == 0:
return self.no_data_embed("userjoins")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Userjoin ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Userjoins counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Userjoins counted on which weekdays:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_userleave(self):
data = await load_data(self.db, self.server_id)
data = data["userleave"]
if len(data) == 0:
return self.no_data_embed("userleave")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Userleave ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Userleaves counted in which hour:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Userleaves counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_mentions(self):
data = await load_data(self.db, self.server_id)
data = data["mentions"]
if len(data) == 0:
return self.no_data_embed("mentions")
# ANALYZE THE DATA:
df = pd.DataFrame(data)  # api: pandas.DataFrame
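# The per-event methods above all repeat one pattern; a hedged helper sketch (the name
# `count_by_time` is illustrative, not part of the bot): parse the timestamps, then count
# events per hour of day and per weekday.
import pandas as pd

def count_by_time(records):
    df = pd.DataFrame(records)
    df["timestamp"] = pd.to_datetime(df["timestamp"])
    hours_count = df["timestamp"].dt.hour.value_counts()
    weekday_count = df["timestamp"].dt.day_name().value_counts()
    return hours_count, weekday_count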
""" Construct dataset """
import math
import pandas as pd
import numpy as np
import keras
import csv
def one_hot_encode_object_array(arr, nr_classes):
'''One hot encode a numpy array of objects (e.g. strings)'''
_, ids = np.unique(arr, return_inverse=True)
return keras.utils.to_categorical(ids, nr_classes)
def produce_diff(model, result_dir, down_station, input_list, include_time, sample_size, network_type, denormalise, roundit):
# include_diff is always false; we don't need to produce a diff for models that include a diff in the input
(y_train, x_train, y_cv, x_cv, y_test, x_test, _, _, train_y_max, train_y_min, train_idx, test_idx, cv_idx, _, _) = construct(down_station, input_list, include_time, sample_size, network_type)
y_train_pred = model.predict(x_train)
y_train_pred = y_train_pred.ravel()
y_cv_pred = model.predict(x_cv)
y_cv_pred = y_cv_pred.ravel()
y_test_pred = model.predict(x_test)
y_test_pred = y_test_pred.ravel()
pred = np.concatenate((y_train_pred, y_test_pred, y_cv_pred))
real = np.concatenate((y_train, y_test, y_cv))
idx = np.concatenate((train_idx, test_idx, cv_idx))
if denormalise:
pred = pred * (train_y_max - train_y_min) + train_y_min
real = real * (train_y_max - train_y_min) + train_y_min
if roundit:
pred = np.rint(pred)
real = np.rint(real)
pred_file = '{0}/{1}-pred-{2}.txt'.format(result_dir, down_station, network_type)
np.savetxt(pred_file, np.transpose((idx, pred)), delimiter=',', fmt="%s")
real_file = '{0}/{1}-real-{2}.txt'.format(result_dir, down_station, network_type)
np.savetxt(real_file, np.transpose((idx, real)), delimiter=',', fmt="%s")
diff_file = open('{0}/{1}-diff-{2}.txt'.format(result_dir, down_station, network_type), 'w+')
np.savetxt(diff_file, np.transpose((idx, np.subtract(real, pred))), delimiter=',', fmt="%s")
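# The denormalisation step above simply inverts a min-max scaling; as a worked example
# (numbers are illustrative): with train_y_min=10 and train_y_max=60, a normalised
# prediction of 0.5 maps back to 0.5 * (60 - 10) + 10 = 35.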
# input_list is a tuple (input_name, interpolation_method, shift_offset, trim_nan)
def construct(down_station, input_list, include_time, sample_size, network_type):
"""Construct training dataset"""
time_resolution = '1H'
print('Constructing training data with resolution {0}'.format(time_resolution))
################################################################################################
# downstream station
target = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(down_station), parse_dates=['Date'])
target = target.set_index(['Date'])
target.index = pd.to_datetime(target.index)
target_mean = target.resample(time_resolution).mean()
first_date = target_mean.index.values[0]
last_date = target_mean.index.values[-1]
target_mean = target_mean.interpolate(method='linear')
target_count = target.resample(time_resolution).count()
target_count = target_count.values.astype(int)[:, 0]
target_count = np.convolve(target_count, np.ones(24, dtype=int), 'full') # ignore training sample if more than day's worth of data is missing
print('Downstream data for {0} downsampled'.format(down_station))
################################################################################################
# input stations
inputs_mean = []
inputs_count = []
for input_idx in range(0, len(input_list)):
input_name = input_list[input_idx][0]
input_interpolation_method = input_list[input_idx][1]
input_shift_offset = input_list[input_idx][2]
input_data = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(input_name), parse_dates=['Date'])
input_data = input_data.set_index(['Date'])
input_data.index = pd.to_datetime(input_data.index)
if input_interpolation_method == 'linear':
input_data_mean = input_data.resample(time_resolution).mean()
else:
input_data_mean = input_data.resample(time_resolution).pad()
if input_shift_offset > 0:
input_data_mean = input_data_mean.shift(sample_size)
input_data_first_date = input_data_mean.index.values[0]
if input_data_first_date > first_date:
first_date = input_data_first_date
input_data_last_date = input_data_mean.index.values[-1]
if input_data_last_date < last_date:
last_date = input_data_last_date
input_data_mean = input_data_mean.interpolate(method=input_interpolation_method)
input_data_count = input_data.resample(time_resolution).count()
input_data_count = input_data_count.values.astype(int)[:, 0]
input_data_count = np.convolve(input_data_count, np.ones(24, dtype=int), 'full') # ignore training sample if more than day's worth of data is missing
inputs_mean.append(input_data_mean)
inputs_count.append(input_data_count)
print('Input for {0} downsampled using {1} interpolation and shift of {2} timesteps'.format(input_name, input_interpolation_method, input_shift_offset))
################################################################################################
# trim input and output arrays to equal lengths
print('Data before {0} ignored'.format(first_date))
print('Data after {0} ignored'.format(last_date))
lower_idx = 0
upper_idx = 0
for input_idx in range(0, len(inputs_mean)):
lower_idx = inputs_mean[input_idx].index.get_loc(first_date)
upper_idx = inputs_mean[input_idx].index.get_loc(last_date)
inputs_mean[input_idx] = inputs_mean[input_idx].head(upper_idx).tail(upper_idx - lower_idx)
inputs_count[input_idx] = inputs_count[input_idx][sample_size+lower_idx:upper_idx]
print('{0} training samples before trimming dates'.format(target_mean.index.size))
lower_idx = target_mean.index.get_loc(first_date)
upper_idx = target_mean.index.get_loc(last_date)
target_mean = target_mean.head(upper_idx).tail(upper_idx - lower_idx - sample_size)
print('{0} training samples after trimming dates'.format(target_mean.index.size))
target_count = target_count[sample_size+lower_idx:upper_idx]
train = target_mean.copy(deep=True)
################################################################################################
# Add time sine and cosine values
time_inputs = 0
if include_time:
print('Including time of year as input')
day_of_year = target_mean.index.to_series().apply(lambda x: x.timetuple().tm_yday/365.25 * 2 * math.pi)
# day_of_year = target_mean.index.to_series().apply(lambda x: (x.timetuple().tm_yday - 1)/365.25 * 2 * math.pi) # this is more correct
time_sin = day_of_year.apply(np.sin)
time_cos = day_of_year.apply(np.cos)
if (network_type == 'bnn') or (network_type == 'cnn') or (network_type == 'multi_cnn') or (network_type == 'multi_cnn_custom') or (network_type == 'rnn_lstm') or (network_type == 'rnn_gru'):
for i in range(0, sample_size):
train['TimeSin{0}'.format(i)] = time_sin
train['TimeCos{0}'.format(i)] = time_cos
else:
train['TimeSin'] = time_sin
train['TimeCos'] = time_cos
time_inputs = 2
################################################################################################
# Add inputs to training dataset
for input_idx in range(0, len(input_list)):
input_name = input_list[input_idx][0]
for i in range(0, sample_size):
shifted = inputs_mean[input_idx].shift(sample_size-i)
train['Inflow_{0}_{1}'.format(input_name, i)] = shifted
# Last column should be target values
train = train.assign(Target=target_mean.values.astype(float))
#############################################################################################################
# Nuke samples where too much data is missing
mask = target_count.astype(float)
mask[mask == 0] = 'nan'
# uncomment if you want to trim based on high/low flow regime
# mask_y = train.values[:, -1]
# mask[mask_y > 200] = 'nan'
for input_idx in range(0, len(input_list)):
input_name = input_list[input_idx][0]
trim_nan = input_list[input_idx][3]
if trim_nan:
mask_input = inputs_count[input_idx]
mask[mask_input == 0] = 'nan'
print('Removing nan values for {0}'.format(input_name))
else:
print('Not removing nan values for {0}'.format(input_name))
# creating copy for running full prediction on
traincopy = train.copy()
print('{0} training samples before trimming missing values'.format(train.index.size))
train = train.assign(Mask=mask)
train = train.dropna(axis=0, subset=train.columns[:])
print('{0} training samples after trimming missing values'.format(train.index.size))
#############################################################################################################
# Clean up unused columns
train = train.drop('Value', 1)
train = train.drop('Mask', 1)
traincopy = traincopy.drop('Value', 1)
#############################################################################################################
# Split training, CV and test datasets
idx_test = int(0.6 * len(train))
idx_cv = int(0.8 * len(train))
cv = train.tail(len(train) - idx_cv)
test = train.head(idx_cv).tail(idx_cv - idx_test)
train = train.head(idx_test)
#############################################################################################################
# Split input and output
full_first_date = pd.to_datetime("2011-08-01 00:00:00")  # api: pandas.to_datetime
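# A hedged sketch of the per-station preprocessing loop above, pulled out as a helper
# (the name `prep_station` is an assumption): resample to the model resolution, optionally
# shift the series, and interpolate the gaps.
import pandas as pd

def prep_station(csv_path, resolution='1H', method='linear', shift_steps=0):
    data = pd.read_csv(csv_path, parse_dates=['Date']).set_index('Date')
    if method == 'linear':
        mean = data.resample(resolution).mean()
    else:
        mean = data.resample(resolution).pad()
    if shift_steps > 0:
        mean = mean.shift(shift_steps)
    return mean.interpolate(method=method)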
import argparse
import pandas as pd
from shapely import geometry
import helpers
import json
from polygon_geohasher import polygon_geohasher
from tqdm import tqdm
CELL_SIZE_X = 360.0 / 4320.0
CELL_SIZE_Y = 180.0 / 2160.0
def parse_args():
parser = argparse.ArgumentParser(
description="Converts SPAM2017 crop information into a geohashed CSV."
)
parser.add_argument("--input_file", type=str, required=True)
parser.add_argument("--output_file", type=str, default="./output.csv")
parser.add_argument("--country_iso", type=str)
parser.add_argument("--coverage_file", type=str)
parser.add_argument("--crop_columns", nargs="+", type=str, required=True)
parser.add_argument("--geohash_level", type=int, default=5)
return parser.parse_args()
def main():
args = parse_args()
# load the csv file into pandas for cleanup
print('Loading...')
df = pd.read_csv(args.input_file)
# filter down to area of interest records
print('Finding AoI...')
geohashes_aoi = set()
if args.coverage_file is not None:
# loading coverage polygon from geo json file
coverage_geojson = json.load(open(args.coverage_file))
# generate geohashes covered by the AoI
geohashes_aoi = helpers.geohashes_from_geojson_poly(
coverage_geojson, precision=args.geohash_level
)
# filter down to country of interest records
elif args.country_iso is not None:
df = df.loc[df["iso3"] == args.country_iso]
# extract x, y locations and crop of interest
df = df[(["x", "y"] + args.crop_columns)]
df = df.reset_index()
# loop over the x, y which are the cell centroids, and generate a bounding box based on
# the cell size (taken from the associated geotiff resolution)
print('Converting points to bounds...')
centroids = zip(df["x"], df["y"])
bounds = [
geometry.box(
c[0] - CELL_SIZE_X / 2,
c[1] - CELL_SIZE_Y / 2,
c[0] + CELL_SIZE_X / 2,
c[1] + CELL_SIZE_Y / 2,
)
for c in tqdm(centroids)
]
# loop through the bounds we've created and intersect each with the intended geohash grid
print('Converting bounds to geohashes...')
geohashes = [
polygon_geohasher.polygon_to_geohashes(
b, precision=args.geohash_level, inner=False
)
for b in tqdm(bounds)
]
# flatten gh set for each cell preserving index - no clean way to do this in pandas
flattened_gh = []
print('Clipping geohashes to AoI...')
for idx, gh_set in tqdm(enumerate(geohashes)):
for gh in gh_set:
if (len(geohashes_aoi) > 0 and gh in geohashes_aoi) or len(
geohashes_aoi
) == 0:
bounds_str = helpers.geohash_to_array_str(gh)
flattened_gh.append((idx, gh, bounds_str))
# store as a dataframe with any geohashes that were part of 2 cells reduced to 1
# a better implementation of this would take the value of both cells into account and
# compute a final adjusted value for the given geohash
print('Generating output csv...')
geohash_df = pd.DataFrame(flattened_gh, columns=["cell", "geohash", "bounds"])  # api: pandas.DataFrame
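# A hedged sketch of the centroid-to-geohash step above (the helper name `cell_to_geohashes`
# is illustrative; it reuses the module-level CELL_SIZE_X/Y constants and imports): build the
# raster cell's bounding box from its centroid, then cover it with geohashes at the requested
# precision.
def cell_to_geohashes(x, y, precision=5):
    cell = geometry.box(x - CELL_SIZE_X / 2, y - CELL_SIZE_Y / 2,
                        x + CELL_SIZE_X / 2, y + CELL_SIZE_Y / 2)
    return polygon_geohasher.polygon_to_geohashes(cell, precision=precision, inner=False)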
import sys
from pathlib import Path
from itertools import chain
from typing import List
import numpy as np
import pandas as pd
import pandas_flavor as pf
from janitor import clean_names
sys.path.append(str(Path.cwd()))
from config import root_dir # noqa E402
from utils import ( # noqa: E402
get_module_purpose,
read_args,
read_ff_csv,
retrieve_team_abbreviation,
)
def clean_game_date(season_year: int, date: str) -> str:
"""Creates a date string from a season year and date string.
Args:
season_year (int): The season year.
date (str): The date string.
Returns:
str: The date string if the date is part of the regular season, otherwise
returns 'non-regular-season'.
"""
if len(date) == 3 and date[0] == "9":
return f"{season_year}-09-{date[1:]}"
elif len(date) == 4 and int(date[:2]) > 9:
return f"{season_year}-{date[:2]}-{date[2:]}"
elif len(date) == 3 and date[0] in ["1", "2"]:
season_year += 1
return f"{season_year}-0{date[0]}-{date[1:]}"
else:
return "non-regular-season"
@pf.register_dataframe_method
def clean_games_date(
df: pd.DataFrame, season_year: int, date_column: str = "date"
) -> pd.DataFrame:
df[date_column] = (
df[date_column].astype(str).apply(lambda x: clean_game_date(season_year, x))
)
return df
# create game id
@pf.register_dataframe_method
def create_game_id(df: pd.DataFrame) -> pd.DataFrame:
"""Create a unique id for each game. Assumes every two rows are a single game.
Args:
df (pd.DataFrame): Dataframe with betting lines data.
Returns:
pd.DataFrame: Dataframe with unique id for
each game that links to the betting lines data.
Raises:
ValueError: Occurs when the dataframe has an odd number of rows.
An odd row count indicates that there is an incomplete game.
"""
if divmod(df.shape[0], 2)[1] != 0:
raise ValueError("Dataframe must have an even number of rows.")
_id = list(range(1, (len(df) // 2) + 1))
df["game_id"] = list(chain(*zip(_id, _id)))
return df
@pf.register_dataframe_method
def add_team_abbreviation(df: pd.DataFrame, team_column: str = "team") -> pd.DataFrame:
"""Convert team name to team abbreviation.
Args:
df (pd.DataFrame): Dataframe with betting lines data and team column.
team_column (str, optional): Column with full team name. Defaults to "team".
Returns:
pd.DataFrame: Dataframe with team abbreviation column.
"""
df[team_column] = df[team_column].apply(lambda x: retrieve_team_abbreviation(x))
return df
@pf.register_dataframe_method
def create_point_spread_df(df: pd.DataFrame):
"""Convert over-under (O/U) and line to projected points for each team.
For example, if the O/U is 49 and Team 1 is favored by -7 over Team 2,
the point projection for Team 1 is 28 and 21 for Team 2.
Args:
df (pd.DataFrame): Dataframe with betting lines data.
Returns:
pd.DataFrame: Dataframe with point spread data.
"""
is_even_moneyline = df[df["ml"] < 0].shape[0] > 1
is_pick = any(df["open"].str.contains("pk"))
if any([is_even_moneyline, is_pick]):
fav_team, underdog_team = df["team"]
fav_pts, underdog_pts = [
float(max([x for x in df["open"] if x != "pk"])) / 2
] * 2
else:
fav_team_index = [index for index, value in enumerate(df["ml"]) if value < 0][0]
underdog_team_index = int(not fav_team_index)
fav_team = df["team"].iloc[fav_team_index]
underdog_team = df["team"].iloc[underdog_team_index]
pt_spread = df["open"].astype(float).min()
over_under = df["open"].astype(float).max()
fav_pts = (over_under / 2) + pt_spread * 0.5
underdog_pts = (over_under / 2) - pt_spread * 0.5
spread_df = pd.DataFrame(
[[fav_team, underdog_team, fav_pts], [underdog_team, fav_team, underdog_pts]],
columns=["team", "opp", "projected_off_pts"],
)
spread_df["projected_off_pts"] = spread_df["projected_off_pts"].apply(
lambda x: round(x)
)
return spread_df
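# Worked example of the spread-to-points conversion above (numbers are illustrative):
# with an over/under of 49 and a 7-point spread, the favourite projects to
# 49/2 + 7*0.5 = 28 points and the underdog to 49/2 - 7*0.5 = 21, matching the docstring;
# a pick'em ("pk") line simply splits the over/under evenly between the two teams.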
@pf.register_dataframe_method
def process_betting(df: pd.DataFrame, season_year: int) -> pd.DataFrame:
"""Converts raw betting lines data to include point projections for each
team in each game. Point projections can be used to inform how much
scoring is expected to occur in each game.
Args:
df (pd.DataFrame): Raw betting lines data.
season_year (int): The season year.
Returns:
pd.DataFrame: Dataframe with point projections for each team in each game.
"""
process_betting_df = pd.DataFrame()
game_ids = df["game_id"].unique()
for game_id in game_ids:
game_df = df[df["game_id"] == game_id]
point_spread_df = game_df.create_point_spread_df()
point_spread_df["date"] = game_df["date"].iloc[0]
point_spread_df["season_year"] = season_year
process_betting_df = pd.concat([process_betting_df, point_spread_df])
return process_betting_df
def impute_missing_projections(
df: pd.DataFrame,
calendar_df: pd.DataFrame,
keys: List[str] = ["date", "season_year", "team", "opp"],
default_point_projection: int = 25,
) -> pd.DataFrame:
"""Impute missing point projections for each team in each game.
Some dates for betting are incorrect. In these instances,
the point projections are imputed with the average point projection
for all previous games played in the same season.
Args:
df (pd.DataFrame): Dataframe with point projections for each team in each game.
calendar_df (pd.DataFrame): Dataframe with dates for each game.
keys (List[str], optional): Columns to join the point projections and calendar.
Defaults to ["date", "season_year", "team", "opp"].
default_point_projection (int, optional): If a team has no
previous games (i.e., it's the first
game of the season), the point projection is imputed with this value.
Defaults to 25.
Returns:
pd.DataFrame: Dataframe with imputed point projections for each team in
each game. There should be no NA values for the point projections.
"""
# identify games without a projection value
missing_projection_df = pd.merge(calendar_df, df, on=keys, how="left")
if not missing_projection_df["projected_off_pts"].isnull().sum():
return pd.DataFrame()
missing_projection_df = missing_projection_df[
    pd.isna(missing_projection_df["projected_off_pts"])  # api: pandas.isna
]
#! python3
import random
import math
import pandas as pd
from pandas import DataFrame as df
from anytree import Node, RenderTree, NodeMixin, AsciiStyle
from anytree.exporter import DotExporter, JsonExporter
from anytree.importer import JsonImporter
import os
import copy
import time
import json
import queue
import csv
import sqlite3
import statistics
import matplotlib.pyplot as plt
import random
# Final four pairings
final_four_pairings = {
"mens" : {
2021 :
[['West', 'East'],
['South', 'Midwest']]
,
2019 :
[['East', 'West'],
['South', 'Midwest']]
,
2018 : {
# TBD
}
},
"womens" : {
2021 :
[['Alamo, Hemisfair'],
['River Walk', 'Mercado']]
,
2019 :
[["Greensboro", "Portland"],
["Chicago", "Albany"]]
,
2018 : {
# TBD
}
}
}
# different scoring systems for different brackets so expected points can be
# calculated
scoring_systems = {
"ESPN" : {
"round" : {
0 : 0,
1 : 0,
2 : 10,
3 : 20,
4 : 40,
5 : 80,
6 : 160,
7 : 320,
},
"cumulative" : {
0 : 0,
1 : 0,
2 : 10,
3 : 30,
4 : 70,
5 : 150,
6 : 310,
7 : 630,
},
},
"wins_only" : {
"round" : {
0 : 0,
1 : 0,
2 : 1,
3 : 1,
4 : 1,
5 : 1,
6 : 1,
7 : 1,
},
"cumulative" : {
0 : 0,
1 : 0,
2 : 1,
3 : 2,
4 : 3,
5 : 4,
6 : 5,
7 : 6,
},
},
"degen_bracket" : {
"round" : {
0 : 0,
1 : 0,
2 : 2,
3 : 3,
4 : 5,
5 : 8,
6 : 13,
7 : 21,
},
"cumulative" : {
0 : 0,
1 : 0,
2 : 2,
3 : 5,
4 : 10,
5 : 18,
6 : 31,
7 : 52,
},
},
"pick_six" : {
"round" : {
0 : 0,
1 : 0,
2 : 1,
3 : 1,
4 : 1,
5 : 1,
6 : 1,
7 : 7,
},
"cumulative" : {
0 : 0,
1 : 0,
2 : 1,
3 : 2,
4 : 3,
5 : 4,
6 : 5,
7 : 12,
},
}
}
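# A hedged sketch of how these scoring tables feed expected value (mirrors
# Model.calculate_expected_points below; `expected_round_points` is an illustrative name):
# a team's expected points for a round are its simulated win rate for that round times the
# round's point value.
def expected_round_points(round_wins, number_simulations, system="ESPN", rnd=4):
    return round_wins / number_simulations * scoring_systems[system]["round"][rnd]

# e.g. a team that reaches round 4 in 600 of 1000 simulations is worth
# 0.6 * 40 = 24 expected ESPN points for that round.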
# seed pairings used to build the initial bracket
seed_pairings = [[1,16],
[8,9],
[5,12],
[4,13],
[6,11],
[3,14],
[7,10],
[2,15]]
class Model:
def __init__(self, gender='mens', year=2021, number_simulations=1, scoring_sys="degen_bracket"):
self.gender = gender
self.year = year
self.all_teams = self.create_teams()
self.start_bracket = Bracket(model=self)
self.sim_bracket = Bracket(model=self)
self.bracket_pairings = final_four_pairings[gender][year]
self.game_pairing = 0
self.number_simulations = number_simulations
self.completed_simulations = 0
self.scoring_system = scoring_systems[scoring_sys]
self.actual_results = None
self.simulation_results = []
self.special_entries = {
"most_valuable_teams": None,
"most_popular_teams": None,
"chalk": None,
}
self.simulations_won_by_special_entries = {
"most_valuable_teams": 0,
"most_popular_teams": 0,
"chalk": 0,
}
self.entries = {
"imported_entries": [],
"most_valuable_teams": None,
"most_popular_teams": None,
"chalk": None,
}
self.simulations_won_by_imported_entries = []
self.winning_scores_of_simulations = []
pass
def raw_print(self):
for region in self.all_teams:
for seed in self.all_teams[region]:
for team in self.all_teams[region][seed]:
# print(team.name+", "+team.region+", "+team.seed+", "+str(team.total_expected_points)+", "+str(team.total_picked_expected_points)+", "+str(team.total_points_diff))
print(team.name+", "+team.region+", "+team.seed+", "+str(team.total_expected_points)+", "+str(team.total_picked_expected_points)+", "+str(team.total_points_diff)+", "+str(team.wins))
def create_teams(self):
current_path = os.path.dirname(__file__)
team_data = "../team_data/"+str(self.year)+"_all_prepped_data.csv"
path = os.path.join(current_path, team_data)
all_teams = {}
if os.path.exists(path):
print(" found data")
else:
print(" couldn't find data")
# In original iterations of this I would've attached scraping natively to
# this but now I don't think that that really makes sense
raise Exception("There is no data for this combination of team and year")
team_data = pd.read_csv(path)
gender_specific = team_data[team_data.gender == self.gender]
earliest_date = gender_specific.forecast_date[-1:].values[-1]
# The data subset is the teams from the earliest date shown. This is the data
# for all of the teams that are still in the tournament.
df = gender_specific[gender_specific.forecast_date == earliest_date]
for ind in df.index:
picks = {
1:'100.0%',
2:df["R64_picked"][ind],
3:df["R32_picked"][ind],
4:df["S16_picked"][ind],
5:df["E8_picked"][ind],
6:df["F4_picked"][ind],
7:df["NCG_picked"][ind],
}
team_name = df["team_name"][ind]
team_seed = str(df["team_seed"][ind])
# team seeds in the imported file have an a or B suffix for playin games,
# this strips that
# todo uncomment this and repull all data once playins are played
# if len(team_seed) > 2:
# team_seed = team_seed[0:2]
team_region = df["team_region"][ind]
team_rating = df["team_rating"][ind]
team = Team(team_name, team_seed, team_region, team_rating, picks)
if team_region not in all_teams:
all_teams[team_region] = {}
if team_seed not in all_teams[team_region]:
all_teams[team_region][team_seed] = [team]
else:
all_teams[team_region][team_seed].append(team)
return all_teams
def team_look_up(self, team):
teams = self.all_teams[team.region][team.seed]
if len(teams) == 1:
return teams[0]
else:
if teams[0].name == team.name:
return teams[0]
elif teams[1].name == team.name:
return teams[1]
else:
assert False, "couldn't find team"
def reset_bracket(self):
self.sim_bracket.reset_bracket()
pass
# simulation methods
def batch_simulate(self):
for i in range(0, self.number_simulations):
self.sim_bracket.simulate_bracket()
self.update_scores()
self.reset_bracket()
self.completed_simulations += 1
self.calculate_expected_points()
def update_scores(self):
for region in self.all_teams:
for seed in self.all_teams[region]:
for team in self.all_teams[region][seed]:
team.simulation_results.append(team.temp_result)
if team.simulation_results[-1] == 0 and len(self.all_teams[region][seed]) == 1:
team.simulation_results[-1] = 1
team.temp_result = 0
pass
def calculate_expected_points(self):
for region in self.all_teams:
for seed in self.all_teams[region]:
for team in self.all_teams[region][seed]:
total_expected_points = 0
total_picked_expected_points = 0
total_points_diff = 0
for ep in team.expected_points:
round_expected_points = float(team.wins[ep]) / float(self.number_simulations) * float(self.scoring_system["round"][ep])
picked_round_expected_points = float(team.picked_frequency[ep].strip('%'))/100 * float(self.scoring_system["round"][ep])
round_points_diff = round_expected_points - picked_round_expected_points
team.expected_points[ep] = round_expected_points
team.picked_expected_points[ep] = picked_round_expected_points
team.points_diff[ep] = round_points_diff
total_expected_points += round_expected_points
total_picked_expected_points += picked_round_expected_points
total_points_diff += round_points_diff
team.total_expected_points = total_expected_points
team.total_picked_expected_points = total_picked_expected_points
team.total_points_diff = total_points_diff
pass
def output_most_valuable_bracket(self):
# TODO this can be exported into JSON format without going into the intermediate bracket step
self.calculate_expected_points()
most_valuable_bracket = Bracket(self)
self.postprocess_bracket(most_valuable_bracket, "expected_points")
return most_valuable_bracket
def output_random_bracket(self):
# TODO this can be exported into JSON format without going into the intermediate bracket step
# self.calculate_expected_points()
random_bracket = Bracket(self)
self.postprocess_bracket(random_bracket, "randomized")
return random_bracket
def output_most_popular_bracket(self):
# TODO this can be exported into JSON format without going into the intermediate bracket step
most_popular_bracket = Bracket(self)
self.postprocess_bracket(most_popular_bracket, "picked_frequency")
return most_popular_bracket
def update_entry_picks(self):
team_data = r'../web_scraper/'+self.gender+str(self.year)+r'/actual.json'
# chalk_data = r'../web_scraper/'+model.gender+str(model.year)+r'/chalk.json'
current_path = os.path.dirname(__file__)
new_team_data = os.path.join(current_path, team_data)
# chalk_team_data = os.path.join(current_path, chalk_data)
actual_results = json.load(open(new_team_data, "r"))
# chalk_results = json.load(open(chalk_team_data, "r"))
for region in actual_results:
for seed in actual_results[region]:
for team in actual_results[region][seed]:
if self.all_teams[region][seed][0].name == team:
self.all_teams[region][seed][0].entry_picks["actual_results"] = actual_results[region][seed][team]
else:
self.all_teams[region][seed][1].entry_picks["actual_results"] = actual_results[region][seed][team]
entry = {
"team_picks" : actual_results,
"name" : "Actual results",
"entryID" : -1,
"method" : "Actual results",
"source" : "Actual results"
}
self.entries["actual_results"] = Entry(model=self, source=json.dumps(entry), method="json")
def initialize_special_entries(self):
# Initialize the special entries including:
# Most valuable bracket, most popular bracket, chalk bracket
most_valuable_bracket = self.output_most_valuable_bracket()
most_popular_bracket = self.output_most_popular_bracket()
# random_bracket = self.output_random_bracket()
current_path = os.path.dirname(__file__)
chalk_data = r'../web_scraper/'+self.gender+str(self.year)+r'/chalk.json'
chalk_team_data = os.path.join(current_path, chalk_data)
chalk_results = json.load(open(chalk_team_data, "r"))
chalk_entry = {
"team_picks" : chalk_results,
"name" : "Ch<NAME>",
"entryID" : -4,
"method" : "Chalk entry",
"source" : "Chalk entry"
}
mvb_source = self.sim_bracket.export_bracket_to_json(most_valuable_bracket.bracket.root, "most valuable bracket")
mpb_source = self.sim_bracket.export_bracket_to_json(most_popular_bracket.bracket.root, "most popular bracket")
# random_bracket = self.sim_bracket.export_bracket_to_json(random_bracket.bracket.root, "random bracket")
self.special_entries["most_valuable_teams"] = Entry(model=self, source=mvb_source, method="json")
self.special_entries["most_popular_teams"] = Entry(model=self, source=mpb_source, method="json")
self.special_entries["chalk"] = Entry(model=self, source=json.dumps(chalk_entry), method="json")
def analyze_special_entries(self):
# Add in the results for special brackets including:
# Most valuable bracket, most popular bracket, chalk bracket
for entry in self.special_entries:
self.update_special_entry_score(self.special_entries[entry], entry)
def postprocess_via_popularity_and_value(self):
# Add most valuable and most picked brackets
most_valuable_bracket = self.output_most_valuable_bracket()
most_popular_bracket = self.output_most_popular_bracket()
mvb_source = self.sim_bracket.export_bracket_to_json(most_valuable_bracket.bracket.root, "most valuable bracket")
mpb_source = self.sim_bracket.export_bracket_to_json(most_popular_bracket.bracket.root, "most popular bracket")
self.entries["most_valuable_teams"] = Entry(source=mvb_source, method="json")
self.update_special_entry_score(self.entries["most_valuable_teams"])
self.entries["most_popular_teams"] = Entry(source=mpb_source, method="json")
pass
def add_simulation_results_postprocessing(self):
self.actual_results = Simulation_results(self, actual=True)
if len(self.simulation_results) == 0:
for i in range(0, self.number_simulations):
self.simulation_results.append(Simulation_results(self, index=i))
pass
def refresh_scoring_list(self):
for simulation in self.simulation_results:
simulation.import_scoring_list()
def prep_data(self, path):
# Probably never going to use this
return None
# My intuition is that I should be able to do both this and the other
# recursive bracket manipulation functions using callbacks, but I'm not familiar
# enough with Python to know how. May come back to this.
def postprocess_bracket(self, bracket, criteria):
self.postprocess_recursion(bracket.bracket, criteria)
pass
def postprocess_recursion(self, node, criteria):
# sorted DFS post order
for child in node.children:
# go through until there are no children
if not hasattr(child.winner, 'name'):
self.postprocess_recursion(child, criteria)
# then sim game
node.postprocess_pick_team(criteria)
pass
def export_teams_to_json(self, expanded=True, empty=False, array=True):
if expanded:
return json.dumps(self.all_teams, default=lambda o: o.toJSON(array), sort_keys=True, ensure_ascii=False)
else:
return json.dumps(self.all_teams, default=lambda o: o.toJSON(expanded=False, empty=empty), sort_keys=True, ensure_ascii=False)
def add_entry(self, entry):
entry.index = len(self.entries["imported_entries"])
self.entries["imported_entries"].append(entry)
self.update_imported_entry_score(entry)
# self.
def add_fake_entries(self, number_entries):
for i in range(number_entries):
random_bracket = self.output_random_bracket()
random_bracket = self.sim_bracket.export_bracket_to_json(random_bracket.bracket.root, "random bracket no."+str(i+1),entryID=i+1)
self.add_entry(Entry(model=self, source=random_bracket, method="json"))
pass
# TODO Make this incremental potentially
def add_bulk_entries_from_database(self, number_entries):
current_path = os.path.dirname(__file__)
database = r"../db/"+self.gender+str(self.year)+".db"
database_path = os.path.join(current_path, database)
db = sqlite3.connect(database_path)
current = db.cursor()
pull_query = '''SELECT * FROM entries
WHERE id IN
(SELECT id FROM entries
WHERE name <> 'NULL'
ORDER BY RANDOM()
LIMIT ?) '''
data = tuple([number_entries])
bulk_entries = current.execute(pull_query, data).fetchall()
for entry in bulk_entries:
# I don't feel great about the formatting used to add entries. It seems like
# I should be passing in just the data instead of initializing an object
# here. Trying to think of a better structure.
self.add_entry(Entry(model=self, method="database", source=entry))
self.refresh_scoring_list()
def update_special_entry_score(self, entry, entry_name):
for region in self.all_teams:
for seed in self.all_teams[region]:
for team in self.all_teams[region][seed]:
team.special_entries[entry_name] = entry.team_picks[team.region][team.seed][team.name]
for i in range(0, len(team.simulation_results)):
if len(entry.scores["simulations"]) <= i:
entry.scores["simulations"].append(0)
entry.scores["simulations"][i] += self.scoring_system["cumulative"][min(team.simulation_results[i], team.special_entries[entry_name])]
entry.scores["actual_results"] += self.scoring_system["cumulative"][min(team.entry_picks["actual_results"], team.special_entries[entry_name])]
def update_imported_entry_score(self, entry):
# update the scoring list for the passed in entry.
for region in self.all_teams:
for seed in self.all_teams[region]:
for team in self.all_teams[region][seed]:
while len(team.entry_picks["imported_entries"]) < entry.index+1:
team.entry_picks["imported_entries"].append(-1)
team.entry_picks["imported_entries"][entry.index] = entry.team_picks[team.region][team.seed][team.name]
for i in range(0, len(team.simulation_results)):
if len(entry.scores["simulations"]) <= i:
entry.scores["simulations"].append(0)
entry.scores["simulations"][i] += self.scoring_system["cumulative"][min(team.simulation_results[i], team.entry_picks["imported_entries"][entry.index])]
entry.scores["actual_results"] += self.scoring_system["cumulative"][min(team.entry_picks["actual_results"], team.entry_picks["imported_entries"][entry.index])]
# Output is a data frame which has the simulation results for each entry as well
# as the special entries
# Todo The general structuring of outputting of results, especially the sorting
# of ranks is pretty inefficient. This would be a potential place to really
# improve the efficiency of the program.
def output_results(self, entries=None, sims=None):
# output_results is used both for initial rankings and also for further
# postprocessing of subsets. Initial rankings are already ranked and so I
# don't want to go through the trouble of doing that again.
# The initial_ranking variable is a boolean to check whether this is the
# first time things are ranked.
initial_ranking=True
if not entries:
entry_list = self.entries['imported_entries']
entry_index_list = [i for i in range(len(entry_list))]
else:
entry_list = random.sample(self.entries['imported_entries'], entries)
entry_index_list = [entry.index for entry in entry_list]
initial_ranking = False
if not sims:
simulation_list = self.simulation_results
sim_index_list = [i for i in range(len(simulation_list))]
else:
simulation_list = random.sample(self.simulation_results, sims)
sim_index_list = [sim.simulation_index for sim in simulation_list]
initial_ranking = False
def add_data_frame_entry(entryID, name, array_name, sim_index_list):
all_team_data['entryID'].append(entryID)
all_team_data['name'].append(name)
simulation_results = []
for sim in sim_index_list:
simulation_results.append(array_name[sim])
all_team_data['simulations'].append(simulation_results)
def add_rankings():
all_team_data['ranks'] = [[] for i in range(len(all_team_data['simulations']))]
all_team_data['placings'] = [[] for i in range(len(all_team_data['simulations']))]
if entries and sims:
print(" subset ")
else:
for simulation in simulation_list:
for i in entry_index_list:
all_team_data['ranks'][i].append(simulation.ranking_list['entries'][i])
all_team_data['placings'][i].append(simulation.placing_list['entries'][i])
all_team_data['ranks'][len(entry_list)].append(1.0)
all_team_data['ranks'][len(entry_list)+1].append(simulation.ranking_list['most_valuable_teams'])
all_team_data['ranks'][len(entry_list)+2].append(simulation.ranking_list['most_popular_teams'])
all_team_data['ranks'][len(entry_list)+3].append(simulation.ranking_list['chalk'])
all_team_data['placings'][len(entry_list)].append(1)
all_team_data['placings'][len(entry_list)+1].append(simulation.placing_list['most_valuable_teams'])
all_team_data['placings'][len(entry_list)+2].append(simulation.placing_list['most_popular_teams'])
all_team_data['placings'][len(entry_list)+3].append(simulation.placing_list['chalk'])
def rerank(entry_list):
# Use ranking algorithm for limited scoring data set
all_team_data['ranks'] = [[] for i in range(len(entry_list)+4)]
all_team_data['placings'] = [[] for i in range(len(entry_list)+4)]
winning_score_list = []
winning_index_list = []
winning_score = 0
winning_index = [-1]
for simulation in simulation_list:
array = [entry.scores['simulations'][simulation.simulation_index] for entry in entry_list]
special_scores = {
'scores' : {
'most_valuable_teams' : simulation.scoring_list['most_valuable_teams'],
'most_popular_teams' : simulation.scoring_list['most_popular_teams'],
'chalk' : simulation.scoring_list['chalk'],
},
'ranks' : {
'most_valuable_teams' : -1.0,
'most_popular_teams' : -1.0,
'chalk' : -1.0,
},
'placings' : {
'most_valuable_teams' : -1,
'most_popular_teams' : -1,
'chalk' : -1,
}
}
rank_vector = [0 for i in range(len(array))]
placing_vector = [0 for i in range(len(array))]
tuple_array = [(array[i], i) for i in range(len(array))]
tuple_array.sort(reverse=True)
winning_score = tuple_array[0][0]
# all_team_data['simulations'][len(entry_list)] = winning_score
winning_index = [entry_list[tuple_array[0][1]].index]
(rank, n, i) = (1, 1, 0)
for special in special_scores['scores'].keys():
if special_scores['scores'][special] > winning_score:
special_scores['ranks'][special] = 1.0
special_scores['placings'][special] = 1
elif special_scores['scores'][special] < tuple_array[-1][0]:
special_scores['ranks'][special] = float(len(tuple_array))
special_scores['placings'][special] = len(tuple_array)
while i < len(array):
j = i
while j < len(array) - 1 and tuple_array[j][0] == tuple_array[j+1][0]:
j += 1
if tuple_array[j][0] == winning_score:
winning_index.append(entry_list[tuple_array[j][1]].index)
n = j - i + 1
for j in range(n):
shared_index = tuple_array[i+j][1]
rank_vector[shared_index] = rank + (n - 1) * 0.5
placing_vector[shared_index] = rank
for special in special_scores['scores'].keys():
if special_scores['scores'][special] == tuple_array[i+j][0]:
special_scores['ranks'][special] = rank_vector[shared_index]
special_scores['placings'][special] = rank
elif special_scores['scores'][special] < tuple_array[i-1][0] and special_scores['scores'][special] > tuple_array[i][0]:
assert tuple_array[i-1][0] >= tuple_array[i][0]
special_scores['ranks'][special] = rank_vector[shared_index]
special_scores['placings'][special] = rank
rank += n
i += n
for special in special_scores['scores'].keys():
if special_scores['scores'][special] > winning_score:
special_scores['ranks'][special] = 1.0
special_scores['placings'][special] = 1
# print("a",special, special_scores['ranks'][special], special_scores['scores'][special])
elif special_scores['scores'][special] < tuple_array[-1][0]:
special_scores['ranks'][special] = float(len(tuple_array))
special_scores['placings'][special] = len(tuple_array)
# print("b",special, special_scores['ranks'][special], special_scores['scores'][special])
elif special_scores['scores'][special] == winning_score:
special_scores['ranks'][special] = rank_vector[tuple_array[0][1]]
special_scores['placings'][special] = placing_vector[tuple_array[0][1]]
# print("c",special, special_scores['ranks'][special], special_scores['scores'][special])
else:
i = 1
while i < len(tuple_array):
if tuple_array[i-1][0] > special_scores['scores'][special] > tuple_array[i][0]:
multiples = 0
while tuple_array[i+multiples][0] == tuple_array[i][0] and i < len(tuple_array):
multiples +=1
if i+multiples == len(tuple_array):
break
special_scores['ranks'][special] = i+(multiples-1)*0.5+1
special_scores['placings'][special] = i+1
# print("d",i,special, special_scores['ranks'][special], special_scores['scores'][special])
i = len(tuple_array)
elif special_scores['scores'][special] == tuple_array[i][0]:
special_scores['ranks'][special] = rank_vector[tuple_array[i][1]]
special_scores['placings'][special] = placing_vector[tuple_array[i][1]]
# print("e",i,special, special_scores['ranks'][special], special_scores['scores'][special])
i = len(tuple_array)
else:
i += 1
for special in special_scores['scores'].keys():
assert special_scores['ranks'][special] > 0
assert not (special_scores['ranks'][special] == 1 and special_scores['scores'][special] < winning_score and len(array)>1)
for i in range(len(entry_list)):
all_team_data['ranks'][i].append(rank_vector[i])
all_team_data['placings'][i].append(placing_vector[i])
winning_score_list.append(winning_score)
winning_index_list.append(winning_index)
all_team_data['simulations'][len(entry_list)] = winning_score_list
all_team_data['ranks'][len(entry_list)].append(1.0)
all_team_data['placings'][len(entry_list)].append(1)
all_team_data['ranks'][len(entry_list)+1].append(special_scores['ranks']['most_valuable_teams'])
all_team_data['placings'][len(entry_list)+1].append(special_scores['placings']['most_valuable_teams'])
all_team_data['ranks'][len(entry_list)+2].append(special_scores['ranks']['most_popular_teams'])
all_team_data['placings'][len(entry_list)+2].append(special_scores['placings']['most_popular_teams'])
all_team_data['ranks'][len(entry_list)+3].append(special_scores['ranks']['chalk'])
all_team_data['placings'][len(entry_list)+3].append(special_scores['placings']['chalk'])
# Update winning scores
all_team_data = {
'entryID' : [],
'name' : [],
'simulations' : [],
# 'ranks' : [],
# 'placings' : []
}
for entry in entry_list:
add_data_frame_entry(entry.entryID, entry.name, entry.scores['simulations'], sim_index_list)
add_data_frame_entry(-1, 'winning_score', self.winning_scores_of_simulations, sim_index_list)
add_data_frame_entry(-2, 'most_valuable_teams', self.special_entries['most_valuable_teams'].scores['simulations'], sim_index_list)
add_data_frame_entry(-3, 'most_popular_teams', self.special_entries['most_popular_teams'].scores['simulations'], sim_index_list)
add_data_frame_entry(-4, 'chalk', self.special_entries['chalk'].scores['simulations'], sim_index_list)
if initial_ranking:
add_rankings()
else:
rerank(entry_list)
output_data = df(data=all_team_data)
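# NOTE: the source appears to be truncated here; presumably output_data is
# returned or written out by the caller (an assumption, not original code).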
"""ADDS FUNCTIONALITY TO APPLY FUNCTION ON PANDAS OBJECTS IN PARALLEL
This script adds functionality to pandas so that you can do parallel processing on multiple
cores when you use the apply method on dataframes, series or groupby objects.
This file must be imported as a module; it attaches the following functions to pandas:
* group_apply_parallel - adds apply_parallel method to groupby objects
* series_apply_parallel - adds apply_parallel method to series objects
* df_apply_parallel - adds apply_parallel method to dataframe objects
"""
import pandas as pd
from multiprocess import Pool
import functools
from os import cpu_count
def attachpandas():
pd.core.groupby.generic.DataFrameGroupBy.apply_parallel = group_apply_parallel
pd.core.series.Series.apply_parallel = series_apply_parallel
pd.core.frame.DataFrame.apply_parallel = df_apply_parallel
def group_apply_parallel(self, func, static_data=None, num_processes = cpu_count()):
"""
Add functionality to pandas so that you can do processing on groups on multiple cores at same time.
- This method will pass each group dataframe to the passed func (including key columns on which the group is formed).
- If there is some external data that needs to be used by the function, pass it as a list in static_data, and then accept that list in your func.
You must have a named argument with name 'static_data' if you need to accept static data.
Individual items in static_data list needs to be accessed using indexing.
"""
func = func if static_data is None else functools.partial(func, static_data=static_data)
with Pool(num_processes) as p:
ret_list = p.map(func, [df.copy() for idx, df in self])
if isinstance(ret_list[0], pd.DataFrame) or isinstance(ret_list[0], pd.Series):
return pd.concat(ret_list, keys=[idx for idx, df in self], names=self.keys, axis=0)
out = pd.DataFrame([idx for idx, df in self], columns=self.keys)
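# Usage sketch (not part of the original module; the toy DataFrame below is an
# assumption for illustration). In normal use you would call attachpandas() and
# then df.groupby(...).apply_parallel(func); here group_apply_parallel is called
# directly because this excerpt appears truncated before the series/dataframe
# variants are defined. Only the DataFrame-returning branch above is exercised.
if __name__ == "__main__":
    demo = pd.DataFrame({"g": ["a", "a", "b", "b"], "x": [1, 2, 3, 4]})
    # Double each group's "x" column in a separate process.
    doubled = group_apply_parallel(
        demo.groupby(["g"]),
        lambda df: df.assign(x2=df["x"] * 2),
        num_processes=2,
    )
    print(doubled)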
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
# TODO add index to xls file)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, "Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
(
"DateCol",
[
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
),
]
)
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, "Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext,
"Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]),
("StrCol", ["1", np.nan, "3", "4", "5"]),
]
)
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_date_conversion_overflow(self, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame(
[
[pd.Timestamp("2016-03-12"), "<NAME>"],
[pd.Timestamp("2016-03-16"), "<NAME>"],
[1e20, "<NAME>"],
],
columns=["DateColWithBigInt", "StringCol"],
)
if pd.read_excel.keywords["engine"] == "openpyxl":
pytest.xfail("Maybe not supported by openpyxl")
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, read_ext, df_ref):
filename = "test1"
sheet_name = "Sheet1"
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
with ignore_xlrd_time_clock_warning():
df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_excel_read_buffer(self, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, "Sheet1", index_col=0)
with open(pth, "rb") as f:
actual = pd.read_excel(f, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, read_ext):
bad_engine = "foo"
with pytest.raises(ValueError, match="Unknown engine: foo"):
pd.read_excel("", engine=bad_engine)
@tm.network
def test_read_from_http_url(self, read_ext):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/excel/test1" + read_ext
)
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, read_ext, s3_resource):
# Bucket "pandas-test" created in tests/io/conftest.py
with open("test1" + read_ext, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f)
url = "s3://pandas-test/test1" + read_ext
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, read_ext, datapath):
# FILE
localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
local_table = pd.read_excel(localtable)
try:
url_table = pd.read_excel("file://localhost/" + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on {}".format(" ".join(platform.uname()).strip()))
tm.assert_frame_equal(url_table, local_table)
def test_read_from_pathlib_path(self, read_ext):
# GH12655
from pathlib import Path
str_path = "test1" + read_ext
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = Path("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
@td.check_file_leaks
def test_read_from_py_localpath(self, read_ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join("test1" + read_ext)
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = LocalPath().join("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_seconds(self, read_ext):
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
{
"Time": [
time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54),
]
}
)
actual = pd.read_excel("times_1900" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel("times_1904" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, read_ext):
# see gh-4679
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
# "mi_column" sheet
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=mi,
)
actual = pd.read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = pd.read_excel(mi_file, "mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = pd.read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "mi_column_name", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "name_with_int", index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "both_name", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_skiprows" sheet
actual = pd.read_excel(
mi_file, "both_name_skiprows", index_col=[0, 1], header=[0, 1], skiprows=2
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex_header_only(self, read_ext):
# see gh-11733.
#
# Don't try to parse a header name if there isn't one.
mi_file = "testmultiindex" + read_ext
result = pd.read_excel(mi_file, "index_col_none", header=[0, 1])
exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
tm.assert_frame_equal(result, expected)
def test_excel_old_index_format(self, read_ext):
# see gh-4679
filename = "test_index_name_pre17" + read_ext
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array(
[
[None, None, None, None, None],
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None],
)
si = Index(
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None
)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, "single_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, "multi_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array(
[
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None],
)
si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, "single_no_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, "multi_no_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, read_ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel("test1" + read_ext, header=arg)
def test_read_excel_chunksize(self, read_ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel("test1" + read_ext, chunksize=100)
def test_read_excel_skiprows_list(self, read_ext):
# GH 4903
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=[0, 2]
)
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=["a", "b", "c", "d"],
)
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=np.array([0, 2])
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, read_ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
expected = pd.read_excel("test1" + read_ext)
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext):
# GH 16645
expected = pd.read_excel("test1" + read_ext)
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, read_ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, nrows="5")
def test_read_excel_squeeze(self, read_ext):
# GH 12157
f = "test_squeeze" + read_ext
actual = pd.read_excel(f, "two_columns", index_col=0, squeeze=True)
expected = pd.Series([2, 3, 4], [4, 5, 6], name="b")
expected.index.name = "a"
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, "two_columns", squeeze=True)
expected = pd.DataFrame({"a": [4, 5, 6], "b": [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, "one_column", squeeze=True)
expected = pd.Series([1, 2, 3], name="a")
tm.assert_series_equal(actual, expected)
class TestExcelFileRead:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for ExcelFile objects.
"""
func = partial(pd.ExcelFile, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "ExcelFile", func)
def test_excel_passes_na(self, read_ext):
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, "Sheet1", keep_default_na=False, na_values=["apple"]
)
expected = DataFrame(
[["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, "Sheet1", keep_default_na=True, na_values=["apple"]
)
expected = DataFrame(
[[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
# 13967
with pd.ExcelFile("test5" + read_ext) as excel:
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 12:51:57 2021
@author: Administrator
"""
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
def apply(decorator):
def decorate(cls):
for attr in cls.__dict__:
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
class TA:
__version__ = "1.2"
@classmethod
def SMA(cls, ohlc: DataFrame, period: int = 41, column: str = "close") -> Series:
"""
Simple moving average - rolling mean in pandas lingo. Also known as 'MA'.
The simple moving average (SMA) is the most basic of the moving averages used for trading.
"""
return pd.Series(
ohlc[column].rolling(window=period).mean(),
name="{0} period SMA".format(period),
)
@classmethod
def SMM(cls, ohlc: DataFrame, period: int = 9, column: str = "close") -> Series:
"""
Simple moving median, an alternative to moving average. SMA, when used to estimate the underlying trend in a time series,
is susceptible to rare events such as rapid shocks or other anomalies. A more robust estimate of the trend is the simple moving median over n time periods.
"""
return pd.Series(
ohlc[column].rolling(window=period).median(),
name="{0} period SMM".format(period),
)
@classmethod
def SSMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Smoothed simple moving average.
:param ohlc: data
:param period: range
:param column: open/close/high/low column of the DataFrame
:return: result Series
"""
return pd.Series(
ohlc[column]
.ewm(ignore_na=False, alpha=1.0 / period, min_periods=0, adjust=adjust)
.mean(),
name="{0} period SSMA".format(period),
)
@classmethod
def EMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Exponential Weighted Moving Average - Like all moving average indicators, they are much better suited for trending markets.
When the market is in a strong and sustained uptrend, the EMA indicator line will also show an uptrend and vice-versa for a down trend.
EMAs are commonly used in conjunction with other indicators to confirm significant market moves and to gauge their validity.
"""
return pd.Series(
ohlc[column].ewm(span=period, adjust=adjust).mean(),
name="{0} period EMA".format(period),
)
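# Usage sketch (illustrative, not from the original library; `ohlc` is assumed
# to be a DataFrame with a "close" column):
#   fast_ema = TA.EMA(ohlc, period=12)
#   slow_ema = TA.EMA(ohlc, period=26)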
@classmethod
def DEMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Double Exponential Moving Average - attempts to remove the inherent lag associated to Moving Averages
by placing more weight on recent values. The name suggests this is achieved by applying a double exponential
smoothing which is not the case. The name double comes from the fact that the value of an EMA (Exponential Moving Average) is doubled.
To keep it in line with the actual data and to remove the lag the value 'EMA of EMA' is subtracted from the previously doubled EMA.
Because EMA(EMA) is used in the calculation, DEMA needs 2 * period -1 samples to start producing values in contrast to the period
samples needed by a regular EMA
"""
DEMA = (
2 * cls.EMA(ohlc, period)
- cls.EMA(ohlc, period).ewm(span=period, adjust=adjust).mean()
)
return pd.Series(DEMA, name="{0} period DEMA".format(period))
@classmethod
def TEMA(cls, ohlc: DataFrame, period: int = 9, adjust: bool = True) -> Series:
"""
Triple exponential moving average - attempts to remove the inherent lag associated to Moving Averages by placing more weight on recent values.
The name suggests this is achieved by applying a triple exponential smoothing which is not the case. The name triple comes from the fact that the
value of an EMA (Exponential Moving Average) is triple.
To keep it in line with the actual data and to remove the lag the value 'EMA of EMA' is subtracted 3 times from the previously tripled EMA.
Finally 'EMA of EMA of EMA' is added.
Because EMA(EMA(EMA)) is used in the calculation, TEMA needs 3 * period - 2 samples to start producing values in contrast to the period samples
needed by a regular EMA.
"""
triple_ema = 3 * cls.EMA(ohlc, period)
ema_ema_ema = (
cls.EMA(ohlc, period)
.ewm(ignore_na=False, span=period, adjust=adjust)
.mean()
.ewm(ignore_na=False, span=period, adjust=adjust)
.mean()
)
TEMA = (
triple_ema
- 3 * cls.EMA(ohlc, period).ewm(span=period, adjust=adjust).mean()
+ ema_ema_ema
)
return pd.Series(TEMA, name="{0} period TEMA".format(period))
@classmethod
def TRIMA(cls, ohlc: DataFrame, period: int = 18) -> Series:
"""
The Triangular Moving Average (TRIMA) [also known as TMA] represents an average of prices,
but places weight on the middle prices of the time period.
The calculations double-smooth the data using a window width that is one-half the length of the series.
source: https://www.thebalance.com/triangular-moving-average-tma-description-and-uses-1031203
"""
SMA = cls.SMA(ohlc, period).rolling(window=period).sum()
return pd.Series(SMA / period, name="{0} period TRIMA".format(period))
@classmethod
def TRIX(
cls,
ohlc: DataFrame,
period: int = 20,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
The TRIX indicator calculates the rate of change of a triple exponential moving average.
The values oscillate around zero. Buy/sell signals are generated when the TRIX crosses above/below zero.
A (typically) 9 period exponential moving average of the TRIX can be used as a signal line.
A buy/sell signals are generated when the TRIX crosses above/below the signal line and is also above/below zero.
The TRIX was developed by <NAME>, publisher of Technical Analysis of Stocks & Commodities magazine,
and was introduced in Volume 1, Number 5 of that magazine.
"""
data = ohlc[column]
def _ema(data, period, adjust):
return pd.Series(data.ewm(span=period, adjust=adjust).mean())
m = _ema(_ema(_ema(data, period, adjust), period, adjust), period, adjust)
return pd.Series(100 * (m.diff() / m), name="{0} period TRIX".format(period))
@classmethod
def LWMA(cls, ohlc: DataFrame, period: int, column: str = "close") -> Series:
"""
Linear Weighted Moving Average
"""
raise NotImplementedError
@classmethod
def VAMA(cls, ohlcv: DataFrame, period: int = 8, column: str = "close") -> Series:
"""
Volume Adjusted Moving Average
"""
vp = ohlcv["volume"] * ohlcv[column]
volsum = ohlcv["volume"].rolling(window=period).mean()
volRatio = pd.Series(vp / volsum, name="VAMA")
cumSum = (volRatio * ohlcv[column]).rolling(window=period).sum()
cumDiv = volRatio.rolling(window=period).sum()
return pd.Series(cumSum / cumDiv, name="{0} period VAMA".format(period))
@classmethod
def VIDYA(
cls,
ohlcv: DataFrame,
period: int = 9,
smoothing_period: int = 12,
column: str = "close",
) -> Series:
""" Vidya (variable index dynamic average) indicator is a modification of the traditional Exponential Moving Average (EMA) indicator.
The main difference between EMA and Vidya is in the way the smoothing factor F is calculated.
In EMA the smoothing factor is a constant value F=2/(period+1);
in Vidya the smoothing factor is variable and depends on bar-to-bar price movements."""
raise NotImplementedError
@classmethod
def ER(cls, ohlc: DataFrame, period: int = 10, column: str = "close") -> Series:
"""The Kaufman Efficiency indicator is an oscillator indicator that oscillates between +100 and -100, where zero is the center point.
+100 is upward forex trending market and -100 is downwards trending markets."""
change = ohlc[column].diff(period).abs()
volatility = ohlc[column].diff().abs().rolling(window=period).sum()
return pd.Series(change / volatility, name="{0} period ER".format(period))
@classmethod
def KAMA(
cls,
ohlc: DataFrame,
er: int = 10,
ema_fast: int = 2,
ema_slow: int = 30,
period: int = 20,
column: str = "close",
) -> Series:
"""Developed by <NAME>, Kaufman's Adaptive Moving Average (KAMA) is a moving average designed to account for market noise or volatility.
Its main advantage is that it takes into consideration not just the direction, but the market volatility as well."""
er = cls.ER(ohlc, er)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
) ## smoothing constant
sma = pd.Series(
ohlc[column].rolling(period).mean(), name="SMA"
) ## first KAMA is SMA
kama = []
# Current KAMA = Prior KAMA + smoothing_constant * (Price - Prior KAMA)
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), ohlc[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name="{0} period KAMA.".format(period)
) ## apply the kama list to existing index
return sma["KAMA"]
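# Usage sketch (illustrative, not from the original library):
#   kama = TA.KAMA(ohlc, er=10, ema_fast=2, ema_slow=30, period=20)
# The result is a Series aligned to ohlc.index; values before the first SMA
# seed are None/NaN until enough data is available.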
@classmethod
def ZLEMA(
cls,
ohlc: DataFrame,
period: int = 26,
adjust: bool = True,
column: str = "close",
) -> Series:
"""ZLEMA is an abbreviation of Zero Lag Exponential Moving Average. It was developed by <NAME> and <NAME>.
ZLEMA is a kind of Exponential moving average but its main idea is to eliminate the lag arising from the very nature of the moving averages
and other trend following indicators. As it follows price closer, it also provides better price averaging and responds better to price swings."""
lag = int((period - 1) / 2)
ema = pd.Series(
(ohlc[column] + (ohlc[column].diff(lag))),
name="{0} period ZLEMA.".format(period),
)
zlema = pd.Series(
ema.ewm(span=period, adjust=adjust).mean(),
name="{0} period ZLEMA".format(period),
)
return zlema
@classmethod
def WMA(cls, ohlc: DataFrame, period: int = 9, column: str = "close") -> Series:
"""
WMA stands for weighted moving average. It helps to smooth the price curve for better trend identification.
It places even greater importance on recent data than the EMA does.
:period: Specifies the number of Periods used for WMA calculation
"""
d = (period * (period + 1)) / 2 # denominator
weights = np.arange(1, period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_close = ohlc[column].rolling(period, min_periods=period)
wma = _close.apply(linear(weights), raw=True)
return pd.Series(wma, name="{0} period WMA.".format(period))
@classmethod
def HMA(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""
HMA indicator is a common abbreviation of Hull Moving Average.
The average was developed by <NAME> and is used mainly to identify the current market trend.
Unlike SMA (simple moving average) the curve of Hull moving average is considerably smoother.
Moreover, because its aim is to minimize the lag between HMA and price it does follow the price activity much closer.
It is used especially for middle-term and long-term trading.
:period: Specifies the number of Periods used for WMA calculation
"""
import math
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = cls.WMA(ohlc, period=half_length)
wmas = cls.WMA(ohlc, period=period)
ohlc["deltawma"] = 2 * wmaf - wmas
hma = cls.WMA(ohlc, column="deltawma", period=sqrt_length)
return pd.Series(hma, name="{0} period HMA.".format(period))
@classmethod
def EVWMA(cls, ohlcv: DataFrame, period: int = 20) -> Series:
"""
The eVWMA can be looked at as an approximation to the
average price paid per share in the last n periods.
:period: Specifies the number of Periods used for eVWMA calculation
"""
vol_sum = (
ohlcv["volume"].rolling(window=period).sum()
) # floating shares in last N periods
x = (vol_sum - ohlcv["volume"]) / vol_sum
y = (ohlcv["volume"] * ohlcv["close"]) / vol_sum
evwma = [0]
# evwma = (evma[-1] * (vol_sum - volume)/vol_sum) + (volume * price / vol_sum)
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=ohlcv.index, name="{0} period EVWMA.".format(period),
)
@classmethod
def VWAP(cls, ohlcv: DataFrame) -> Series:
"""
The volume weighted average price (VWAP) is a trading benchmark used especially in pension plans.
VWAP is calculated by adding up the dollars traded for every transaction (price multiplied by number of shares traded) and then dividing
by the total shares traded for the day.
"""
return pd.Series(
((ohlcv["volume"] * cls.TP(ohlcv)).cumsum()) / ohlcv["volume"].cumsum(),
name="VWAP.",
)
@classmethod
def SMMA(
cls,
ohlc: DataFrame,
period: int = 42,
column: str = "close",
adjust: bool = True,
) -> Series:
"""The SMMA (Smoothed Moving Average) gives recent prices an equal weighting to historic prices."""
return pd.Series(
ohlc[column].ewm(alpha=1 / period, adjust=adjust).mean(), name="SMMA"
)
@classmethod
def ALMA(
cls, ohlc: DataFrame, period: int = 9, sigma: int = 6, offset: int = 0.85
) -> Series:
"""Arnaud Legoux Moving Average."""
"""dataWindow = _.last(data, period)
size = _.size(dataWindow)
m = offset * (size - 1)
s = size / sigma
sum = 0
norm = 0
for i in [size-1..0] by -1
coeff = Math.exp(-1 * (i - m) * (i - m) / 2 * s * s)
sum = sum + dataWindow[i] * coeff
norm = norm + coeff
return sum / norm"""
raise NotImplementedError
@classmethod
def MAMA(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""MESA Adaptive Moving Average"""
raise NotImplementedError
@classmethod
def FRAMA(cls, ohlc: DataFrame, period: int = 16, batch: int=10) -> Series:
"""Fractal Adaptive Moving Average
Source: http://www.stockspotter.com/Files/frama.pdf
Adopted from: https://www.quantopian.com/posts/frama-fractal-adaptive-moving-average-in-python
:period: Specifies the number of periods used for FRANA calculation
:batch: Specifies the size of batches used for FRAMA calculation
"""
assert period % 2 == 0, "FRAMA period must be even"
c = ohlc.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=ohlc.index, name="{0} period FRAMA.".format(period))
@classmethod
def MACD(
cls,
ohlc: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
MACD, MACD Signal and MACD difference.
The MACD Line oscillates above and below the zero line, which is also known as the centerline.
These crossovers signal that the 12-day EMA has crossed the 26-day EMA. The direction, of course, depends on the direction of the moving average cross.
Positive MACD indicates that the 12-day EMA is above the 26-day EMA. Positive values increase as the shorter EMA diverges further from the longer EMA.
This means upside momentum is increasing. Negative MACD values indicates that the 12-day EMA is below the 26-day EMA.
Negative values increase as the shorter EMA diverges further below the longer EMA. This means downside momentum is increasing.
Signal line crossovers are the most common MACD signals. The signal line is a 9-day EMA of the MACD Line.
As a moving average of the indicator, it trails the MACD and makes it easier to spot MACD turns.
A bullish crossover occurs when the MACD turns up and crosses above the signal line.
A bearish crossover occurs when the MACD turns down and crosses below the signal line.
"""
EMA_fast = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
MACD = pd.Series(EMA_fast - EMA_slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
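# Usage sketch (illustrative, not from the original library):
#   macd = TA.MACD(ohlc)  # DataFrame with "MACD" and "SIGNAL" columns
#   bullish = (macd["MACD"] > macd["SIGNAL"]) & (macd["MACD"].shift() <= macd["SIGNAL"].shift())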
@classmethod
def PPO(
cls,
ohlc: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
Percentage Price Oscillator
PPO, PPO Signal and PPO difference.
As with MACD, the PPO reflects the convergence and divergence of two moving averages.
While MACD measures the absolute difference between two moving averages, PPO makes this a relative value by dividing the difference by the slower moving average
"""
EMA_fast = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
PPO = pd.Series(((EMA_fast - EMA_slow) / EMA_slow) * 100, name="PPO")
PPO_signal = pd.Series(
PPO.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
PPO_histo = pd.Series(PPO - PPO_signal, name="HISTO")
return pd.concat([PPO, PPO_signal, PPO_histo], axis=1)
@classmethod
def VW_MACD(
cls,
ohlcv: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
""""Volume-Weighted MACD" is an indicator that shows how a volume-weighted moving average can be used to calculate moving average convergence/divergence (MACD).
This technique was first used by <NAME>, CMT, and has been written about since at least 2002."""
vp = ohlcv["volume"] * ohlcv[column]
_fast = pd.Series(
(vp.ewm(ignore_na=False, span=period_fast, adjust=adjust).mean())
/ (
ohlcv["volume"]
.ewm(ignore_na=False, span=period_fast, adjust=adjust)
.mean()
),
name="_fast",
)
_slow = pd.Series(
(vp.ewm(ignore_na=False, span=period_slow, adjust=adjust).mean())
/ (
ohlcv["volume"]
.ewm(ignore_na=False, span=period_slow, adjust=adjust)
.mean()
),
name="_slow",
)
MACD = pd.Series(_fast - _slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
@classmethod
def EV_MACD(
cls,
ohlcv: DataFrame,
period_fast: int = 20,
period_slow: int = 40,
signal: int = 9,
adjust: bool = True,
) -> DataFrame:
"""
Elastic Volume Weighted MACD is a variation of standard MACD,
calculated using two EVWMA's.
:period_slow: Specifies the number of Periods used for the slow EVWMA calculation
:period_fast: Specifies the number of Periods used for the fast EVWMA calculation
:signal: Specifies the number of Periods used for the signal calculation
"""
evwma_slow = cls.EVWMA(ohlcv, period_slow)
evwma_fast = cls.EVWMA(ohlcv, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
@classmethod
def MOM(cls, ohlc: DataFrame, period: int = 10, column: str = "close") -> Series:
"""Market momentum is measured by continually taking price differences for a fixed time interval.
To construct a 10-day momentum line, simply subtract the closing price 10 days ago from the last closing price.
This positive or negative value is then plotted around a zero line."""
return pd.Series(ohlc[column].diff(period), name="MOM")
@classmethod
def ROC(cls, ohlc: DataFrame, period: int = 12, column: str = "close") -> Series:
"""The Rate-of-Change (ROC) indicator, which is also referred to as simply Momentum,
is a pure momentum oscillator that measures the percent change in price from one period to the next.
The ROC calculation compares the current price with the price “n” periods ago."""
return pd.Series(
(ohlc[column].diff(period) / ohlc[column].shift(period)) * 100, name="ROC"
)
@classmethod
def VBM(
cls,
ohlc: DataFrame,
roc_period: int = 12,
atr_period: int = 26,
column: str = "close",
) -> Series:
"""The Volatility-Based-Momentum (VBM) indicator, The calculation for a volatility based momentum (VBM)
indicator is very similar to ROC, but divides by the security’s historical volatility instead.
The average true range indicator (ATR) is used to compute historical volatility.
VBM(n,v) = (Close — Close n periods ago) / ATR(v periods)
"""
return pd.Series(
(
ohlc[column].diff(roc_period)
/ cls.ATR(ohlc, atr_period)
),
name="VBM",
)
@classmethod
def RSI(
cls,
ohlc: DataFrame,
period: int = 14,
column: str = "close",
adjust: bool = True,
) -> Series:
"""Relative Strength Index (RSI) is a momentum oscillator that measures the speed and change of price movements.
RSI oscillates between zero and 100. Traditionally, and according to Wilder, RSI is considered overbought when above 70 and oversold when below 30.
Signals can also be generated by looking for divergences, failure swings and centerline crossovers.
RSI can also be used to identify the general trend."""
## get the price diff
delta = ohlc[column].diff()
## positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# EMAs of ups and downs
_gain = up.ewm(alpha=1.0 / period, adjust=adjust).mean()
_loss = down.abs().ewm(alpha=1.0 / period, adjust=adjust).mean()
RS = _gain / _loss
return pd.Series(100 - (100 / (1 + RS)), name="{0} period RSI".format(period))
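# Usage sketch (illustrative, not from the original library):
#   rsi = TA.RSI(ohlc, period=14)
#   overbought = rsi > 70
#   oversold = rsi < 30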
@classmethod
def IFT_RSI(
cls,
ohlc: DataFrame,
column: str = "close",
rsi_period: int = 5,
wma_period: int = 9,
) -> Series:
"""Modified Inverse Fisher Transform applied on RSI.
Suggested method to use any IFT indicator is to buy when the indicator crosses over –0.5 or crosses over +0.5
if it has not previously crossed over –0.5 and to sell short when the indicators crosses under +0.5 or crosses under –0.5
if it has not previously crossed under +0.5."""
# v1 = .1 * (rsi - 50)
v1 = pd.Series(0.1 * (cls.RSI(ohlc, rsi_period) - 50), name="v1")
# v2 = WMA(wma_period) of v1
d = (wma_period * (wma_period + 1)) / 2 # denominator
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
ift = pd.Series(((v2 ** 2 - 1) / (v2 ** 2 + 1)), name="IFT_RSI")
return ift
@classmethod
def SWI(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""Sine Wave indicator"""
raise NotImplementedError
@classmethod
def DYMI(
cls, ohlc: DataFrame, column: str = "close", adjust: bool = True
) -> Series:
"""
The Dynamic Momentum Index is a variable term RSI. The RSI term varies from 3 to 30. The variable
time period makes the RSI more responsive to short-term moves. The more volatile the price is,
the shorter the time period is. It is interpreted in the same way as the RSI, but provides signals earlier.
Readings below 30 are considered oversold, and levels over 70 are considered overbought. The indicator
oscillates between 0 and 100.
https://www.investopedia.com/terms/d/dynamicmomentumindex.asp
"""
def _get_time(close):
# Value available from 14th period
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = ohlc.iloc[0:index]
else:
subset = ohlc.iloc[(index - time) : index]
return cls.RSI(subset, period=time, adjust=adjust).values[-1]
dates = Series(ohlc.index)
periods = Series(range(14, len(dates)), index=ohlc.index[14:].values)
t = _get_time(ohlc[column])
return periods.map(lambda x: _dmi(x))
@classmethod
def TR(cls, ohlc: DataFrame) -> Series:
"""True Range is the maximum of three price ranges.
Most recent period's high minus the most recent period's low.
Absolute value of the most recent period's high minus the previous close.
Absolute value of the most recent period's low minus the previous close."""
TR1 = pd.Series(ohlc["high"] - ohlc["low"]).abs() # True Range = High less Low
TR2 = pd.Series(
ohlc["high"] - ohlc["close"].shift()
).abs() # True Range = High less Previous Close
TR3 = pd.Series(
ohlc["close"].shift() - ohlc["low"]
).abs() # True Range = Previous Close less Low
_TR = pd.concat([TR1, TR2, TR3], axis=1)
_TR["TR"] = _TR.max(axis=1)
return pd.Series(_TR["TR"], name="TR")
@classmethod
def ATR(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""Average True Range is moving average of True Range."""
TR = cls.TR(ohlc)
return pd.Series(
TR.rolling(center=False, window=period).mean(),
name="{0} period ATR".format(period),
)
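    # Note (illustrative, not part of the original library): ATR is simply a
    # rolling mean of TR, so for a hypothetical `ohlc` frame
    #
    #   TA.TR(ohlc).rolling(window=14).mean()
    #
    # yields the same values as TA.ATR(ohlc, period=14), apart from the series name.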
@classmethod
    def SAR(cls, ohlc: DataFrame, af: float = 0.02, amax: float = 0.2) -> Series:
"""SAR stands for “stop and reverse,” which is the actual indicator used in the system.
SAR trails price as the trend extends over time. The indicator is below prices when prices are rising and above prices when prices are falling.
In this regard, the indicator stops and reverses when the price trend reverses and breaks above or below the indicator."""
high, low = ohlc.high, ohlc.low
# Starting values
sig0, xpt0, af0 = True, high[0], af
_sar = [low[0] - (high - low).std()]
for i in range(1, len(ohlc)):
sig1, xpt1, af1 = sig0, xpt0, af0
lmin = min(low[i - 1], low[i])
lmax = max(high[i - 1], high[i])
if sig1:
sig0 = low[i] > _sar[-1]
xpt0 = max(lmax, xpt1)
else:
sig0 = high[i] >= _sar[-1]
xpt0 = min(lmin, xpt1)
if sig0 == sig1:
sari = _sar[-1] + (xpt1 - _sar[-1]) * af1
af0 = min(amax, af1 + af)
if sig0:
af0 = af0 if xpt0 > xpt1 else af1
sari = min(sari, lmin)
else:
af0 = af0 if xpt0 < xpt1 else af1
sari = max(sari, lmax)
else:
af0 = af
sari = xpt0
_sar.append(sari)
return pd.Series(_sar, index=ohlc.index)
@classmethod
    def PSAR(cls, ohlc: DataFrame, iaf: float = 0.02, maxaf: float = 0.2) -> DataFrame:
"""
The parabolic SAR indicator, developed by <NAME>, is used by traders to determine trend direction and potential reversals in price.
The indicator uses a trailing stop and reverse method called "SAR," or stop and reverse, to identify suitable exit and entry points.
Traders also refer to the indicator as the parabolic stop and reverse, parabolic SAR, or PSAR.
https://www.investopedia.com/terms/p/parabolicindicator.asp
https://virtualizedfrog.wordpress.com/2014/12/09/parabolic-sar-implementation-in-python/
"""
length = len(ohlc)
high, low, close = ohlc.high, ohlc.low, ohlc.close
        psar = close.copy()  # work on a copy so the input frame is not mutated
psarbull = [None] * length
psarbear = [None] * length
bull = True
af = iaf
hp = high[0]
lp = low[0]
for i in range(2, length):
if bull:
psar[i] = psar[i - 1] + af * (hp - psar[i - 1])
else:
psar[i] = psar[i - 1] + af * (lp - psar[i - 1])
reverse = False
if bull:
if low[i] < psar[i]:
bull = False
reverse = True
psar[i] = hp
lp = low[i]
af = iaf
else:
if high[i] > psar[i]:
bull = True
reverse = True
psar[i] = lp
hp = high[i]
af = iaf
if not reverse:
if bull:
if high[i] > hp:
hp = high[i]
af = min(af + iaf, maxaf)
if low[i - 1] < psar[i]:
psar[i] = low[i - 1]
if low[i - 2] < psar[i]:
psar[i] = low[i - 2]
else:
if low[i] < lp:
lp = low[i]
af = min(af + iaf, maxaf)
if high[i - 1] > psar[i]:
psar[i] = high[i - 1]
if high[i - 2] > psar[i]:
psar[i] = high[i - 2]
if bull:
psarbull[i] = psar[i]
else:
psarbear[i] = psar[i]
        psar = pd.Series(psar, name="psar", index=ohlc.index)
        psarbull = pd.Series(psarbull, name="psarbull", index=ohlc.index)
        psarbear = pd.Series(psarbear, name="psarbear", index=ohlc.index)
return pd.concat([psar, psarbull, psarbear], axis=1)
@classmethod
def BBANDS(
cls,
ohlc: DataFrame,
period: int = 20,
MA: Series = None,
column: str = "close",
std_multiplier: float = 2,
) -> DataFrame:
"""
Developed by <NAME>, Bollinger Bands® are volatility bands placed above and below a moving average.
Volatility is based on the standard deviation, which changes as volatility increases and decreases.
The bands automatically widen when volatility increases and narrow when volatility decreases.
This method allows input of some other form of moving average like EMA or KAMA around which BBAND will be formed.
Pass desired moving average as <MA> argument. For example BBANDS(MA=TA.KAMA(20)).
"""
std = ohlc[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(cls.SMA(ohlc, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
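    # Usage sketch (illustrative, not part of the original library): the docstring
    # above mentions passing a custom moving average; for example, bands built
    # around a 20 period EMA could be requested as
    #
    #   bb = TA.BBANDS(ohlc, period=20, MA=TA.EMA(ohlc, 20))
    #   band_width = bb["BB_UPPER"] - bb["BB_LOWER"]  # width in price units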
@classmethod
def MOBO(
cls,
ohlc: DataFrame,
period: int = 10,
std_multiplier: float = 0.8,
column: str = "close",
) -> DataFrame:
"""
"MOBO bands are based on a zone of 0.80 standard deviation with a 10 period look-back"
If the price breaks out of the MOBO band it can signify a trend move or price spike
Contains 42% of price movements(noise) within bands.
"""
        BB = TA.BBANDS(
            ohlc, period=period, std_multiplier=std_multiplier, column=column
        )
return BB
@classmethod
def BBWIDTH(
cls, ohlc: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
"""Bandwidth tells how wide the Bollinger Bands are on a normalized basis."""
BB = TA.BBANDS(ohlc, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
@classmethod
def PERCENT_B(
cls, ohlc: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
"""
%b (pronounced 'percent b') is derived from the formula for Stochastics and shows where price is in relation to the bands.
%b equals 1 at the upper band and 0 at the lower band.
"""
BB = TA.BBANDS(ohlc, period, MA, column)
percent_b = pd.Series(
(ohlc["close"] - BB["BB_LOWER"]) / (BB["BB_UPPER"] - BB["BB_LOWER"]),
name="%b",
)
return percent_b
@classmethod
def KC(
cls,
ohlc: DataFrame,
period: int = 20,
atr_period: int = 10,
MA: Series = None,
kc_mult: float = 2,
) -> DataFrame:
"""Keltner Channels [KC] are volatility-based envelopes set above and below an exponential moving average.
This indicator is similar to Bollinger Bands, which use the standard deviation to set the bands.
Instead of using the standard deviation, Keltner Channels use the Average True Range (ATR) to set channel distance.
The channels are typically set two Average True Range values above and below the 20-day EMA.
The exponential moving average dictates direction and the Average True Range sets channel width.
Keltner Channels are a trend following indicator used to identify reversals with channel breakouts and channel direction.
Channels can also be used to identify overbought and oversold levels when the trend is flat."""
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(cls.EMA(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * cls.ATR(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * cls.ATR(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
@classmethod
def DO(
cls, ohlc: DataFrame, upper_period: int = 20, lower_period: int = 5
) -> DataFrame:
"""Donchian Channel, a moving average indicator developed by <NAME>.
It plots the highest high and lowest low over the last period time intervals."""
upper = pd.Series(
ohlc["high"].rolling(center=False, window=upper_period).max(), name="UPPER"
)
lower = pd.Series(
ohlc["low"].rolling(center=False, window=lower_period).min(), name="LOWER"
)
middle = pd.Series((upper + lower) / 2, name="MIDDLE")
return pd.concat([lower, middle, upper], axis=1)
@classmethod
def DMI(cls, ohlc: DataFrame, period: int = 14, adjust: bool = True) -> DataFrame:
"""The directional movement indicator (also known as the directional movement index - DMI) is a valuable tool
for assessing price direction and strength. This indicator was created in 1978 by <NAME>, who also created the popular
relative strength index. DMI tells you when to be long or short.
It is especially useful for trend trading strategies because it differentiates between strong and weak trends,
allowing the trader to enter only the strongest trends.
source: https://www.tradingview.com/wiki/Directional_Movement_(DMI)#CALCULATION
:period: Specifies the number of Periods used for DMI calculation
"""
ohlc["up_move"] = ohlc["high"].diff()
ohlc["down_move"] = -ohlc["low"].diff()
# positive Dmi
def _dmp(row):
if row["up_move"] > row["down_move"] and row["up_move"] > 0:
return row["up_move"]
else:
return 0
# negative Dmi
def _dmn(row):
if row["down_move"] > row["up_move"] and row["down_move"] > 0:
return row["down_move"]
else:
return 0
ohlc["plus"] = ohlc.apply(_dmp, axis=1)
ohlc["minus"] = ohlc.apply(_dmn, axis=1)
diplus = pd.Series(
100
* (ohlc["plus"] / cls.ATR(ohlc, period))
.ewm(alpha=1 / period, adjust=adjust)
.mean(),
name="DI+",
)
diminus = pd.Series(
100
* (ohlc["minus"] / cls.ATR(ohlc, period))
.ewm(alpha=1 / period, adjust=adjust)
.mean(),
name="DI-",
)
return pd.concat([diplus, diminus], axis=1)
@classmethod
def ADX(cls, ohlc: DataFrame, period: int = 14, adjust: bool = True) -> Series:
"""The A.D.X. is 100 * smoothed moving average of absolute value (DMI +/-) divided by (DMI+ + DMI-). ADX does not indicate trend direction or momentum,
only trend strength. Generally, A.D.X. readings below 20 indicate trend weakness,
and readings above 40 indicate trend strength. An extremely strong trend is indicated by readings above 50"""
dmi = cls.DMI(ohlc, period)
return pd.Series(
100
* (abs(dmi["DI+"] - dmi["DI-"]) / (dmi["DI+"] + dmi["DI-"]))
.ewm(alpha=1 / period, adjust=adjust)
.mean(),
name="{0} period ADX.".format(period),
)
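    # Usage sketch (illustrative, not part of the original library): ADX is often
    # combined with the DI+/DI- lines from DMI() as a trend-strength filter,
    # e.g. on a hypothetical `ohlc` frame:
    #
    #   dmi = TA.DMI(ohlc, period=14)
    #   adx = TA.ADX(ohlc, period=14)
    #   trend_present = adx > 20  # per the docstring, readings below 20 indicate a weak trend
    #   strong_uptrend = trend_present & (dmi["DI+"] > dmi["DI-"])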
@classmethod
def PIVOT(cls, ohlc: DataFrame) -> DataFrame:
"""
Pivot Points are significant support and resistance levels that can be used to determine potential trades.
The pivot points come as a technical analysis indicator calculated using a financial instrument’s high, low, and close value.
The pivot point’s parameters are usually taken from the previous day’s trading range.
This means you’ll have to use the previous day’s range for today’s pivot points.
Or, last week’s range if you want to calculate weekly pivot points or, last month’s range for monthly pivot points and so on.
"""
df = ohlc.shift() # pivot is calculated of the previous trading session
pivot = pd.Series(cls.TP(df), name="pivot") # pivot is basically a lagging TP
s1 = (pivot * 2) - df["high"]
s2 = pivot - (df["high"] - df["low"])
s3 = df["low"] - (2 * (df["high"] - pivot))
s4 = df["low"] - (3 * (df["high"] - pivot))
r1 = (pivot * 2) - df["low"]
r2 = pivot + (df["high"] - df["low"])
r3 = df["high"] + (2 * (pivot - df["low"]))
r4 = df["high"] + (3 * (pivot - df["low"]))
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
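    # Worked example (illustrative, with made-up numbers): if the previous session
    # had high=110, low=100 and close=105, then TP = (110 + 100 + 105) / 3 = 105,
    # so pivot = 105, r1 = 2*105 - 100 = 110, s1 = 2*105 - 110 = 100,
    # r2 = 105 + (110 - 100) = 115 and s2 = 105 - (110 - 100) = 95.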
@classmethod
def PIVOT_FIB(cls, ohlc: DataFrame) -> DataFrame:
"""
Fibonacci pivot point levels are determined by first calculating the classic pivot point,
then multiply the previous day’s range with its corresponding Fibonacci level.
Most traders use the 38.2%, 61.8% and 100% retracements in their calculations.
"""
df = ohlc.shift()
pp = pd.Series(cls.TP(df), name="pivot") # classic pivot
r4 = pp + ((df["high"] - df["low"]) * 1.382)
r3 = pp + ((df["high"] - df["low"]) * 1)
r2 = pp + ((df["high"] - df["low"]) * 0.618)
r1 = pp + ((df["high"] - df["low"]) * 0.382)
s1 = pp - ((df["high"] - df["low"]) * 0.382)
s2 = pp - ((df["high"] - df["low"]) * 0.618)
s3 = pp - ((df["high"] - df["low"]) * 1)
s4 = pp - ((df["high"] - df["low"]) * 1.382)
return pd.concat(
[
pp,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
@classmethod
def STOCH(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""Stochastic oscillator %K
The stochastic oscillator is a momentum indicator comparing the closing price of a security
to the range of its prices over a certain period of time.
The sensitivity of the oscillator to market movements is reducible by adjusting that time
period or by taking a moving average of the result.
"""
highest_high = ohlc["high"].rolling(center=False, window=period).max()
lowest_low = ohlc["low"].rolling(center=False, window=period).min()
STOCH = pd.Series(
(ohlc["close"] - lowest_low) / (highest_high - lowest_low) * 100,
name="{0} period STOCH %K".format(period),
)
return STOCH
@classmethod
def STOCHD(cls, ohlc: DataFrame, period: int = 3, stoch_period: int = 14) -> Series:
"""Stochastic oscillator %D
STOCH%D is a 3 period simple moving average of %K.
"""
return pd.Series(
cls.STOCH(ohlc, stoch_period).rolling(center=False, window=period).mean(),
name="{0} period STOCH %D.".format(period),
)
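    # Usage sketch (illustrative, not part of the original library): a commonly
    # cited signal is %K crossing above %D while %D is still in oversold territory:
    #
    #   k = TA.STOCH(ohlc, period=14)
    #   d = TA.STOCHD(ohlc, period=3, stoch_period=14)
    #   cross_up = (k > d) & (k.shift() <= d.shift()) & (d < 20)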
@classmethod
def STOCHRSI(
cls, ohlc: DataFrame, rsi_period: int = 14, stoch_period: int = 14
) -> Series:
"""StochRSI is an oscillator that measures the level of RSI relative to its high-low range over a set time period.
StochRSI applies the Stochastics formula to RSI values, instead of price values. This makes it an indicator of an indicator.
The result is an oscillator that fluctuates between 0 and 1."""
rsi = cls.RSI(ohlc, rsi_period)
        return pd.Series(
            (rsi - rsi.rolling(window=stoch_period).min())
            / (rsi.rolling(window=stoch_period).max() - rsi.rolling(window=stoch_period).min()),
            name="{0} period stochastic RSI.".format(rsi_period),
        )
@classmethod
def WILLIAMS(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""Williams %R, or just %R, is a technical analysis oscillator showing the current closing price in relation to the high and low
of the past N days (for a given N). It was developed by a publisher and promoter of trading materials, <NAME>.
Its purpose is to tell whether a stock or commodity market is trading near the high or the low, or somewhere in between,
of its recent trading range.
The oscillator is on a negative scale, from −100 (lowest) up to 0 (highest).
"""
highest_high = ohlc["high"].rolling(center=False, window=period).max()
lowest_low = ohlc["low"].rolling(center=False, window=period).min()
WR = pd.Series(
(highest_high - ohlc["close"]) / (highest_high - lowest_low),
name="{0} Williams %R".format(period),
)
return WR * -100
@classmethod
def UO(cls, ohlc: DataFrame, column: str = "close") -> Series:
"""Ultimate Oscillator is a momentum oscillator designed to capture momentum across three different time frames.
The multiple time frame objective seeks to avoid the pitfalls of other oscillators.
Many momentum oscillators surge at the beginning of a strong advance and then form bearish divergence as the advance continues.
This is because they are stuck with one time frame. The Ultimate Oscillator attempts to correct this fault by incorporating longer
time frames into the basic formula."""
k = [] # current low or past close
for row, _row in zip(ohlc.itertuples(), ohlc.shift(1).itertuples()):
k.append(min(row.low, _row.close))
bp = pd.Series(ohlc[column] - k, name="bp") # Buying pressure
Average7 = bp.rolling(window=7).sum() / cls.TR(ohlc).rolling(window=7).sum()
Average14 = bp.rolling(window=14).sum() / cls.TR(ohlc).rolling(window=14).sum()
Average28 = bp.rolling(window=28).sum() / cls.TR(ohlc).rolling(window=28).sum()
return pd.Series(
(100 * ((4 * Average7) + (2 * Average14) + Average28)) / (4 + 2 + 1)
)
@classmethod
def AO(cls, ohlc: DataFrame, slow_period: int = 34, fast_period: int = 5) -> Series:
"""'EMA',
Awesome Oscillator is an indicator used to measure market momentum. AO calculates the difference of a 34 Period and 5 Period Simple Moving Averages.
The Simple Moving Averages that are used are not calculated using closing price but rather each bar's midpoints.
AO is generally used to affirm trends or to anticipate possible reversals. """
slow = pd.Series(
((ohlc["high"] + ohlc["low"]) / 2).rolling(window=slow_period).mean(),
name="slow_AO",
)
fast = pd.Series(
((ohlc["high"] + ohlc["low"]) / 2).rolling(window=fast_period).mean(),
name="fast_AO",
)
return pd.Series(fast - slow, name="AO")
@classmethod
def MI(cls, ohlc: DataFrame, period: int = 9, adjust: bool = True) -> Series:
"""Developed by <NAME>, the Mass Index uses the high-low range to identify trend reversals based on range expansions.
In this sense, the Mass Index is a volatility indicator that does not have a directional bias.
Instead, the Mass Index identifies range bulges that can foreshadow a reversal of the current trend."""
_range = pd.Series(ohlc["high"] - ohlc["low"], name="range")
EMA9 = _range.ewm(span=period, ignore_na=False, adjust=adjust).mean()
DEMA9 = EMA9.ewm(span=period, ignore_na=False, adjust=adjust).mean()
mass = EMA9 / DEMA9
return pd.Series(mass.rolling(window=25).sum(), name="Mass Index")
@classmethod
def BOP(cls, ohlc: DataFrame) -> Series:
"""Balance Of Power indicator"""
return pd.Series(
(ohlc.close - ohlc.open) / (ohlc.high - ohlc.low), name="Balance Of Power"
)
@classmethod
def VORTEX(cls, ohlc: DataFrame, period: int = 14) -> DataFrame:
"""The Vortex indicator plots two oscillating lines, one to identify positive trend movement and the other
to identify negative price movement.
Indicator construction revolves around the highs and lows of the last two days or periods.
The distance from the current high to the prior low designates positive trend movement while the
distance between the current low and the prior high designates negative trend movement.
Strongly positive or negative trend movements will show a longer length between the two numbers while
weaker positive or negative trend movement will show a shorter length."""
VMP = pd.Series((ohlc["high"] - ohlc["low"].shift()).abs())
VMM = pd.Series((ohlc["low"] - ohlc["high"].shift()).abs())
VMPx = VMP.rolling(window=period).sum()
VMMx = VMM.rolling(window=period).sum()
TR = cls.TR(ohlc).rolling(window=period).sum()
VIp = pd.Series(VMPx / TR, name="VIp").interpolate(method="index")
VIm = pd.Series(VMMx / TR, name="VIm").interpolate(method="index")
return pd.concat([VIm, VIp], axis=1)
@classmethod
def KST(
cls, ohlc: DataFrame, r1: int = 10, r2: int = 15, r3: int = 20, r4: int = 30
) -> DataFrame:
"""Know Sure Thing (KST) is a momentum oscillator based on the smoothed rate-of-change for four different time frames.
KST measures price momentum for four different price cycles. It can be used just like any momentum oscillator.
Chartists can look for divergences, overbought/oversold readings, signal line crossovers and centerline crossovers."""
r1 = cls.ROC(ohlc, r1).rolling(window=10).mean()
r2 = cls.ROC(ohlc, r2).rolling(window=10).mean()
r3 = cls.ROC(ohlc, r3).rolling(window=10).mean()
r4 = cls.ROC(ohlc, r4).rolling(window=15).mean()
k = pd.Series((r1 * 1) + (r2 * 2) + (r3 * 3) + (r4 * 4), name="KST")
signal = pd.Series(k.rolling(window=10).mean(), name="signal")
return pd.concat([k, signal], axis=1)
@classmethod
def TSI(
cls,
ohlc: DataFrame,
long: int = 25,
short: int = 13,
signal: int = 13,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""True Strength Index (TSI) is a momentum oscillator based on a double smoothing of price changes."""
## Double smoother price change
momentum = pd.Series(ohlc[column].diff()) ## 1 period momentum
_EMA25 = pd.Series(
momentum.ewm(span=long, min_periods=long - 1, adjust=adjust).mean(),
name="_price change EMA25",
)
_DEMA13 = pd.Series(
_EMA25.ewm(span=short, min_periods=short - 1, adjust=adjust).mean(),
name="_price change double smoothed DEMA13",
)
## Double smoothed absolute price change
absmomentum = pd.Series(ohlc[column].diff().abs())
_aEMA25 = pd.Series(
absmomentum.ewm(span=long, min_periods=long - 1, adjust=adjust).mean(),
name="_abs_price_change EMA25",
)
_aDEMA13 = pd.Series(
_aEMA25.ewm(span=short, min_periods=short - 1, adjust=adjust).mean(),
name="_abs_price_change double smoothed DEMA13",
)
TSI = pd.Series((_DEMA13 / _aDEMA13) * 100, name="TSI")
signal = pd.Series(
TSI.ewm(span=signal, min_periods=signal - 1, adjust=adjust).mean(),
name="signal",
)
return pd.concat([TSI, signal], axis=1)
@classmethod
def TP(cls, ohlc: DataFrame) -> Series:
"""Typical Price refers to the arithmetic average of the high, low, and closing prices for a given period."""
return pd.Series((ohlc["high"] + ohlc["low"] + ohlc["close"]) / 3, name="TP")
@classmethod
def ADL(cls, ohlcv: DataFrame) -> Series:
"""The accumulation/distribution line was created by <NAME> to determine the flow of money into or out of a security.
It should not be confused with the advance/decline line. While their initials might be the same, these are entirely different indicators,
and their uses are different as well. Whereas the advance/decline line can provide insight into market movements,
the accumulation/distribution line is of use to traders looking to measure buy/sell pressure on a security or confirm the strength of a trend."""
MFM = pd.Series(
((ohlcv["close"] - ohlcv["low"])
- (ohlcv["high"] - ohlcv["close"])) / (ohlcv["high"] - ohlcv["low"]),
name="MFM",
) # Money flow multiplier
MFV = pd.Series(MFM * ohlcv["volume"], name="MFV")
return MFV.cumsum()
@classmethod
def CHAIKIN(cls, ohlcv: DataFrame, adjust: bool = True) -> Series:
"""Chaikin Oscillator, named after its creator, <NAME>, the Chaikin oscillator is an oscillator that measures the accumulation/distribution
line of the moving average convergence divergence (MACD). The Chaikin oscillator is calculated by subtracting a 10-day exponential moving average (EMA)
of the accumulation/distribution line from a three-day EMA of the accumulation/distribution line, and highlights the momentum implied by the
accumulation/distribution line."""
return pd.Series(
cls.ADL(ohlcv).ewm(span=3, min_periods=2, adjust=adjust).mean()
- cls.ADL(ohlcv).ewm(span=10, min_periods=9, adjust=adjust).mean()
)
@classmethod
def MFI(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""The money flow index (MFI) is a momentum indicator that measures
the inflow and outflow of money into a security over a specific period of time.
MFI can be understood as RSI adjusted for volume.
The money flow indicator is one of the more reliable indicators of overbought and oversold conditions, perhaps partly because
it uses the higher readings of 80 and 20 as compared to the RSI's overbought/oversold readings of 70 and 30"""
tp = cls.TP(ohlc)
rmf = pd.Series(tp * ohlc["volume"], name="rmf") ## Real Money Flow
_mf = pd.concat([tp, rmf], axis=1)
_mf["delta"] = _mf["TP"].diff()
def pos(row):
if row["delta"] > 0:
return row["rmf"]
else:
return 0
def neg(row):
if row["delta"] < 0:
return row["rmf"]
else:
return 0
_mf["neg"] = _mf.apply(neg, axis=1)
_mf["pos"] = _mf.apply(pos, axis=1)
mfratio = pd.Series(
_mf["pos"].rolling(window=period).sum()
/ _mf["neg"].rolling(window=period).sum()
)
return pd.Series(
100 - (100 / (1 + mfratio)), name="{0} period MFI".format(period)
)
@classmethod
def OBV(cls, ohlcv: DataFrame, column: str = "close") -> Series:
"""
On Balance Volume (OBV) measures buying and selling pressure as a cumulative indicator that adds volume on up days and subtracts volume on down days.
OBV was developed by <NAME> and introduced in his 1963 book, Granville's New Key to Stock Market Profits.
It was one of the first indicators to measure positive and negative volume flow.
Chartists can look for divergences between OBV and price to predict price movements or use OBV to confirm price trends.
source: https://en.wikipedia.org/wiki/On-balance_volume#The_formula
:param pd.DataFrame ohlc: 'open, high, low, close' pandas DataFrame
:return pd.Series: result is pandas.Series
"""
ohlcv["OBV"] = np.nan
neg_change = ohlcv[column] < ohlcv[column].shift(1)
pos_change = ohlcv[column] > ohlcv[column].shift(1)
if pos_change.any():
ohlcv.loc[pos_change, "OBV"] = ohlcv["volume"]
if neg_change.any():
ohlcv.loc[neg_change, "OBV"] = -ohlcv["volume"]
return pd.Series(ohlcv["OBV"].cumsum(), name="OBV")
@classmethod
def WOBV(cls, ohlcv: DataFrame, column: str = "close") -> Series:
"""
Weighted OBV
Can also be seen as an OBV indicator that takes the price differences into account.
In a regular OBV, a high volume bar can make a huge difference,
        even if the price went up only 0.01, and if it goes down 0.01
instead, that huge volume makes the OBV go down, even though
hardly anything really happened.
"""
wobv = pd.Series(ohlcv["volume"] * ohlcv[column].diff(), name="WOBV")
return wobv.cumsum()
@classmethod
def VZO(
cls,
ohlc: DataFrame,
period: int = 14,
column: str = "close",
adjust: bool = True,
) -> Series:
"""VZO uses price, previous price and moving averages to compute its oscillating value.
It is a leading indicator that calculates buy and sell signals based on oversold / overbought conditions.
Oscillations between the 5% and 40% levels mark a bullish trend zone, while oscillations between -40% and 5% mark a bearish trend zone.
Meanwhile, readings above 40% signal an overbought condition, while readings above 60% signal an extremely overbought condition.
Alternatively, readings below -40% indicate an oversold condition, which becomes extremely oversold below -60%."""
sign = lambda a: (a > 0) - (a < 0)
r = ohlc[column].diff().apply(sign) * ohlc["volume"]
dvma = r.ewm(span=period, adjust=adjust).mean()
vma = ohlc["volume"].ewm(span=period, adjust=adjust).mean()
return pd.Series(100 * (dvma / vma), name="VZO")
@classmethod
def PZO(
cls,
ohlc: DataFrame,
period: int = 14,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
The formula for PZO depends on only one condition: if today's closing price is higher than yesterday's closing price,
then the closing price will have a positive value (bullish); otherwise it will have a negative value (bearish).
source: http://traders.com/Documentation/FEEDbk_docs/2011/06/Khalil.html
:period: Specifies the number of Periods used for PZO calculation
"""
sign = lambda a: (a > 0) - (a < 0)
r = ohlc[column].diff().apply(sign) * ohlc[column]
cp = pd.Series(r.ewm(span=period, adjust=adjust).mean())
tc = cls.EMA(ohlc, period)
return pd.Series(100 * (cp / tc), name="{} period PZO".format(period))
@classmethod
def EFI(
cls,
ohlcv: DataFrame,
period: int = 13,
column: str = "close",
adjust: bool = True,
) -> Series:
"""Elder's Force Index is an indicator that uses price and volume to assess the power
behind a move or identify possible turning points."""
# https://tradingsim.com/blog/elders-force-index/
fi = pd.Series(ohlcv[column].diff() * ohlcv["volume"])
return pd.Series(
fi.ewm(ignore_na=False, span=period, adjust=adjust).mean(),
name="{0} period Force Index".format(period),
)
@classmethod
def CFI(
cls, ohlcv: DataFrame, column: str = "close", adjust: bool = True
) -> Series:
"""
Cummulative Force Index.
Adopted from Elder's Force Index.
"""
fi1 = pd.Series(ohlcv["volume"] * ohlcv[column].diff())
cfi = pd.Series(
fi1.ewm(ignore_na=False, min_periods=9, span=10, adjust=adjust).mean(),
name="CFI",
)
return cfi.cumsum()
@classmethod
def EBBP(cls, ohlc: DataFrame) -> DataFrame:
"""Bull power and bear power by Dr. <NAME> show where today’s high and low lie relative to the a 13-day EMA"""
bull_power = pd.Series(ohlc["high"] - cls.EMA(ohlc, 13), name="Bull.")
bear_power = pd.Series(ohlc["low"] - cls.EMA(ohlc, 13), name="Bear.")
return pd.concat([bull_power, bear_power], axis=1)
@classmethod
    def EMV(cls, ohlcv: DataFrame, period: int = 14) -> Series:
"""Ease of Movement (EMV) is a volume-based oscillator that fluctuates above and below the zero line.
As its name implies, it is designed to measure the 'ease' of price movement.
prices are advancing with relative ease when the oscillator is in positive territory.
Conversely, prices are declining with relative ease when the oscillator is in negative territory."""
distance = pd.Series(
((ohlcv["high"] + ohlcv["low"]) / 2)
- (ohlcv["high"].shift() + ohlcv["low"].shift()) / 2
)
box_ratio = pd.Series(
(ohlcv["volume"] / 1000000) / (ohlcv["high"] - ohlcv["low"])
)
_emv = pd.Series(distance / box_ratio)
return pd.Series(
_emv.rolling(window=period).mean(), name="{0} period EMV.".format(period)
)
@classmethod
def CCI(cls, ohlc: DataFrame, period: int = 20, constant: float = 0.015) -> Series:
"""Commodity Channel Index (CCI) is a versatile indicator that can be used to identify a new trend or warn of extreme conditions.
CCI measures the current price level relative to an average price level over a given period of time.
The CCI typically oscillates above and below a zero line. Normal oscillations will occur within the range of +100 and −100.
Readings above +100 imply an overbought condition, while readings below −100 imply an oversold condition.
As with other overbought/oversold indicators, this means that there is a large probability that the price will correct to more representative levels.
source: https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
:param pd.DataFrame ohlc: 'open, high, low, close' pandas DataFrame
:period: int - number of periods to take into consideration
:factor float: the constant at .015 to ensure that approximately 70 to 80 percent of CCI values would fall between -100 and +100.
:return pd.Series: result is pandas.Series
"""
tp = cls.TP(ohlc)
tp_rolling = tp.rolling(window=period, min_periods=0)
# calculate MAD (Mean Deviation)
# https://www.khanacademy.org/math/statistics-probability/summarizing-quantitative-data/other-measures-of-spread/a/mean-absolute-deviation-mad-review
mad = tp_rolling.apply(lambda s: abs(s - s.mean()).mean(), raw=True)
return pd.Series(
(tp - tp_rolling.mean()) / (constant * mad),
name="{0} period CCI".format(period),
)
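    # Worked note (illustrative): with the default constant of 0.015, a CCI reading
    # of +100 means the typical price sits 1.5 mean absolute deviations above its
    # rolling average, since (tp - mean) == 100 * 0.015 * mad at that reading.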
@classmethod
def COPP(cls, ohlc: DataFrame, adjust: bool = True) -> Series:
"""The Coppock Curve is a momentum indicator, it signals buying opportunities when the indicator moved from negative territory to positive territory."""
roc1 = cls.ROC(ohlc, 14)
roc2 = cls.ROC(ohlc, 11)
return pd.Series(
(roc1 + roc2).ewm(span=10, min_periods=9, adjust=adjust).mean(),
name="Coppock Curve",
)
@classmethod
def BASP(cls, ohlc: DataFrame, period: int = 40, adjust: bool = True) -> DataFrame:
"""BASP indicator serves to identify buying and selling pressure."""
sp = ohlc["high"] - ohlc["close"]
bp = ohlc["close"] - ohlc["low"]
spavg = sp.ewm(span=period, adjust=adjust).mean()
bpavg = bp.ewm(span=period, adjust=adjust).mean()
nbp = bp / bpavg
nsp = sp / spavg
varg = ohlc["volume"].ewm(span=period, adjust=adjust).mean()
nv = ohlc["volume"] / varg
nbfraw = pd.Series(nbp * nv, name="Buy.")
nsfraw = pd.Series(nsp * nv, name="Sell.")
return pd.concat([nbfraw, nsfraw], axis=1)
@classmethod
def BASPN(cls, ohlc: DataFrame, period: int = 40, adjust: bool = True) -> DataFrame:
"""
Normalized BASP indicator
"""
sp = ohlc["high"] - ohlc["close"]
bp = ohlc["close"] - ohlc["low"]
spavg = sp.ewm(span=period, adjust=adjust).mean()
bpavg = bp.ewm(span=period, adjust=adjust).mean()
nbp = bp / bpavg
nsp = sp / spavg
varg = ohlc["volume"].ewm(span=period, adjust=adjust).mean()
nv = ohlc["volume"] / varg
nbf = pd.Series((nbp * nv).ewm(span=20, adjust=adjust).mean(), name="Buy.")
nsf = pd.Series((nsp * nv).ewm(span=20, adjust=adjust).mean(), name="Sell.")
return pd.concat([nbf, nsf], axis=1)
@classmethod
def CMO(
cls,
ohlc: DataFrame,
period: int = 9,
factor: int = 100,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
Chande Momentum Oscillator (CMO) - technical momentum indicator invented by the technical analyst <NAME>.
It is created by calculating the difference between the sum of all recent gains and the sum of all recent losses and then
dividing the result by the sum of all price movement over the period.
This oscillator is similar to other momentum indicators such as the Relative Strength Index and the Stochastic Oscillator
because it is range bounded (+100 and -100)."""
# get the price diff
delta = ohlc[column].diff()
# positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# EMAs of ups and downs
_gain = up.ewm(com=period, adjust=adjust).mean()
_loss = down.ewm(com=period, adjust=adjust).mean().abs()
return pd.Series(factor * ((_gain - _loss) / (_gain + _loss)), name="CMO")
@classmethod
def CHANDELIER(
cls,
ohlc: DataFrame,
short_period: int = 22,
long_period: int = 22,
k: int = 3,
) -> DataFrame:
"""
Chandelier Exit sets a trailing stop-loss based on the Average True Range (ATR).
The indicator is designed to keep traders in a trend and prevent an early exit as long as the trend extends.
Typically, the Chandelier Exit will be above prices during a downtrend and below prices during an uptrend.
"""
l = pd.Series(
ohlc["high"].rolling(window=long_period).max() - cls.ATR(ohlc, 22) * k,
name="Long.",
)
s = pd.Series(
ohlc["low"].rolling(window=short_period).min() + cls.ATR(ohlc, 22) * k,
name="Short.",
)
return pd.concat([s, l], axis=1)
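    # Usage sketch (illustrative, not part of the original library): the long exit
    # is typically compared against the close to decide when to abandon a long:
    #
    #   ch = TA.CHANDELIER(ohlc)
    #   exit_long = ohlc["close"] < ch["Long."]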
@classmethod
def QSTICK(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""
QStick indicator shows the dominance of black (down) or white (up) candlesticks, which are red and green in Chart,
as represented by the average open to close change for each of past N days."""
_close = ohlc["close"].tail(period)
_open = ohlc["open"].tail(period)
return pd.Series(
(_close - _open) / period, name="{0} period QSTICK.".format(period)
)
@classmethod
def TMF(cls, ohlcv: DataFrame, period: int = 21) -> Series:
"""Indicator by <NAME> which improves upon CMF.
source: https://user42.tuxfamily.org/chart/manual/Twiggs-Money-Flow.html"""
ohlcv["ll"] = [min(l, c) for l, c in zip(ohlcv["low"], ohlcv["close"].shift(1))]
ohlcv["hh"] = [
max(h, c) for h, c in zip(ohlcv["high"], ohlcv["close"].shift(1))
]
ohlcv["range"] = (
2 * ((ohlcv["close"] - ohlcv["ll"]) / (ohlcv["hh"] - ohlcv["ll"])) - 1
)
ohlcv["rangev"] = None
# TMF Signal Line = EMA(TMF)
# return TMF
raise NotImplementedError
@classmethod
def WTO(
cls,
ohlc: DataFrame,
channel_lenght: int = 10,
average_lenght: int = 21,
adjust: bool = True,
) -> DataFrame:
"""
Wave Trend Oscillator
source: http://www.fxcoaching.com/WaveTrend/
"""
ap = cls.TP(ohlc)
esa = ap.ewm(span=channel_lenght, adjust=adjust).mean()
d = pd.Series(
(ap - esa).abs().ewm(span=channel_lenght, adjust=adjust).mean(), name="d"
)
ci = (ap - esa) / (0.015 * d)
wt1 = pd.Series(ci.ewm(span=average_lenght, adjust=adjust).mean(), name="WT1.")
wt2 = pd.Series(wt1.rolling(window=4).mean(), name="WT2.")
return pd.concat([wt1, wt2], axis=1)
@classmethod
def FISH(cls, ohlc: DataFrame, period: int = 10, adjust: bool = True) -> Series:
"""
Fisher Transform was presented by <NAME>. It assumes that price distributions behave like square waves.
"""
from numpy import log, seterr
seterr(divide="ignore")
med = (ohlc["high"] + ohlc["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
@classmethod
def ICHIMOKU(
cls,
ohlc: DataFrame,
tenkan_period: int = 9,
kijun_period: int = 26,
senkou_period: int = 52,
chikou_period: int = 26,
) -> DataFrame:
"""
The Ichimoku Cloud, also known as Ichimoku Kinko Hyo, is a versatile indicator that defines support and resistance,
identifies trend direction, gauges momentum and provides trading signals.
Ichimoku Kinko Hyo translates into “one look equilibrium chart”.
"""
        tenkan_sen = pd.Series(
            (
                ohlc["high"].rolling(window=tenkan_period).max()
                + ohlc["low"].rolling(window=tenkan_period).min()
            )
            / 2,
            name="TENKAN",
        )  ## conversion line: midpoint of the period's highest high and lowest low
        kijun_sen = pd.Series(
            (
                ohlc["high"].rolling(window=kijun_period).max()
                + ohlc["low"].rolling(window=kijun_period).min()
            )
            / 2,
            name="KIJUN",
        )  ## base line
        senkou_span_a = pd.Series(
            ((tenkan_sen + kijun_sen) / 2).shift(kijun_period), name="senkou_span_a"
        )  ## leading span A, projected forward
        senkou_span_b = pd.Series(
            (
                (
                    ohlc["high"].rolling(window=senkou_period).max()
                    + ohlc["low"].rolling(window=senkou_period).min()
                )
                / 2
            ).shift(kijun_period),
            name="SENKOU",
        )  ## leading span B, projected forward
        chikou_span = pd.Series(
            ohlc["close"].shift(-chikou_period),
            name="CHIKOU",
        )  ## lagging span: close plotted chikou_period bars back
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
@classmethod
def APZ(
cls,
ohlc: DataFrame,
period: int = 21,
dev_factor: int = 2,
MA: Series = None,
adjust: bool = True,
) -> DataFrame:
"""
The adaptive price zone (APZ) is a technical indicator developed by <NAME>.
The APZ is a volatility based indicator that appears as a set of bands placed over a price chart.
Especially useful in non-trending, choppy markets,
the APZ was created to help traders find potential turning points in the markets.
"""
if not isinstance(MA, pd.Series):
MA = cls.DEMA(ohlc, period)
price_range = pd.Series(
(ohlc["high"] - ohlc["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
# upper_band = dev_factor * volatility_value + dema
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
@classmethod
def SQZMI(cls, ohlc: DataFrame, period: int = 20, MA: Series = None) -> DataFrame:
"""
Squeeze Momentum Indicator
The Squeeze indicator attempts to identify periods of consolidation in a market.
In general the market is either in a period of quiet consolidation or vertical price discovery.
By identifying these calm periods, we have a better opportunity of getting into trades with the potential for larger moves.
Once a market enters into a “squeeze”, we watch the overall market momentum to help forecast the market direction and await a release of market energy.
:param pd.DataFrame ohlc: 'open, high, low, close' pandas DataFrame
:period: int - number of periods to take into consideration
:MA pd.Series: override internal calculation which uses SMA with moving average of your choice
:return pd.Series: indicator calcs as pandas Series
        SQZMI['SQZ'] is bool True/False; if True the squeeze is on, if False the squeeze has fired.
"""
        if not isinstance(MA, pd.core.series.Series):
            ma = pd.Series(cls.SMA(ohlc, period))
        else:
            ma = MA  # use the caller-supplied moving average
bb = cls.BBANDS(ohlc, period=period, MA=ma)
kc = cls.KC(ohlc, period=period, kc_mult=1.5)
comb = pd.concat([bb, kc], axis=1)
def sqz_on(row):
if row["BB_LOWER"] > row["KC_LOWER"] and row["BB_UPPER"] < row["KC_UPPER"]:
return True
else:
return False
comb["SQZ"] = comb.apply(sqz_on, axis=1)
return pd.Series(comb["SQZ"], name="{0} period SQZMI".format(period))
@classmethod
def VPT(cls, ohlc: DataFrame) -> Series:
"""
Volume Price Trend
The Volume Price Trend uses the difference of price and previous price with volume and feedback to arrive at its final form.
If there appears to be a bullish divergence of price and the VPT (upward slope of the VPT and downward slope of the price) a buy opportunity exists.
Conversely, a bearish divergence (downward slope of the VPT and upward slope of the price) implies a sell opportunity.
"""
hilow = (ohlc["high"] - ohlc["low"]) * 100
openclose = (ohlc["close"] - ohlc["open"]) * 100
vol = ohlc["volume"] / hilow
spreadvol = (openclose * vol).cumsum()
        vpt = spreadvol  # cumulative volume-weighted spread
return pd.Series(vpt, name="VPT")
@classmethod
    def FVE(cls, ohlc: DataFrame, period: int = 22, factor: float = 0.3) -> Series:
"""
        FVE is a money flow indicator, but it has two important innovations: first, the FVE takes into account both intra and
interday price action, and second, minimal price changes are taken into account by introducing a price threshold.
"""
hl2 = (ohlc["high"] + ohlc["low"]) / 2
tp = TA.TP(ohlc)
smav = ohlc["volume"].rolling(window=period).mean()
mf = pd.Series((ohlc["close"] - hl2 + tp.diff()), name="mf")
        _mf = pd.concat([ohlc["close"], ohlc["volume"], mf], axis=1)
import os
import pandas as pd
from scripts.common.configuration import Configuration
from scripts.common.db import DataBase
from scripts.common import periods as taxes_periods
def generate_report_periods():
configuration = Configuration()
db = DataBase(configuration.get_db_directory())
expenses = db.retrieve_expenses()
incomes = db.retrieve_incomes()
periods = taxes_periods.generate_periods(configuration.get_year())
summary = []
for d in periods:
        start_date = pd.to_datetime(d[0])
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
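# Illustrative call pattern (not part of the original test module): the helper above
# is normally driven from individual tests, roughly like
#
#   assert_stat_op_calc('sum', np.sum, float_frame_with_na,
#                       skipna_alternative=np.nansum)
#   assert_stat_op_calc('mean', np.mean, float_frame_with_na, check_dates=True)
#
# where `float_frame_with_na` would be a pytest fixture providing a float DataFrame
# that contains some NaN values.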
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean values and some NAs
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
        result = frame.cov(min_periods=len(frame) - 8)
        expected = frame.cov()
        expected.loc['A', 'B'] = np.nan
        expected.loc['B', 'A'] = np.nan
        tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
                                     pd.Timedelta('5 hours')]},
                            index=['count', 'mean', 'std', 'min', '25%',
                                   '50%', '75%', 'max'])
        result = df.describe()
        tm.assert_frame_equal(result, expected)
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
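# Mark summary: FASTPARQUET_MARK / PYARROW_MARK skip tests when the corresponding
# library is unavailable; PYARROW_LE_MARK / PYARROW_DS_MARK guard the legacy
# ParquetDataset engine and the newer pyarrow.dataset engine (the legacy mark also
# silences deprecation warnings when pyarrow is installed); ANY_ENGINE_MARK skips
# only when neither parquet engine is present.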
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
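# Module-level test data reused by many tests below: an unsorted "x" column, a
# sorted "y" column, and a named integer index ("myindex") so index round-tripping
# and divisions can be exercised.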
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
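# NOTE: the engine names contain hyphens ("pyarrow-dataset"), so custom marks are
# passed to write_read_engines via **{...} dict unpacking below rather than as
# plain keyword arguments.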
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
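# partition_on writes hive-style directory partitions (e.g. "b=x/part.0.parquet");
# the partitioning columns are read back as categorical dtype, which the tests
# below verify via .cat.categories.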
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": pd.Categorical([1, 2, 1])}), {}, {"categories": ["x"]}),
(pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"), {}, {}),
pytest.param(
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ns]"),
{},
{},
),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, UTC]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, CET]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("float32"), {}, {}),
(pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({"x": [3, 1, 5]}, index=pd.Index([1, 2, 3], name="foo")), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]), {}, {}),
(pd.DataFrame({"0": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [3, 2, None]}), {}, {}),
(pd.DataFrame({"-": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({".": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({" ": [3.0, 2.0, None]}), {}, {}),
],
)
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):
if "x" in df and df.x.dtype == "M8[ns]" and "arrow" in engine:
pytest.xfail(reason="Parquet pyarrow v1 doesn't support nanosecond precision")
if (
"x" in df
and df.x.dtype == "M8[ns]"
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail(reason="fastparquet doesn't support nanosecond precision yet")
if (
PANDAS_GT_130
and read_kwargs.get("categories", None)
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail("https://github.com/dask/fastparquet/issues/577")
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
oe = write_kwargs.pop("object_encoding", None)
if oe and engine == "fastparquet":
dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)
else:
dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)
if str(ddf2.dtypes.get("x")) == "UInt16" and engine == "fastparquet":
        # fastparquet chooses to use masked type to be able to get true repr of
# 16-bit int
assert_eq(ddf.astype("UInt16"), ddf2)
else:
assert_eq(ddf, ddf2)
def test_categories(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": list("caaab")})
ddf = dd.from_pandas(df, npartitions=2)
ddf["y"] = ddf.y.astype("category")
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, categories=["y"], engine=engine)
# Shouldn't need to specify categories explicitly
ddf3 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf3, ddf2)
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {"a", "b", "c"}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()
assert cats_set.tolist() == ["a", "c", "a", "b"]
if engine == "fastparquet":
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
        # attempt to load as category a column that is not so encoded
ddf2 = dd.read_parquet(fn, categories=["x"], engine=engine).compute()
with pytest.raises((ValueError, FutureWarning)):
# attempt to load as category unknown column
ddf2 = dd.read_parquet(fn, categories=["foo"], engine=engine)
def test_categories_unnamed_index(tmpdir, engine):
# Check that we can handle an unnamed categorical index
# https://github.com/dask/dask/issues/6885
tmpdir = str(tmpdir)
df = pd.DataFrame(
data={"A": [1, 2, 3], "B": ["a", "a", "b"]}, index=["x", "y", "y"]
)
ddf = dd.from_pandas(df, npartitions=1)
ddf = ddf.categorize(columns=["B"])
ddf.to_parquet(tmpdir, engine=engine)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf.index, ddf2.index, check_divisions=False)
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
assert ddf3.npartitions < 5
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = dd._compat.makeTimeDataFrame()
df.index.name = "foo"
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
@PYARROW_MARK
def test_to_parquet_default_writes_nulls(tmpdir):
fn = str(tmpdir.join("test.parquet"))
df = pd.DataFrame({"c1": [1.0, np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@PYARROW_LE_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):
df = pd.DataFrame(
{"partition_column": [0, 0, 1, 1], "strings": ["a", "b", None, None]}
)
ddf = dd.from_pandas(df, npartitions=2)
# In order to allow pyarrow to write an inconsistent schema,
# we need to avoid writing the _metadata file (will fail >0.17.1)
# and need to avoid schema inference (i.e. use `schema=None`)
ddf.to_parquet(
str(tmpdir),
engine="pyarrow",
partition_on=["partition_column"],
write_metadata_file=False,
schema=None,
)
# Test that schema is not validated by default
# (shouldn't raise error with legacy dataset)
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
).compute()
# Test that read fails when validate_schema=True
# Note: This fails differently for pyarrow.dataset api
with pytest.raises(ValueError) as e_info:
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
dataset={"validate_schema": True},
).compute()
    assert "Schema in partition" in str(e_info.value)
    assert "was different" in str(e_info.value)
@PYARROW_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(
tmpdir,
):
# Data types to test: strings, arrays, ints, timezone aware timestamps
in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]
out_arrays = [[0, 1, 2], [3, 4], None, None]
in_strings = ["a", "b", np.nan, np.nan]
out_strings = ["a", "b", None, None]
tstamp = pd.Timestamp(1513393355, unit="s")
in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]
out_tstamps = [
# Timestamps come out in numpy.datetime64 format
tstamp.to_datetime64(),
tstamp.to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
timezone = "US/Eastern"
tz_tstamp = pd.Timestamp(1513393355, unit="s", tz=timezone)
in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]
out_tz_tstamps = [
# Timezones do not make it through a write-read cycle.
tz_tstamp.tz_convert(None).to_datetime64(),
tz_tstamp.tz_convert(None).to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
df = pd.DataFrame(
{
"partition_column": [0, 0, 1, 1],
"arrays": in_arrays,
"strings": in_strings,
"tstamps": in_tstamps,
"tz_tstamps": in_tz_tstamps,
}
)
ddf = dd.from_pandas(df, npartitions=2)
schema = pa.schema(
[
("arrays", pa.list_(pa.int64())),
("strings", pa.string()),
("tstamps", pa.timestamp("ns")),
("tz_tstamps", pa.timestamp("ns", timezone)),
("partition_column", pa.int64()),
]
)
ddf.to_parquet(
str(tmpdir), engine="pyarrow", partition_on="partition_column", schema=schema
)
ddf_after_write = (
dd.read_parquet(str(tmpdir), engine="pyarrow", gather_statistics=False)
.compute()
.reset_index(drop=True)
)
# Check array support
arrays_after_write = ddf_after_write.arrays.values
for i in range(len(df)):
assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])
# Check datetime support
tstamps_after_write = ddf_after_write.tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tstamps_after_write[i]):
assert np.isnat(out_tstamps[i])
else:
assert tstamps_after_write[i] == out_tstamps[i]
# Check timezone aware datetime support
tz_tstamps_after_write = ddf_after_write.tz_tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tz_tstamps_after_write[i]):
assert np.isnat(out_tz_tstamps[i])
else:
assert tz_tstamps_after_write[i] == out_tz_tstamps[i]
# Check string support
assert np.array_equal(ddf_after_write.strings.values, out_strings)
# Check partition column
assert np.array_equal(ddf_after_write.partition_column, df.partition_column)
@PYARROW_MARK
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("schema", ["infer", "complex"])
def test_pyarrow_schema_inference(tmpdir, index, engine, schema):
if schema == "complex":
schema = {"index": pa.string(), "amount": pa.int64()}
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"index": ["1", "2", "3", "2", "3", "1", "4"],
"date": pd.to_datetime(
[
"2017-01-01",
"2017-01-01",
"2017-01-01",
"2017-01-02",
"2017-01-02",
"2017-01-06",
"2017-01-09",
]
),
"amount": [100, 200, 300, 400, 500, 600, 700],
},
index=range(7, 14),
)
if index:
df = dd.from_pandas(df, npartitions=2).set_index("index")
else:
df = dd.from_pandas(df, npartitions=2)
df.to_parquet(tmpdir, engine="pyarrow", schema=schema)
df_out = dd.read_parquet(tmpdir, engine=engine)
df_out.compute()
if index and engine == "fastparquet":
# Fastparquet fails to detect int64 from _metadata
df_out["amount"] = df_out["amount"].astype("int64")
# Fastparquet not handling divisions for
# pyarrow-written dataset with string index
assert_eq(df, df_out, check_divisions=False)
else:
assert_eq(df, df_out)
def test_partition_on(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
"d": np.arange(0, 100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
# Note #1: Cross-engine functionality is missing
# Note #2: The index is not preserved in pyarrow when partition_on is used
out = dd.read_parquet(
tmpdir, engine=engine, index=False, gather_statistics=False
).compute()
for val in df.a1.unique():
assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])
# Now specify the columns and allow auto-index detection
out = dd.read_parquet(tmpdir, engine=engine, columns=["d", "a2"]).compute()
for val in df.a2.unique():
assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])
def test_partition_on_duplicates(tmpdir, engine):
# https://github.com/dask/dask/issues/6445
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"data": np.random.random(size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
for _ in range(2):
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
out = dd.read_parquet(tmpdir, engine=engine).compute()
assert len(df) == len(out)
for root, dirs, files in os.walk(tmpdir):
for file in files:
assert file in (
"part.0.parquet",
"part.1.parquet",
"_common_metadata",
"_metadata",
)
@PYARROW_MARK
@pytest.mark.parametrize("partition_on", ["aa", ["aa"]])
def test_partition_on_string(tmpdir, partition_on):
tmpdir = str(tmpdir)
with dask.config.set(scheduler="single-threaded"):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"aa": np.random.choice(["A", "B", "C"], size=100),
"bb": np.random.random(size=100),
"cc": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(
tmpdir, partition_on=partition_on, write_index=False, engine="pyarrow"
)
out = dd.read_parquet(
tmpdir, index=False, gather_statistics=False, engine="pyarrow"
)
out = out.compute()
for val in df.aa.unique():
assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])
@write_read_engines()
def test_filters_categorical(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
cats = ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]
dftest = pd.DataFrame(
{
"dummy": [1, 1, 1, 1],
"DatePart": pd.Categorical(cats, categories=cats, ordered=True),
}
)
ddftest = dd.from_pandas(dftest, npartitions=4).set_index("dummy")
ddftest.to_parquet(tmpdir, partition_on="DatePart", engine=write_engine)
ddftest_read = dd.read_parquet(
tmpdir,
index="dummy",
engine=read_engine,
filters=[(("DatePart", "<=", "2018-01-02"))],
)
assert len(ddftest_read) == 2
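# The `filters` argument prunes row-groups/partitions using parquet column
# statistics; the pyarrow-dataset engine can additionally filter individual rows.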
@write_read_engines()
def test_filters(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine=write_engine)
a = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "==", "c")])
assert b.npartitions == 1
assert (b.y == "c").all().compute()
c = dd.read_parquet(
tmp_path, engine=read_engine, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
assert_eq(c, c)
d = dd.read_parquet(
tmp_path,
engine=read_engine,
filters=[
# Select two overlapping ranges
[("x", ">", 1), ("x", "<", 6)],
[("x", ">", 3), ("x", "<", 8)],
],
)
assert d.npartitions == 3
assert ((d.x > 1) & (d.x < 8)).all().compute()
e = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", "in", (0, 9))])
assert e.npartitions == 2
assert ((e.x < 2) | (e.x > 7)).all().compute()
f = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "=", "c")])
assert f.npartitions == 1
assert len(f)
assert (f.y == "c").all().compute()
@write_read_engines()
def test_filters_v0(tmpdir, write_engine, read_engine):
if write_engine == "fastparquet" or read_engine == "fastparquet":
pytest.importorskip("fastparquet", minversion="0.3.1")
# Recent versions of pyarrow support full row-wise filtering
# (fastparquet and older pyarrow versions do not)
pyarrow_row_filtering = read_engine == "pyarrow-dataset"
fn = str(tmpdir)
df = pd.DataFrame({"at": ["ab", "aa", "ba", "da", "bb"]})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
    # Check that partitions are filtered for "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_filters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
    # Check that first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
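# "divisions" are the index boundaries of each partition; the tests below check
# that reading with filters can still produce known, sorted divisions.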
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
np.random.choice(["a", "b", "c", "d", "e", "f"], size=100),
dtype="category",
),
"ints": pd.Series(list(range(0, 100)), dtype="int"),
"floats": pd.Series(list(range(0, 100)), dtype="float"),
}
)
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, engine=engine)
rddf = dd.read_parquet(fn, columns=["ints"], engine=engine)
assert list(rddf.columns) == ["ints"]
rddf = dd.read_parquet(fn, engine=engine)
assert list(rddf.columns) == list(df)
def test_columns_name(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version <= parse_version("0.3.1"):
pytest.skip("Fastparquet does not write column_indexes up to 0.3.1")
tmp_path = str(tmpdir)
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
ddf.to_parquet(tmp_path, engine=engine)
result = dd.read_parquet(tmp_path, engine=engine, index=["idx"])
assert_eq(result, df)
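# Helper: inspect low-level parquet metadata (fastparquet's ParquetFile or
# pyarrow's ParquetDataset) to verify whether each column chunk was actually
# compressed as requested.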
def check_compression(engine, filename, compression):
if engine == "fastparquet":
pf = fastparquet.ParquetFile(filename)
md = pf.fmd.row_groups[0].columns[0].meta_data
if compression is None:
assert md.total_compressed_size == md.total_uncompressed_size
else:
assert md.total_compressed_size != md.total_uncompressed_size
else:
metadata = pa.parquet.ParquetDataset(filename).metadata
names = metadata.schema.names
for i in range(metadata.num_row_groups):
row_group = metadata.row_group(i)
for j in range(len(names)):
column = row_group.column(j)
if compression is None:
assert (
column.total_compressed_size == column.total_uncompressed_size
)
else:
compress_expect = compression
if compression == "default":
compress_expect = "snappy"
assert compress_expect.lower() == column.compression.lower()
assert (
column.total_compressed_size != column.total_uncompressed_size
)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine)
out = dd.read_parquet(fn, engine=engine)
assert_eq(out, ddf)
check_compression(engine, fn, compression)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=["x"])
check_compression(engine, fn, compression)
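# The fixture below provides historical variants of the "pandas" metadata block
# written into parquet files by different fastparquet/pyarrow versions; it is used
# to exercise _parse_pandas_metadata in the tests that follow.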
@pytest.fixture(
params=[
# fastparquet 0.1.3
{
"columns": [
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.7.1
{
"columns": [
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.8.0
{
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
},
# TODO: fastparquet update
]
)
def pandas_metadata(request):
return request.param
def test_parse_pandas_metadata(pandas_metadata):
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(
pandas_metadata
)
assert index_names == ["idx"]
assert column_names == ["A"]
assert column_index_names == [None]
# for new pyarrow
if pandas_metadata["index_columns"] == ["__index_level_0__"]:
assert mapping == {"__index_level_0__": "idx", "A": "A"}
else:
assert mapping == {"idx": "idx", "A": "A"}
assert isinstance(mapping, dict)
def test_parse_pandas_metadata_null_index():
# pyarrow 0.7.1 None for index
e_index_names = [None]
e_column_names = ["x"]
e_mapping = {"__index_level_0__": None, "x": "x"}
e_column_index_names = [None]
md = {
"columns": [
{
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "__index_level_0__",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
# pyarrow 0.8.0 None for index
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "x",
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": None,
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
@PYARROW_MARK
def test_read_no_metadata(tmpdir, engine):
# use pyarrow.parquet to create a parquet file without
# pandas metadata
tmp = str(tmpdir) + "table.parq"
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=["A", "B"]
)
pq.write_table(table, tmp)
result = dd.read_parquet(tmp, engine=engine)
expected = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
assert_eq(result, expected)
def test_parse_pandas_metadata_duplicate_index_columns():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_parse_pandas_metadata_column_with_index_name():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_writing_parquet_with_kwargs(tmpdir, engine):
fn = str(tmpdir)
path1 = os.path.join(fn, "normal")
path2 = os.path.join(fn, "partitioned")
pytest.importorskip("snappy")
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
engine_kwargs = {
"pyarrow-dataset": {
"compression": "snappy",
"coerce_timestamps": None,
"use_dictionary": True,
},
"fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None},
}
engine_kwargs["pyarrow-legacy"] = engine_kwargs["pyarrow-dataset"]
ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])
out = dd.read_parquet(path1, engine=engine)
assert_eq(out, ddf, check_index=(engine != "fastparquet"))
# Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
with dask.config.set(scheduler="sync"):
ddf.to_parquet(
path2, engine=engine, partition_on=["a"], **engine_kwargs[engine]
)
out = dd.read_parquet(path2, engine=engine).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):
fn = str(tmpdir)
with pytest.raises(TypeError):
ddf.to_parquet(fn, engine=engine, unknown_key="unknown_value")
@ANY_ENGINE_MARK
def test_to_parquet_with_get(tmpdir):
from dask.multiprocessing import get as mp_get
tmpdir = str(tmpdir)
flag = [False]
def my_get(*args, **kwargs):
flag[0] = True
return mp_get(*args, **kwargs)
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, compute_kwargs={"scheduler": my_get})
assert flag[0]
result = dd.read_parquet(os.path.join(tmpdir, "*"))
assert_eq(result, df, check_index=False)
def test_select_partitioned_column(tmpdir, engine):
pytest.importorskip("snappy")
fn = str(tmpdir)
size = 20
d = {
"signal1": np.random.normal(0, 0.3, size=size).cumsum() + 50,
"fake_categorical1": np.random.choice(["A", "B", "C"], size=size),
"fake_categorical2": np.random.choice(["D", "E", "F"], size=size),
}
df = dd.from_pandas(pd.DataFrame(d), 2)
df.to_parquet(
fn,
compression="snappy",
write_index=False,
engine=engine,
partition_on=["fake_categorical1", "fake_categorical2"],
)
df_partitioned = dd.read_parquet(fn, engine=engine)
df_partitioned[df_partitioned.fake_categorical1 == "A"].compute()
def test_with_tz(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version < parse_version("0.3.0"):
pytest.skip("fastparquet<0.3.0 did not support this")
with warnings.catch_warnings():
if engine == "fastparquet":
# fastparquet-442
warnings.simplefilter("ignore", FutureWarning) # pandas 0.25
fn = str(tmpdir)
df = pd.DataFrame([[0]], columns=["a"], dtype="datetime64[ns, UTC]")
df = dd.from_pandas(df, 1)
df.to_parquet(fn, engine=engine)
df2 = dd.read_parquet(fn, engine=engine)
assert_eq(df, df2, check_divisions=False, check_index=False)
@PYARROW_MARK
def test_arrow_partitioning(tmpdir):
# Issue #3518
path = str(tmpdir)
data = {
"p": np.repeat(np.arange(3), 2).astype(np.int8),
"b": np.repeat(-1, 6).astype(np.int16),
"c": np.repeat(-2, 6).astype(np.float32),
"d": np.repeat(-3, 6).astype(np.float64),
}
pdf = pd.DataFrame(data)
ddf = dd.from_pandas(pdf, npartitions=2)
ddf.to_parquet(path, engine="pyarrow", write_index=False, partition_on="p")
ddf = dd.read_parquet(path, index=False, engine="pyarrow")
ddf.astype({"b": np.float32}).compute()
def test_informative_error_messages():
with pytest.raises(ValueError) as info:
dd.read_parquet("foo", engine="foo")
assert "foo" in str(info.value)
assert "arrow" in str(info.value)
assert "fastparquet" in str(info.value)
def test_append_cat_fp(tmpdir, engine):
path = str(tmpdir)
# https://github.com/dask/dask/issues/4120
df = pd.DataFrame({"x": ["a", "a", "b", "a", "b"]})
df["x"] = df["x"].astype("category")
ddf = dd.from_pandas(df, npartitions=1)
dd.to_parquet(ddf, path, engine=engine)
dd.to_parquet(ddf, path, append=True, ignore_divisions=True, engine=engine)
d = dd.read_parquet(path, engine=engine).compute()
assert d["x"].tolist() == ["a", "a", "b", "a", "b"] * 2
@PYARROW_MARK
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]}),
pd.DataFrame({"x": ["c", "a", "b"]}),
pd.DataFrame({"x": ["cc", "a", "bbb"]}),
pd.DataFrame({"x": [b"a", b"b", b"c"]}),
pytest.param(pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])})),
pytest.param(pd.DataFrame({"x": pd.Categorical([1, 2, 1])})),
pd.DataFrame({"x": list(map(pd.Timestamp, [3000000, 2000000, 1000000]))}), # ms
pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), # us
pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"),
# pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), # Casting errors
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"),
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"),
pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"),
pd.DataFrame({"x": [3, 2, 1]}).astype("float32"),
pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]),
pd.DataFrame(
{"x": [4, 5, 6, 1, 2, 3]}, index=pd.Index([1, 2, 3, 4, 5, 6], name="foo")
),
pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}),
pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]),
pd.DataFrame({"0": [3, 2, 1]}),
pd.DataFrame({"x": [3, 2, None]}),
pd.DataFrame({"-": [3.0, 2.0, None]}),
pd.DataFrame({".": [3.0, 2.0, None]}),
pd.DataFrame({" ": [3.0, 2.0, None]}),
],
)
def test_roundtrip_arrow(tmpdir, df):
# Index will be given a name when preserved as index
tmp_path = str(tmpdir)
if not df.index.name:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp_path, engine="pyarrow", write_index=True)
ddf2 = dd.read_parquet(tmp_path, engine="pyarrow", gather_statistics=True)
assert_eq(ddf, ddf2)
def test_datasets_timeseries(tmpdir, engine):
tmp_path = str(tmpdir)
df = dask.datasets.timeseries(
start="2000-01-01", end="2000-01-10", freq="1d"
).persist()
df.to_parquet(tmp_path, engine=engine)
df2 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, df2)
def test_pathlib_path(tmpdir, engine):
import pathlib
df = pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
path = pathlib.Path(str(tmpdir))
ddf.to_parquet(path, engine=engine)
ddf2 = dd.read_parquet(path, engine=engine)
assert_eq(ddf, ddf2)
@PYARROW_LE_MARK
def test_pyarrow_metadata_nthreads(tmpdir):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmp_path, engine="pyarrow")
ops = {"dataset": {"metadata_nthreads": 2}}
ddf2 = dd.read_parquet(tmp_path, engine="pyarrow-legacy", **ops)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_categories_large(tmpdir, engine):
# Issue #5112
fn = str(tmpdir.join("parquet_int16.parq"))
numbers = np.random.randint(0, 800000, size=1000000)
df = pd.DataFrame(numbers.T, columns=["name"])
df.name = df.name.astype("category")
df.to_parquet(fn, engine="fastparquet", compression="uncompressed")
ddf = dd.read_parquet(fn, engine=engine, categories={"name": 80000})
assert_eq(sorted(df.name.cat.categories), sorted(ddf.compute().name.cat.categories))
@write_read_engines()
def test_read_glob_no_meta(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"), engine=read_engine, gather_statistics=False
)
assert_eq(ddf, ddf2, check_divisions=False)
@write_read_engines()
def test_read_glob_yes_meta(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
paths = glob.glob(os.path.join(tmp_path, "*.parquet"))
paths.append(os.path.join(tmp_path, "_metadata"))
ddf2 = dd.read_parquet(paths, engine=read_engine, gather_statistics=False)
assert_eq(ddf, ddf2, check_divisions=False)
@pytest.mark.parametrize("statistics", [True, False, None])
@pytest.mark.parametrize("remove_common", [True, False])
@write_read_engines()
def test_read_dir_nometa(tmpdir, write_engine, read_engine, statistics, remove_common):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
if remove_common and os.path.exists(os.path.join(tmp_path, "_common_metadata")):
os.unlink(os.path.join(tmp_path, "_common_metadata"))
ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=statistics)
assert_eq(ddf, ddf2, check_divisions=False)
assert ddf.divisions == tuple(range(0, 420, 30))
if statistics is False or statistics is None and read_engine.startswith("pyarrow"):
assert ddf2.divisions == (None,) * 14
else:
assert ddf2.divisions == tuple(range(0, 420, 30))
@write_read_engines()
def test_statistics_nometa(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine, write_metadata_file=False)
ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=True)
assert_eq(ddf, ddf2)
assert ddf.divisions == tuple(range(0, 420, 30))
assert ddf2.divisions == tuple(range(0, 420, 30))
@pytest.mark.parametrize("schema", ["infer", None])
def test_timeseries_nulls_in_schema(tmpdir, engine, schema):
# GH#5608: relative path failing _metadata/_common_metadata detection.
tmp_path = str(tmpdir.mkdir("files"))
tmp_path = os.path.join(tmp_path, "../", "files")
ddf2 = (
dask.datasets.timeseries(start="2000-01-01", end="2000-01-03", freq="1h")
.reset_index()
.map_partitions(lambda x: x.loc[:5])
)
ddf2 = ddf2.set_index("x").reset_index().persist()
ddf2.name = ddf2.name.where(ddf2.timestamp == "2000-01-01", None)
# Note: `append_row_groups` will fail with pyarrow>0.17.1 for _metadata write
dataset = {"validate_schema": False} if engine == "pyarrow-legacy" else {}
ddf2.to_parquet(tmp_path, engine=engine, write_metadata_file=False, schema=schema)
ddf_read = dd.read_parquet(tmp_path, engine=engine, dataset=dataset)
assert_eq(ddf_read, ddf2, check_divisions=False, check_index=False)
@PYARROW_LE_MARK
@pytest.mark.parametrize("numerical", [True, False])
@pytest.mark.parametrize(
"timestamp", ["2000-01-01", "2000-01-02", "2000-01-03", "2000-01-04"]
)
def test_timeseries_nulls_in_schema_pyarrow(tmpdir, timestamp, numerical):
tmp_path = str(tmpdir)
ddf2 = dd.from_pandas(
pd.DataFrame(
{
"timestamp": [
pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03"),
pd.Timestamp("2000-01-04"),
],
"id": np.arange(4, dtype="float64"),
"name": ["cat", "dog", "bird", "cow"],
}
),
npartitions=2,
).persist()
if numerical:
ddf2.id = ddf2.id.where(ddf2.timestamp == timestamp, None)
ddf2.id = ddf2.id.astype("float64")
else:
ddf2.name = ddf2.name.where(ddf2.timestamp == timestamp, None)
# There should be no schema error if you specify a schema on write
schema = pa.schema(
[("timestamp", pa.timestamp("ns")), ("id", pa.float64()), ("name", pa.string())]
)
ddf2.to_parquet(tmp_path, schema=schema, write_index=False, engine="pyarrow")
assert_eq(
dd.read_parquet(
tmp_path,
dataset={"validate_schema": True},
index=False,
engine="pyarrow-legacy",
),
ddf2,
check_divisions=False,
check_index=False,
)
@PYARROW_LE_MARK
def test_read_inconsistent_schema_pyarrow(tmpdir):
# Note: This is a proxy test for a cudf-related issue fix
# (see cudf#5062 github issue). The cause of that issue is
# schema inconsistencies that do not actually correspond to
# different types, but whether or not the file/column contains
# null values.
df1 = pd.DataFrame({"id": [0, 1], "val": [10, 20]})
df2 = pd.DataFrame({"id": [2, 3], "val": [30, 40]})
desired_type = "int64"
other_type = "int32"
df1.val = df1.val.astype(desired_type)
df2.val = df2.val.astype(other_type)
df_expect = pd.concat([df1, df2], ignore_index=True)
df_expect["val"] = df_expect.val.astype(desired_type)
df1.to_parquet(os.path.join(tmpdir, "0.parquet"), engine="pyarrow")
df2.to_parquet(os.path.join(tmpdir, "1.parquet"), engine="pyarrow")
# Read Directory
check = dd.read_parquet(
str(tmpdir), dataset={"validate_schema": False}, engine="pyarrow-legacy"
)
assert_eq(check.compute(), df_expect, check_index=False)
# Read List
check = dd.read_parquet(
os.path.join(tmpdir, "*.parquet"),
dataset={"validate_schema": False},
engine="pyarrow-legacy",
)
assert_eq(check.compute(), df_expect, check_index=False)
def test_graph_size_pyarrow(tmpdir, engine):
import pickle
fn = str(tmpdir)
ddf1 = dask.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="60S", partition_freq="1H"
)
ddf1.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert len(pickle.dumps(ddf2.__dask_graph__())) < 25000
@pytest.mark.parametrize("preserve_index", [True, False])
@pytest.mark.parametrize("index", [None, np.random.permutation(2000)])
def test_getitem_optimization(tmpdir, engine, preserve_index, index):
tmp_path_rd = str(tmpdir.mkdir("read"))
tmp_path_wt = str(tmpdir.mkdir("write"))
df = pd.DataFrame(
{"A": [1, 2] * 1000, "B": [3, 4] * 1000, "C": [5, 6] * 1000}, index=index
)
df.index.name = "my_index"
ddf = dd.from_pandas(df, 2, sort=False)
ddf.to_parquet(tmp_path_rd, engine=engine, write_index=preserve_index)
ddf = dd.read_parquet(tmp_path_rd, engine=engine)["B"]
# Write ddf back to disk to check that the round trip
# preserves the getitem optimization
out = ddf.to_frame().to_parquet(tmp_path_wt, engine=engine, compute=False)
dsk = optimize_dataframe_getitem(out.dask, keys=[out.key])
subgraph_rd = hlg_layer(dsk, "read-parquet")
assert isinstance(subgraph_rd, DataFrameIOLayer)
assert subgraph_rd.columns == ["B"]
assert next(iter(subgraph_rd.dsk.values()))[0].columns == ["B"]
subgraph_wt = hlg_layer(dsk, "to-parquet")
assert isinstance(subgraph_wt, Blockwise)
assert_eq(ddf.compute(optimize_graph=False), ddf.compute())
def test_getitem_optimization_empty(tmpdir, engine):
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
fn = os.path.join(str(tmpdir))
ddf.to_parquet(fn, engine=engine)
df2 = dd.read_parquet(fn, columns=[], engine=engine)
dsk = optimize_dataframe_getitem(df2.dask, keys=[df2._name])
subgraph = next(iter(dsk.layers.values()))
assert isinstance(subgraph, DataFrameIOLayer)
assert subgraph.columns == []
def test_getitem_optimization_multi(tmpdir, engine):
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
fn = os.path.join(str(tmpdir))
ddf.to_parquet(fn, engine=engine)
a = dd.read_parquet(fn, engine=engine)["B"]
b = dd.read_parquet(fn, engine=engine)[["C"]]
c = dd.read_parquet(fn, engine=engine)[["C", "A"]]
a1, a2, a3 = dask.compute(a, b, c)
b1, b2, b3 = dask.compute(a, b, c, optimize_graph=False)
assert_eq(a1, b1)
assert_eq(a2, b2)
assert_eq(a3, b3)
def test_getitem_optimization_after_filter(tmpdir, engine):
df = pd.DataFrame({"a": [1, 2, 3] * 5, "b": range(15), "c": range(15)})
dd.from_pandas(df, npartitions=3).to_parquet(tmpdir, engine=engine)
ddf = dd.read_parquet(tmpdir, engine=engine)
df2 = df[df["b"] > 10][["a"]]
ddf2 = ddf[ddf["b"] > 10][["a"]]
dsk = optimize_dataframe_getitem(ddf2.dask, keys=[ddf2._name])
subgraph_rd = hlg_layer(dsk, "read-parquet")
assert isinstance(subgraph_rd, DataFrameIOLayer)
assert set(subgraph_rd.columns) == {"a", "b"}
assert_eq(df2, ddf2)
def test_getitem_optimization_after_filter_complex(tmpdir, engine):
df = pd.DataFrame({"a": [1, 2, 3] * 5, "b": range(15), "c": range(15)})
dd.from_pandas(df, npartitions=3).to_parquet(tmpdir, engine=engine)
ddf = dd.read_parquet(tmpdir, engine=engine)
df2 = df[["b"]]
df2 = df2.assign(d=1)
df2 = df[df2["d"] == 1][["b"]]
ddf2 = ddf[["b"]]
ddf2 = ddf2.assign(d=1)
ddf2 = ddf[ddf2["d"] == 1][["b"]]
dsk = optimize_dataframe_getitem(ddf2.dask, keys=[ddf2._name])
subgraph_rd = hlg_layer(dsk, "read-parquet")
assert isinstance(subgraph_rd, DataFrameIOLayer)
assert set(subgraph_rd.columns) == {"b"}
assert_eq(df2, ddf2)
def test_layer_creation_info(tmpdir, engine):
df = pd.DataFrame({"a": range(10), "b": ["cat", "dog"] * 5})
dd.from_pandas(df, npartitions=1).to_parquet(
tmpdir, engine=engine, partition_on=["b"]
)
# Apply filters directly in dd.read_parquet
filters = [("b", "==", "cat")]
ddf1 = dd.read_parquet(tmpdir, engine=engine, filters=filters)
assert "dog" not in ddf1["b"].compute()
# Results will not match if we use dd.read_parquet
# without filters
ddf2 = dd.read_parquet(tmpdir, engine=engine)
with pytest.raises(AssertionError):
assert_eq(ddf1, ddf2)
# However, we can use `creation_info` to regenerate
# the same collection with `filters` defined
info = ddf2.dask.layers[ddf2._name].creation_info
kwargs = info.get("kwargs", {})
kwargs["filters"] = filters
ddf3 = info["func"](*info.get("args", []), **kwargs)
assert_eq(ddf1, ddf3)
@ANY_ENGINE_MARK
def test_blockwise_parquet_annotations(tmpdir):
df = pd.DataFrame({"a": np.arange(40, dtype=np.int32)})
expect = dd.from_pandas(df, npartitions=2)
expect.to_parquet(str(tmpdir))
with dask.annotate(foo="bar"):
ddf = dd.read_parquet(str(tmpdir))
# `ddf` should now have ONE Blockwise layer
layers = ddf.__dask_graph__().layers
assert len(layers) == 1
layer = next(iter(layers.values()))
assert isinstance(layer, DataFrameIOLayer)
assert layer.annotations == {"foo": "bar"}
@ANY_ENGINE_MARK
def test_optimize_blockwise_parquet(tmpdir):
size = 40
npartitions = 2
tmp = str(tmpdir)
df = pd.DataFrame({"a": np.arange(size, dtype=np.int32)})
expect = dd.from_pandas(df, npartitions=npartitions)
expect.to_parquet(tmp)
ddf = dd.read_parquet(tmp)
# `ddf` should now have ONE Blockwise layer
layers = ddf.__dask_graph__().layers
assert len(layers) == 1
assert isinstance(list(layers.values())[0], Blockwise)
# Check single-layer result
assert_eq(ddf, expect)
# Increment by 1
ddf += 1
expect += 1
# Increment by 10
ddf += 10
expect += 10
# `ddf` should now have THREE Blockwise layers
layers = ddf.__dask_graph__().layers
assert len(layers) == 3
assert all(isinstance(layer, Blockwise) for layer in layers.values())
# Check that `optimize_blockwise` fuses all three
    # `Blockwise` layers together into a single `Blockwise` layer
keys = [(ddf._name, i) for i in range(npartitions)]
graph = optimize_blockwise(ddf.__dask_graph__(), keys)
layers = graph.layers
name = list(layers.keys())[0]
assert len(layers) == 1
assert isinstance(layers[name], Blockwise)
# Check final result
assert_eq(ddf, expect)
@PYARROW_MARK
def test_split_row_groups(tmpdir, engine):
"""Test split_row_groups read_parquet kwarg"""
tmp = str(tmpdir)
df = pd.DataFrame(
{"i32": np.arange(800, dtype=np.int32), "f": np.arange(800, dtype=np.float64)}
)
df.index.name = "index"
half = len(df) // 2
dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(
tmp, engine="pyarrow", row_group_size=100
)
ddf3 = dd.read_parquet(tmp, engine=engine, split_row_groups=True, chunksize=1)
assert ddf3.npartitions == 4
ddf3 = dd.read_parquet(
tmp, engine=engine, gather_statistics=True, split_row_groups=False
)
assert ddf3.npartitions == 2
dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(
tmp, append=True, engine="pyarrow", row_group_size=50
)
ddf3 = dd.read_parquet(
tmp,
engine=engine,
gather_statistics=True,
split_row_groups=True,
chunksize=1,
)
assert ddf3.npartitions == 12
ddf3 = dd.read_parquet(
tmp, engine=engine, gather_statistics=True, split_row_groups=False
)
assert ddf3.npartitions == 4
@PYARROW_MARK
@pytest.mark.parametrize("split_row_groups", [1, 12])
@pytest.mark.parametrize("gather_statistics", [True, False])
def test_split_row_groups_int(tmpdir, split_row_groups, gather_statistics, engine):
tmp = str(tmpdir)
row_group_size = 10
npartitions = 4
half_size = 400
df = pd.DataFrame(
{
"i32": np.arange(2 * half_size, dtype=np.int32),
"f": np.arange(2 * half_size, dtype=np.float64),
}
)
half = len(df) // 2
dd.from_pandas(df.iloc[:half], npartitions=npartitions).to_parquet(
tmp, engine="pyarrow", row_group_size=row_group_size
)
dd.from_pandas(df.iloc[half:], npartitions=npartitions).to_parquet(
tmp, append=True, engine="pyarrow", row_group_size=row_group_size
)
ddf2 = dd.read_parquet(
tmp,
engine=engine,
split_row_groups=split_row_groups,
gather_statistics=gather_statistics,
)
    expected_rg_count = int(half_size / row_group_size)
    assert ddf2.npartitions == 2 * math.ceil(expected_rg_count / split_row_groups)
@PYARROW_MARK
@pytest.mark.parametrize("split_row_groups", [8, 25])
def test_split_row_groups_int_aggregate_files(tmpdir, engine, split_row_groups):
# Use pyarrow to write a multi-file dataset with
# multiple row-groups per file
row_group_size = 10
size = 800
df = pd.DataFrame(
{
"i32": np.arange(size, dtype=np.int32),
"f": np.arange(size, dtype=np.float64),
}
)
dd.from_pandas(df, npartitions=4).to_parquet(
str(tmpdir), engine="pyarrow", row_group_size=row_group_size, write_index=False
)
# Read back with both `split_row_groups>1` and
# `aggregate_files=True`
ddf2 = dd.read_parquet(
str(tmpdir),
engine=engine,
split_row_groups=split_row_groups,
aggregate_files=True,
)
# Check that we are aggregating files as expected
npartitions_expected = math.ceil((size / row_group_size) / split_row_groups)
assert ddf2.npartitions == npartitions_expected
assert len(ddf2) == size
assert_eq(df, ddf2, check_index=False)
@PYARROW_MARK
def test_split_row_groups_filter(tmpdir, engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"i32": np.arange(800, dtype=np.int32), "f": np.arange(800, dtype=np.float64)}
)
df.index.name = "index"
search_val = 600
filters = [("f", "==", search_val)]
dd.from_pandas(df, npartitions=4).to_parquet(
tmp, append=True, engine="pyarrow", row_group_size=50
)
ddf2 = dd.read_parquet(tmp, engine=engine)
ddf3 = dd.read_parquet(
tmp,
engine=engine,
gather_statistics=True,
split_row_groups=True,
filters=filters,
)
assert (ddf3["i32"] == search_val).any().compute()
assert_eq(
ddf2[ddf2["i32"] == search_val].compute(),
ddf3[ddf3["i32"] == search_val].compute(),
)
@ANY_ENGINE_MARK
def test_optimize_getitem_and_nonblockwise(tmpdir):
path = os.path.join(tmpdir, "path.parquet")
df = pd.DataFrame(
{"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
index=["a", "b", "c"],
)
df.to_parquet(path)
df2 = dd.read_parquet(path)
df2[["a", "b"]].rolling(3).max().compute()
@ANY_ENGINE_MARK
def test_optimize_and_not(tmpdir):
path = os.path.join(tmpdir, "path.parquet")
df = pd.DataFrame(
{"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
index=["a", "b", "c"],
)
df.to_parquet(path)
df2 = dd.read_parquet(path)
df2a = df2["a"].groupby(df2["c"]).first().to_delayed()
df2b = df2["b"].groupby(df2["c"]).first().to_delayed()
df2c = df2[["a", "b"]].rolling(2).max().to_delayed()
df2d = df2.rolling(2).max().to_delayed()
(result,) = dask.compute(df2a + df2b + df2c + df2d)
expected = [
dask.compute(df2a)[0][0],
dask.compute(df2b)[0][0],
dask.compute(df2c)[0][0],
dask.compute(df2d)[0][0],
]
for a, b in zip(result, expected):
assert_eq(a, b)
@write_read_engines()
def test_chunksize_empty(tmpdir, write_engine, read_engine):
df = pd.DataFrame({"a": pd.Series(dtype="int"), "b": pd.Series(dtype="float")})
ddf1 = dd.from_pandas(df, npartitions=1)
ddf1.to_parquet(tmpdir, engine=write_engine)
ddf2 = dd.read_parquet(tmpdir, engine=read_engine, chunksize="1MiB")
assert_eq(ddf1, ddf2, check_index=False)
@PYARROW_MARK
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("partition_on", [None, "a"])
@pytest.mark.parametrize("chunksize", [4096, "1MiB"])
@write_read_engines()
def test_chunksize_files(
tmpdir, chunksize, partition_on, write_engine, read_engine, metadata
):
if partition_on and read_engine == "fastparquet" and not metadata:
pytest.skip("Fastparquet requires _metadata for partitioned data.")
df_size = 100
df1 = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
}
)
ddf1 = dd.from_pandas(df1, npartitions=9)
ddf1.to_parquet(
str(tmpdir),
engine=write_engine,
partition_on=partition_on,
write_metadata_file=metadata,
write_index=False,
)
ddf2 = dd.read_parquet(
str(tmpdir),
engine=read_engine,
chunksize=chunksize,
aggregate_files=partition_on if partition_on else True,
)
    # Check that files were aggregated as expected
if chunksize == 4096:
assert ddf2.npartitions < ddf1.npartitions
elif chunksize == "1MiB":
if partition_on:
assert ddf2.npartitions == 3
else:
assert ddf2.npartitions == 1
# Check that the final data is correct
if partition_on:
df2 = ddf2.compute().sort_values(["b", "c"])
df1 = df1.sort_values(["b", "c"])
assert_eq(df1[["b", "c"]], df2[["b", "c"]], check_index=False)
else:
assert_eq(ddf1, ddf2, check_divisions=False, check_index=False)
@write_read_engines()
@pytest.mark.parametrize("aggregate_files", ["a", "b"])
def test_chunksize_aggregate_files(tmpdir, write_engine, read_engine, aggregate_files):
chunksize = "1MiB"
partition_on = ["a", "b"]
df_size = 100
df1 = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.choice(["small", "large"], size=df_size),
"c": np.random.random(size=df_size),
"d": np.random.randint(1, 100, size=df_size),
}
)
ddf1 = dd.from_pandas(df1, npartitions=9)
ddf1.to_parquet(
str(tmpdir),
engine=write_engine,
partition_on=partition_on,
write_index=False,
)
ddf2 = dd.read_parquet(
str(tmpdir),
engine=read_engine,
chunksize=chunksize,
aggregate_files=aggregate_files,
)
    # Check that files were aggregated as expected
if aggregate_files == "a":
assert ddf2.npartitions == 3
elif aggregate_files == "b":
assert ddf2.npartitions == 6
# Check that the final data is correct
df2 = ddf2.compute().sort_values(["c", "d"])
df1 = df1.sort_values(["c", "d"])
assert_eq(df1[["c", "d"]], df2[["c", "d"]], check_index=False)
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("chunksize", [None, 1024, 4096, "1MiB"])
def test_chunksize(tmpdir, chunksize, engine, metadata):
nparts = 2
df_size = 100
row_group_size = 5
df = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
"index": np.arange(0, df_size),
}
).set_index("index")
ddf1 = dd.from_pandas(df, npartitions=nparts)
ddf1.to_parquet(
str(tmpdir),
engine="pyarrow",
row_group_size=row_group_size,
write_metadata_file=metadata,
)
if metadata:
path = str(tmpdir)
else:
dirname = str(tmpdir)
files = os.listdir(dirname)
assert "_metadata" not in files
path = os.path.join(dirname, "*.parquet")
ddf2 = dd.read_parquet(
path,
engine=engine,
chunksize=chunksize,
split_row_groups=True,
gather_statistics=True,
index="index",
aggregate_files=True,
)
assert_eq(ddf1, ddf2, check_divisions=False)
num_row_groups = df_size // row_group_size
if not chunksize:
assert ddf2.npartitions == num_row_groups
else:
# Check that we are really aggregating
assert ddf2.npartitions < num_row_groups
if chunksize == "1MiB":
# Largest chunksize will result in
# a single output partition
assert ddf2.npartitions == 1
@write_read_engines()
def test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine):
path = str(tmpdir.join("test.parquet"))
pdf = df.copy()
pdf.index.name = "index"
pdf.to_parquet(
path, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf_read = dd.read_parquet(
path,
engine=read_engine,
chunksize="10 kiB",
gather_statistics=True,
split_row_groups=True,
index="index",
)
assert_eq(pdf, ddf_read)
@FASTPARQUET_MARK
def test_read_pandas_fastparquet_partitioned(tmpdir, engine):
pdf = pd.DataFrame(
[{"str": str(i), "int": i, "group": "ABC"[i % 3]} for i in range(6)]
)
path = str(tmpdir)
pdf.to_parquet(path, partition_cols=["group"], engine="fastparquet")
ddf_read = dd.read_parquet(path, engine=engine)
assert len(ddf_read["group"].compute()) == 6
assert len(ddf_read.compute().group) == 6
def test_read_parquet_getitem_skip_when_getting_read_parquet(tmpdir, engine):
# https://github.com/dask/dask/issues/5893
pdf = pd.DataFrame({"A": [1, 2, 3, 4, 5, 6], "B": ["a", "b", "c", "d", "e", "f"]})
path = os.path.join(str(tmpdir), "data.parquet")
pd_engine = "pyarrow" if engine.startswith("pyarrow") else "fastparquet"
pdf.to_parquet(path, engine=pd_engine)
ddf = dd.read_parquet(path, engine=engine)
a, b = dask.optimize(ddf["A"], ddf)
# Make sure we are still allowing the getitem optimization
ddf = ddf["A"]
dsk = optimize_dataframe_getitem(ddf.dask, keys=[(ddf._name, 0)])
read = [key for key in dsk.layers if key.startswith("read-parquet")][0]
subgraph = dsk.layers[read]
assert isinstance(subgraph, DataFrameIOLayer)
assert subgraph.columns == ["A"]
@pytest.mark.parametrize("gather_statistics", [None, True])
@write_read_engines()
def test_filter_nonpartition_columns(
tmpdir, write_engine, read_engine, gather_statistics
):
tmpdir = str(tmpdir)
df_write = pd.DataFrame(
{
"id": [1, 2, 3, 4] * 4,
"time": np.arange(16),
"random": np.random.choice(["cat", "dog"], size=16),
}
)
ddf_write = dd.from_pandas(df_write, npartitions=4)
ddf_write.to_parquet(
tmpdir, write_index=False, partition_on=["id"], engine=write_engine
)
ddf_read = dd.read_parquet(
tmpdir,
index=False,
engine=read_engine,
gather_statistics=gather_statistics,
filters=[(("time", "<", 5))],
)
df_read = ddf_read.compute()
assert len(df_read) == len(df_read[df_read["time"] < 5])
assert df_read["time"].max() < 5
@PYARROW_MARK
def test_pandas_metadata_nullable_pyarrow(tmpdir):
tmpdir = str(tmpdir)
ddf1 = dd.from_pandas(
pd.DataFrame(
{
"A": | pd.array([1, None, 2], dtype="Int64") | pandas.array |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import bz2
import copy
import gzip
import os
import shutil
from pathlib import Path
from typing import Any, List, Mapping
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from source_s3.source_files_abstract.formats.parquet_parser import PARQUET_TYPES, ParquetParser
from .abstract_test_parser import AbstractTestParser
from .conftest import TMP_FOLDER
SAMPLE_DIRECTORY = Path(__file__).resolve().parent.joinpath("sample_files/")
def compress(archive_name: str, filename: str) -> str:
compress_filename = f"{filename}.{archive_name}"
with open(filename, "rb") as f_in:
if archive_name == "gz":
with gzip.open(compress_filename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
elif archive_name == "bz2":
with bz2.open(compress_filename, "wb") as f_out: # type: ignore[assignment]
shutil.copyfileobj(f_in, f_out)
return compress_filename
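# Illustrative usage of the helper above (sketch only; "sample.parquet" is a
# hypothetical path, not a fixture used by these tests):
#
#   gz_path = compress("gz", "sample.parquet")    # -> "sample.parquet.gz"
#   bz2_path = compress("bz2", "sample.parquet")  # -> "sample.parquet.bz2"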
class TestParquetParser(AbstractTestParser):
filetype = "parquet"
record_types = PARQUET_TYPES
@classmethod
def generate_parquet_file(
cls, name: str, columns: Mapping[str, str], num_rows: int, custom_rows: Mapping[int, List[str]] = None
) -> str:
"""Generates a random data and save it to a tmp file"""
filename = os.path.join(TMP_FOLDER, name + "." + cls.filetype)
pq_writer = None
types = list(columns.values()) if num_rows else []
custom_rows = custom_rows or {}
column_names = list(columns.keys())
buffer = []
for i in range(num_rows):
buffer.append(custom_rows.get(i) or cls._generate_row(types))
if i != (num_rows - 1) and len(buffer) < 100:
continue
data = {col_values[0]: list(col_values[1:]) for col_values in zip(column_names, *buffer)}
buffer = []
            df = pd.DataFrame(data)
import collections
import itertools
import multiprocessing
import os
import random
import re
import signal
import sys
import threading
import time
import traceback
import click
import numpy
import pandas as pd
from tqdm import tqdm
from estee.common import imode
from estee.schedulers import WorkStealingScheduler
from estee.schedulers.basic import AllOnOneScheduler, RandomAssignScheduler
from estee.schedulers.camp import Camp2Scheduler
from estee.schedulers.clustering import LcScheduler
from estee.schedulers.genetic import GeneticScheduler
from estee.schedulers.others import BlevelScheduler, DLSScheduler, ETFScheduler, MCPGTScheduler, \
MCPScheduler, TlevelScheduler
from estee.schedulers.queue import BlevelGtScheduler, RandomGtScheduler, TlevelGtScheduler
from estee.serialization.dask_json import json_deserialize, json_serialize
from estee.simulator import MaxMinFlowNetModel, SimpleNetModel
from estee.simulator import Simulator, Worker
from estee.simulator.trace import FetchEndTraceEvent
def generate_seed():
seed = os.getpid() * time.time()
for b in os.urandom(4):
seed *= b
seed = int(seed) % 2**32
random.seed(seed)
numpy.random.seed(seed)
generate_seed()
SCHEDULERS = {
"single": AllOnOneScheduler,
"blevel": BlevelScheduler,
"blevel-gt": BlevelGtScheduler,
"tlevel": TlevelScheduler,
"tlevel-gt": TlevelGtScheduler,
"random": RandomAssignScheduler,
"random-gt": RandomGtScheduler,
"dls": DLSScheduler,
"etf": ETFScheduler,
"mcp": MCPScheduler,
"mcp-gt": MCPGTScheduler,
"genetic": GeneticScheduler,
"camp2": lambda: Camp2Scheduler(5000),
"lc": LcScheduler,
"ws": WorkStealingScheduler
}
NETMODELS = {
"simple": SimpleNetModel,
"maxmin": MaxMinFlowNetModel
}
CLUSTERS = {
"2x8": [{"cpus": 8}] * 2,
"4x4": [{"cpus": 4}] * 4,
"8x4": [{"cpus": 4}] * 8,
"16x4": [{"cpus": 4}] * 16,
"32x4": [{"cpus": 4}] * 32,
"8x8": [{"cpus": 8}] * 8,
"16x8": [{"cpus": 8}] * 16,
"stairs16": [{"cpus": i} for i in range(1, 6)] + [{"cpus": 1}],
"32x16": [{"cpus": 16}] * 32,
"64x16": [{"cpus": 16}] * 64,
"128x16": [{"cpus": 16}] * 128,
"256x16": [{"cpus": 16}] * 256,
}
BANDWIDTHS = {
"8G": 8192,
"2G": 2048,
"512M": 512,
"128M": 128,
"32M": 32
}
IMODES = {
"exact": imode.process_imode_exact,
"blind": imode.process_imode_blind,
"mean": imode.process_imode_mean,
"user": imode.process_imode_user,
}
SCHED_TIMINGS = {
# min_sched_interval, sched_time
"0/0": (0, 0),
"0.1/0.05": (0.1, 0.05),
"0.4/0.05": (0.4, 0.05),
"1.6/0.05": (1.6, 0.05),
"6.4/0.05": (6.4, 0.05)
}
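# Illustrative reading of SCHED_TIMINGS (assumption: both values are simulated seconds):
# e.g. "0.4/0.05" would mean the scheduler is invoked at most once every 0.4 s and each
# invocation is charged 0.05 s of scheduling overhead.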
COLUMNS = ["graph_set",
"graph_name",
"graph_id",
"cluster_name",
"bandwidth",
"netmodel",
"scheduler_name",
"imode",
"min_sched_interval",
"sched_time",
"time",
"execution_time",
"total_transfer"]
Instance = collections.namedtuple("Instance",
("graph_set", "graph_name", "graph_id", "graph",
"cluster_name", "bandwidth", "netmodel",
"scheduler_name", "imode", "min_sched_interval", "sched_time",
"count"))
class BenchmarkConfig:
graph_cache = {}
def __init__(self, graph_frame, schedulers, clusters, netmodels, bandwidths,
imodes, sched_timings, count):
self.graph_frame = graph_frame
self.schedulers = schedulers
self.clusters = clusters
self.netmodels = netmodels
self.bandwidths = bandwidths
self.imodes = imodes
self.sched_timings = sched_timings
self.count = count
def generate_instances(self):
def calculate_imodes(graph, graph_id):
if graph_id not in BenchmarkConfig.graph_cache:
BenchmarkConfig.graph_cache[graph_id] = {}
for mode in IMODES:
g = json_deserialize(graph)
IMODES[mode](g)
BenchmarkConfig.graph_cache[graph_id][mode] = json_serialize(g)
for graph_def, cluster_name, bandwidth, netmodel, scheduler_name, mode, sched_timing \
in itertools.product(self.graph_frame.iterrows(), self.clusters, self.bandwidths,
self.netmodels, self.schedulers, self.imodes,
self.sched_timings):
g = graph_def[1]
calculate_imodes(g["graph"], g["graph_id"])
graph = BenchmarkConfig.graph_cache[g["graph_id"]][mode]
(min_sched_interval, sched_time) = SCHED_TIMINGS[sched_timing]
instance = Instance(
g["graph_set"], g["graph_name"], g["graph_id"], graph,
cluster_name, BANDWIDTHS[bandwidth], netmodel,
scheduler_name,
mode,
min_sched_interval, sched_time,
self.count)
yield instance
def __repr__(self):
return """
============ Config ========================
scheduler : {schedulers}
cluster : {clusters}
netmodel : {netmodels}
bandwidths: {bandwidths}
imode : {imodes}
timings : {timings}
REPEAT : {repeat}
============================================
""".format(
schedulers=", ".join(self.schedulers),
clusters=", ".join(self.clusters),
netmodels=", ".join(self.netmodels),
bandwidths=", ".join(self.bandwidths),
imodes=", ".join(self.imodes),
timings=", ".join(self.sched_timings),
repeat=self.count
)
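# Minimal sketch of building a config by hand (illustrative only; `frame` is a
# hypothetical DataFrame with the graph_set/graph_name/graph_id/graph columns
# expected by generate_instances()):
#
#   config = BenchmarkConfig(frame, schedulers=["blevel"], clusters=["4x4"],
#                            netmodels=["simple"], bandwidths=["2G"],
#                            imodes=["exact"], sched_timings=["0/0"], count=1)
#   instances = list(config.generate_instances())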
def run_single_instance(instance):
time.sleep(1)
inf = 2**32
def create_worker(wargs):
if instance.netmodel == "simple":
return Worker(**wargs, max_downloads=inf, max_downloads_per_worker=inf)
return Worker(**wargs)
begin_time = time.monotonic()
workers = [create_worker(wargs) for wargs in CLUSTERS[instance.cluster_name]]
netmodel = NETMODELS[instance.netmodel](instance.bandwidth)
scheduler = SCHEDULERS[instance.scheduler_name]()
simulator = Simulator(instance.graph, workers, scheduler, netmodel, trace=True)
try:
sim_time = simulator.run()
runtime = time.monotonic() - begin_time
transfer = 0
for e in simulator.trace_events:
if isinstance(e, FetchEndTraceEvent):
transfer += e.output.size
return sim_time, runtime, transfer
except Exception:
traceback.print_exc()
print("ERROR INSTANCE: {}".format(instance), file=sys.stderr)
return None, None, None
def benchmark_scheduler(instance):
return [run_single_instance(instance)
for _ in range(instance.count)]
def process_multiprocessing(instance):
instance = instance._replace(graph=json_deserialize(instance.graph))
return benchmark_scheduler(instance)
def run_multiprocessing(pool, instances):
return pool.imap(process_multiprocessing, instances)
def process_dask(conf):
(graph, instance) = conf
instance = instance._replace(graph=json_deserialize(graph))
return benchmark_scheduler(instance)
def run_dask(instances, cluster):
from dask.distributed import Client
client = Client(cluster)
graphs = {}
instance_to_graph = {}
instances = list(instances)
for (i, instance) in enumerate(instances):
if instance.graph not in graphs:
graphs[instance.graph] = client.scatter([instance.graph], broadcast=True)[0]
inst = instance._replace(graph=None)
instance_to_graph[inst] = graphs[instance.graph]
instances[i] = inst
results = client.map(process_dask, ((instance_to_graph[i], i) for i in instances))
return client.gather(results)
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def compute(instances, timeout=0, dask_cluster=None):
rows = []
if not instances:
return rows
if dask_cluster:
iterator = run_dask(instances, dask_cluster)
else:
pool = multiprocessing.Pool(initializer=init_worker)
iterator = run_multiprocessing(pool, instances)
if timeout:
print("Timeout set to {} seconds".format(timeout))
def run():
counter = 0
try:
for instance, result in tqdm(zip(instances, iterator), total=len(instances)):
counter += 1
for r_time, r_runtime, r_transfer in result:
if r_time is not None:
rows.append((
instance.graph_set,
instance.graph_name,
instance.graph_id,
instance.cluster_name,
instance.bandwidth,
instance.netmodel,
instance.scheduler_name,
instance.imode,
instance.min_sched_interval,
instance.sched_time,
r_time,
r_runtime,
r_transfer
))
except:
print("Benchmark interrupted, iterated {} instances. Writing intermediate results"
.format(counter))
if timeout:
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout reached")
else:
run()
return rows
def run_benchmark(configs, oldframe, resultfile, skip_completed, timeout=0, dask_cluster=None):
for config in configs:
print(config)
instances = create_instances(configs, oldframe, skip_completed, 5)
rows = compute(instances, timeout, dask_cluster)
if not rows:
print("No results were computed")
return
frame = pd.DataFrame(rows, columns=COLUMNS)
print(frame.groupby(["graph_name", "graph_id", "cluster_name",
"bandwidth", "netmodel", "imode", "min_sched_interval", "sched_time",
"scheduler_name"]).mean())
if len(frame) > 0:
base, ext = os.path.splitext(resultfile)
path = "{}.backup{}".format(base, ext)
print("Creating backup of old results to '{}'".format(path))
write_resultfile(oldframe, path)
newframe = pd.concat([oldframe, frame], ignore_index=True)
write_resultfile(newframe, resultfile)
print("{} entries in new '{}'".format(newframe["time"].count(), resultfile))
def skip_completed_instances(instances, frame, repeat, columns, batch):
skipped_output = 0
skipped_batch = 0
counts = frame.groupby(columns).size()
result = []
for instance in instances:
hashed = tuple(getattr(instance, col) for col in columns)
existing_count = 0
if hashed in counts:
count = counts.loc[hashed]
skipped_output += count
existing_count += count
if hashed in batch:
count = batch[hashed]
skipped_batch += count
existing_count += count
if existing_count == 0:
result.append(instance)
elif existing_count < repeat:
result.append(instance._replace(count=repeat - existing_count))
if skipped_output or skipped_batch:
print("Skipping {} instances from output, {} from batch, {} left".format(skipped_output,
skipped_batch,
len(result)))
return result
def limit_max_count(instances, max_count):
result = []
for instance in instances:
if instance.count > max_count:
remaining = instance.count
while remaining > 0:
count = min(max_count, remaining)
remaining -= count
result.append(instance._replace(count=count))
else:
result.append(instance)
return result
def create_instances(configs, frame, skip_completed, max_count):
total_instances = []
columns = ["graph_id",
"cluster_name",
"bandwidth",
"netmodel",
"scheduler_name",
"imode",
"min_sched_interval",
"sched_time"]
batch = {}
for config in configs:
instances = list(config.generate_instances())
if skip_completed:
instances = skip_completed_instances(instances, frame, config.count, columns, batch)
instances = limit_max_count(instances, max_count)
for instance in instances:
hashed = tuple(getattr(instance, col) for col in columns)
batch[hashed] = instance.count + batch.get(hashed, 0)
total_instances += instances
return total_instances
def load_resultfile(resultfile, append):
if os.path.isfile(resultfile):
if not append:
print("Result file '{}' already exists\n"
"Remove --no-append to append results to it".format(resultfile),
file=sys.stderr)
exit(1)
print("Appending to result file '{}'".format(resultfile))
oldframe = pd.read_csv(resultfile)
assert list(oldframe.columns) == COLUMNS
else:
print("Creating result file '{}'".format(resultfile))
        oldframe = pd.DataFrame([], columns=COLUMNS)
import pandas as pd
import numpy as np
import re
import openpyxl
from openpyxl import load_workbook
class DataFrameFeature():
_NaN = "NaN" # represent the NaN value in df
# filter column values by remove string behind "sep", and r-strip space
@staticmethod
def filter_column_value(df, *, column_name, sep):
col = df.columns.get_loc(column_name) # get col_idx by col_name
for row in range(df.shape[0]):
value = df.iloc[row, col]
            # if the value is NaN, do nothing
if pd.isna(value):
continue
sep_idx = value.find(sep) # find character "sep" in given string
# if not found the target sep, right strip space only, else cut the string behind sep
if sep_idx == -1:
df.iloc[row, col] = value.rstrip(" ")
else:
df.iloc[row, col] = value[:sep_idx].rstrip(" ")
# get country list and counts according by Category
@staticmethod
def get_country_set(df, *, category):
list_ctry = []
total_ctry = 0
col_ctry = df.columns.get_loc("Country")
col_ctgy = df.columns.get_loc("Category")
col_cert = df.columns.get_loc("Certificate")
for row in range(df.shape[0]):
ctry = df.iloc[row, col_ctry]
ctgy = df.iloc[row, col_ctgy]
cert = df.iloc[row, col_cert]
if pd.isna(ctgy):
continue
if category in ctgy:
if pd.isna(ctry):
continue
if ctry in list_ctry:
continue
elif ctry.find("/") != -1:
list_ctry.append(ctry + "(" + cert + ")")
total_ctry += len(ctry.split("/"))
else:
list_ctry.append(ctry + "(" + cert + ")")
total_ctry += 1
return total_ctry, list_ctry
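    # Illustrative usage (sketch; `df` is a hypothetical frame with "Country",
    # "Category" and "Certificate" columns, and "Telecom" a made-up category):
    #
    #   total, countries = DataFrameFeature.get_country_set(df, category="Telecom")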
# truncate df according to the value in given column
@staticmethod
def truncate(df, *, column, first_value, last_value):
first_row = last_row = -1
n_col = column
        b_first = False  # flag: whether first_value has been found
        row = df.shape[0]  # number of rows in df
for i in range(row):
if df.iloc[i, n_col] == first_value:
b_first = True
first_row = i + 1 # we don't need the row we found
# check if last_value is NaN or not
            if last_value == DataFrameFeature._NaN and pd.isna(df.iloc[i, n_col]):
# Load the necessary libraries
# Set the seed to 123
import pandas as pd
import numpy as np
# load the dataset into the memory
data = pd.read_csv('Logistic_regression.csv')
# Pre-processing steps
'''You may need to clean the variables, impute the missing values and one-hot encode the categorical variables.
The following variables need to be converted to one-hot encoding:
cat_vars=['job','marital','education','default','housing','loan','contact','month','day_of_week','poutcome']
Your final table should have columns like the following:
array(['age', 'duration', 'campaign', 'pdays', 'previous', 'emp_var_rate',
'cons_price_idx', 'cons_conf_idx', 'euribor3m', 'nr_employed', 'y',
'job_admin.', 'job_blue-collar', 'job_entrepreneur',
'job_housemaid', 'job_management', 'job_retired',
'job_self-employed', 'job_services', 'job_student',
'job_technician', 'job_unemployed', 'job_unknown',
'marital_divorced', 'marital_married', 'marital_single',
'marital_unknown', 'education_Basic', 'education_high.school',
'education_illiterate', 'education_professional.course',
'education_university.degree', 'education_unknown', 'default_no',
'default_unknown', 'default_yes', 'housing_no', 'housing_unknown',
'housing_yes', 'loan_no', 'loan_unknown', 'loan_yes',
'contact_cellular', 'contact_telephone', 'month_apr', 'month_aug',
'month_dec', 'month_jul', 'month_jun', 'month_mar', 'month_may',
'month_nov', 'month_oct', 'month_sep', 'day_of_week_fri',
'day_of_week_mon', 'day_of_week_thu', 'day_of_week_tue',
'day_of_week_wed', 'poutcome_failure', 'poutcome_nonexistent',
'poutcome_success'], dtype=object)
'''
cat_vars=['job','marital','education','default','housing','loan','contact','month','day_of_week','poutcome']
for var in cat_vars:
cat_list = 'var' + '_' + var
cat_list = pd.get_dummies(data[var], prefix=var)
data1 = data.join(cat_list)
data = data1
cat_vars = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome']
data_vars = data.columns.values.tolist()
to_keep = [i for i in data_vars if i not in cat_vars]
data=data[to_keep]
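# For intuition (sketch): pd.get_dummies turns one categorical column into one indicator
# column per category, e.g. "marital" becomes marital_divorced / marital_married /
# marital_single / marital_unknown, matching the column listing in the docstring above.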
'''
separate the features and the target variable
'''
x = data.loc[:, data.columns != 'y']
y = data.loc[:, data.columns == 'y']
# x = Your code goes here
# y = Your code goes here
'''
As your target class is imbalanced, you need to use the SMOTE function to balance it. First separate your data into
training and testing sets, then apply SMOTE on the training set only. Remember not to touch the testing set.
'''
from imblearn.over_sampling import SMOTE
s=SMOTE(random_state=0)
x_train=x.sample(frac=0.7,random_state=0)
x_test=x.loc[~data.index.isin(x_train.index)]
y_train=y.sample(frac=0.7,random_state=0)
y_test=y.loc[~data.index.isin(x_train.index)]
columns=x_train.columns
s_data_x,s_data_y=s.fit_sample(x_train,y_train)
s_data_x = pd.DataFrame(data=s_data_x, columns=columns)
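# Optional sanity check (sketch, assuming the old imblearn API above returns 1-D arrays):
# after SMOTE the two classes in the resampled training target should have equal counts.
# print(pd.Series(np.ravel(s_data_y)).value_counts())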
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it" " represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25H")
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start="2000", periods=2)
warning, = m
assert 'freq="A-DEC"' in str(warning.message)
def test_constructor(self):
pi = period_range(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
pi = period_range(freq="D", start="1/1/2001", end="12/31/2009")
assert len(pi) == 365 * 9 + 2
pi = period_range(freq="B", start="1/1/2001", end="12/31/2009")
assert len(pi) == 261 * 9
pi = period_range(freq="H", start="1/1/2001", end="12/31/2001 23:00")
assert len(pi) == 365 * 24
pi = period_range(freq="Min", start="1/1/2001", end="1/1/2001 23:59")
assert len(pi) == 24 * 60
pi = period_range(freq="S", start="1/1/2001", end="1/1/2001 23:59:59")
assert len(pi) == 24 * 60 * 60
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2006-12-31", ("w", 1))
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2005-05-01", "B")
i1 = period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period("2006-12-31", "w")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
vals = np.array(vals)
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
def test_constructor_error(self):
start = Period("02-Apr-2005", "B")
end_intv = Period("2006-12-31", ("w", 1))
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end_intv)
msg = (
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
@pytest.mark.parametrize(
"freq", ["M", "Q", "A", "D", "B", "T", "S", "L", "U", "N", "H"]
)
def test_recreate_from_data(self, freq):
org = period_range(start="2001/04/01", freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq="A")
expected = Index([str(num) for num in raw])
res = index.map(str)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, str) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestSeriesPeriod:
def setup_method(self, method):
self.series = Series(period_range("2000-01-01", periods=10, freq="D"))
def test_constructor_cant_cast_period(self):
msg = "Cannot cast PeriodArray to dtype float64"
with pytest.raises(TypeError, match=msg):
Series(period_range("2000-01-01", periods=10, freq="D"), dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range("1/1/2000", periods=10), dtype=PeriodDtype("D"))
exp = Series(period_range("1/1/2000", periods=10))
| tm.assert_series_equal(s, exp) | pandas.util.testing.assert_series_equal |
import collections
import copy
import ixmp
import itertools
import os
import warnings
import pandas as pd
import numpy as np
from message_ix import default_paths
from ixmp.utils import pd_read, pd_write
from message_ix.utils import isscalar, logger
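# Default CPLEX solver options; Scenario.solve() writes these (plus any user overrides) to a cplex.opt file before invoking GAMS.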
DEFAULT_SOLVE_OPTIONS = {
'advind': 0,
'lpmethod': 2,
'threads': 4,
'epopt': 1e-6,
}
def init_storage(scen, *args, **kwargs):
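"""Initialize the sets and parameters used to represent storage in *scen*."""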
# Initiating a set to specify storage level (no commodity balance needed)
scen.init_set('level_storage')
# Initiating a set to specify storage reservoir technology
scen.init_set('storage_tec')
# Initiating a set to map storage reservoir to its charger/discharger
scen.init_set('map_tec_storage', idx_sets=['technology', 'storage_tec'])
# Initiating a parameter to specify the order of sub-annual time steps
scen.init_par('time_seq', idx_sets=['time'])
# Initiating a parameter for relating the content of storage in two
# different time steps (even in two different periods) together
scen.init_par('relation_storage',
idx_sets=['node', 'technology', 'level', 'year', 'year',
'time', 'time'],
idx_names=['node', 'technology', 'level', 'year_first',
'year_last', 'time_first', 'time_last'])
# Initiating two parameters for specifying lower and upper bounds of
# storage reservoir, and storage losses all as % of installed capacity
# (values should be between 0 and 1)
par_stor = ['bound_storage_lo', 'bound_storage_up', 'storage_loss']
for parname in par_stor:
scen.init_par(parname, idx_sets=['node', 'technology',
'level', 'year', 'time'])
def _init_scenario(s, commit=False):
"""Initialize a MESSAGEix Scenario object with default values"""
inits = (
{
'test': 'level_storage' not in s.set_list(),
'exec': [(init_storage, {'args': (s,)}), ],
},
)
pass_idx = [i for i, init in enumerate(inits) if init['test']]
if len(pass_idx) == 0:
return # leave early, all init tests pass
if commit:
s.check_out()
for idx in pass_idx:
for exec_info in inits[idx]['exec']:
func = exec_info[0]
args = exec_info[1].pop('args', tuple())
kwargs = exec_info[1].pop('kwargs', dict())
func(*args, **kwargs)
if commit:
s.commit('Initialized with standard sets and params')
class Scenario(ixmp.Scenario):
"""|MESSAGEix| Scenario.
This class extends :class:`ixmp.Scenario` and inherits all its methods. It
defines additional methods specific to |MESSAGEix|.
"""
def __init__(self, mp, model, scenario=None, version=None, annotation=None,
cache=False, clone=None, **kwargs):
"""Initialize a new message_ix.Scenario (structured input data and solution)
or get an existing scenario from the ixmp database instance
Parameters
----------
mp : ixmp.Platform
model : string
model name
scenario : string
scenario name
version : string or integer
initialize a new scenario (if version == 'new'), or
load a specific version from the database (if version is integer)
annotation : string
a short annotation/comment (when initializing a new scenario)
cache : boolean
keep all dataframes in memory after first query (default: False)
clone : Scenario, optional
make a clone of an existing scenario
"""
if 'scen' in kwargs:
warnings.warn(
'`scen` is deprecated and will be removed in the next' +
' release, please use `scenario`')
scenario = kwargs.pop('scen')
if version is not None and clone is not None:
raise ValueError(
'Can not provide both version and clone as arguments')
if clone is not None:
jscen = clone._jobj.clone(model, scenario, annotation,
clone._keep_sol, clone._first_model_year)
elif version == 'new':
scheme = 'MESSAGE'
jscen = mp._jobj.newScenario(model, scenario, scheme, annotation)
elif isinstance(version, int):
jscen = mp._jobj.getScenario(model, scenario, version)
else:
jscen = mp._jobj.getScenario(model, scenario)
self.is_message_scheme = True
super(Scenario, self).__init__(mp, model, scenario, jscen, cache=cache)
if not self.has_solution():
_init_scenario(self, commit=version != 'new')
def cat_list(self, name):
"""return a list of all categories for a set
Parameters
----------
name : string
name of the set
"""
return ixmp.to_pylist(self._jobj.getTypeList(name))
def add_cat(self, name, cat, keys, is_unique=False):
"""Map elements from *keys* to category *cat* within set *name*.
Parameters
----------
name : str
Name of the set.
cat : str
Name of the category.
keys : str or list of str
Element keys to be added to the category mapping.
is_unique: bool, optional
If `True`, then *cat* must have only one element. An exception is
raised if *cat* already has an element, or if ``len(keys) > 1``.
"""
self._jobj.addCatEle(name, str(cat), ixmp.to_jlist(keys), is_unique)
def cat(self, name, cat):
"""return a list of all set elements mapped to a category
Parameters
----------
name : string
name of the set
cat : string
name of the category
"""
return ixmp.to_pylist(self._jobj.getCatEle(name, cat))
def has_solution(self):
"""Returns True if scenario currently has a solution"""
try:
return not np.isnan(self.var('OBJ')['lvl'])
except Exception:
return False
def add_spatial_sets(self, data):
"""Add sets related to spatial dimensions of the model.
Parameters
----------
data : dict
Mapping of `level` → `member`. Each member may be:
- A single label for elements.
- An iterable of labels for elements.
- A recursive :class:`dict` following the same convention, defining
sub-levels and their members.
Examples
--------
>>> s = message_ix.Scenario()
>>> s.add_spatial_sets({'country': 'Austria'})
>>> s.add_spatial_sets({'country': ['Austria', 'Germany']})
>>> s.add_spatial_sets({'country': {
... 'Austria': {'state': ['Vienna', 'Lower Austria']}}})
"""
# TODO test whether unbalanced data or multiply-defined levels are
# handled correctly. How to define 'Germany' as a country *only* but
# two states within 'Austria'?
# >>> s.add_spatial_sets({'country': {
# ... 'Austria': {'country': 'Vienna'}}})
# >>> s.add_spatial_sets({'country': {
# ... 'Austria': {'state': 'Vienna'},
# ... 'Canada': {'province': 'Ontario'},
# ... }})
nodes = []
levels = []
hierarchy = []
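# Helper: walk the (possibly nested) level/member mapping, collecting node names, their levels, and (level, member, parent) hierarchy rows.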
def recurse(k, v, parent='World'):
if isinstance(v, collections.Mapping):
for _parent, _data in v.items():
for _k, _v in _data.items():
recurse(_k, _v, parent=_parent)
level = k
children = [v] if isscalar(v) else v
for child in children:
hierarchy.append([level, child, parent])
nodes.append(child)
levels.append(level)
for k, v in data.items():
recurse(k, v)
self.add_set("node", nodes)
self.add_set("lvl_spatial", levels)
self.add_set("map_spatial_hierarchy", hierarchy)
def add_horizon(self, data):
"""Add sets related to temporal dimensions of the model.
Parameters
----------
data : dict-like
Year sets. "year" is a required key. "firstmodelyear" is optional;
if not provided, the first element of "year" is used.
Examples
--------
>>> s = message_ix.Scenario()
>>> s.add_horizon({'year': [2010, 2020]})
>>> s.add_horizon({'year': [2010, 2020], 'firstmodelyear': 2020})
"""
if 'year' not in data:
raise ValueError('"year" must be in temporal sets')
horizon = data['year']
self.add_set("year", horizon)
first = data['firstmodelyear'] if 'firstmodelyear'\
in data else horizon[0]
self.add_cat("year", "firstmodelyear", first, is_unique=True)
def vintage_and_active_years(self, ya_args=None, in_horizon=True):
"""Return sets of vintage and active years for use in data input.
For a valid pair `(year_vtg, year_act)`, the following conditions are
satisfied:
1. Both the vintage year (`year_vtg`) and active year (`year_act`) are
in the model's ``year`` set.
2. `year_vtg` <= `year_act`.
3. `year_act` <= the model's first year **or** `year_act` is in the
smaller subset :meth:`ixmp.Scenario.years_active` for the given
`ya_args`.
Parameters
----------
ya_args : tuple of (node, technology, year_vtg), optional
Arguments to :meth:`ixmp.Scenario.years_active`.
in_horizon : bool, optional
Restrict years returned to be within the current model horizon.
Returns
-------
pandas.DataFrame
with columns, "year_vtg" and "year_act", in which each row is a
valid pair.
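Examples
--------
>>> # hypothetical node/technology names; assumes they exist in the scenario
>>> s.vintage_and_active_years(('seoul', 'coal_ppl', 2020))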
"""
horizon = self.set('year')
first = self.cat('year', 'firstmodelyear')[0] or horizon[0]
if ya_args:
if len(ya_args) != 3:
raise ValueError('3 arguments are required if using `ya_args`')
years_active = self.years_active(*ya_args)
combos = itertools.product([ya_args[2]], years_active)
else:
combos = itertools.product(horizon, horizon)
# TODO: casting to int here is probably bad, but necessary for now
first = int(first)
combos = [(int(y1), int(y2)) for y1, y2 in combos]
def valid(y_v, y_a):
# TODO: casting to int here is probably bad
ret = y_v <= y_a
if in_horizon:
ret &= y_a >= first
return ret
year_pairs = [(y_v, y_a) for y_v, y_a in combos if valid(y_v, y_a)]
v_years, a_years = zip(*year_pairs)
return pd.DataFrame({'year_vtg': v_years, 'year_act': a_years})
def solve(self, model='MESSAGE', solve_options={}, **kwargs):
"""Solve the Scenario.
Parameters
----------
model : string
the type of model to solve (e.g., MESSAGE or MESSAGE-MACRO)
solve_options : dict
name, value pairs to use for GAMS solver optfile,
see `message_ix.DEFAULT_SOLVE_OPTIONS` for defaults and see
https://www.gams.com/latest/docs/S_CPLEX.html for possible
arguments
By default, :meth:`ixmp.Scenario.solve` is called with "MESSAGE" as the
*model* argument; see the documentation of that method for other
arguments. *model* may also be overwritten, e.g.:
>>> s.solve(model='MESSAGE-MACRO')
"""
# TODO: we generate the cplex.opt file on the fly. this is *not* safe
# against race conditions. It is possible to generate opt files with
# random names (see
# https://www.gams.com/latest/docs/UG_GamsCall.html#GAMSAOoptfile);
# however, we need to clean up the code in ixmp that passes arguments
# to gams to do so.
fname = os.path.join(default_paths.model_path(), 'cplex.opt')
opts = copy.deepcopy(DEFAULT_SOLVE_OPTIONS)
opts.update(solve_options)
lines = '\n'.join('{} = {}'.format(k, v) for k, v in opts.items())
with open(fname, 'w') as f:
f.writelines(lines)
ret = super(Scenario, self).solve(model=model, **kwargs)
os.remove(fname)
return ret
def rename(self, name, mapping, keep=False):
"""Rename an element in a set
Parameters
----------
name : str
name of the set to change (e.g., 'technology')
mapping : str
mapping of old (current) to new set element names
keep : bool, optional, default: False
keep the old values in the model
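Examples
--------
>>> # hypothetical element names; assumes 'coal_ppl' is in the 'technology' set
>>> s.rename('technology', {'coal_ppl': 'coal_powerplant'})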
"""
try:
self.check_out()
commit = True
except:
commit = False
keys = list(mapping.keys())
values = list(mapping.values())
# search for from_tech in sets and replace
for item in self.set_list():
ix_set = self.set(item)
if isinstance(ix_set, pd.DataFrame):
if name in ix_set.columns and not ix_set.empty:
for key, value in mapping.items():
df = ix_set[ix_set[name] == key]
if not df.empty:
df[name] = value
self.add_set(item, df)
elif ix_set.isin(keys).any(): # ix_set is pd.Series
for key, value in mapping.items():
if ix_set.isin([key]).any():
self.add_set(item, value)
# search for from_tech in pars and replace
for item in self.par_list():
if name not in self.idx_names(item):
continue
for key, value in mapping.items():
df = self.par(item, filters={name: [key]})
if not df.empty:
df[name] = value
self.add_par(item, df)
# this removes all instances of from_tech in the model
if not keep:
for key in keys:
self.remove_set(name, key)
# commit
if commit:
self.commit('Renamed {} using mapping {}'.format(name, mapping))
def to_excel(self, fname):
"""Save a scenario as an Excel file. NOTE: Cannot export
solution currently (only model data) due to limitations in excel sheet
names (cannot have multiple sheet names which are identical except for
upper/lower case).
Parameters
----------
fname : string
path to file
"""
funcs = {
'set': (self.set_list, self.set),
'par': (self.par_list, self.par),
}
ix_name_map = {}
dfs = {}
for ix_type, (list_func, get_func) in funcs.items():
for item in list_func():
df = get_func(item)
df = | pd.Series(df) | pandas.Series |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
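# _NaT below is one more than the iNaT sentinel: a valid, hugely negative int64 that is not NaT, used to force int64 overflow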
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, | Timedelta('1s') | pandas.Timedelta |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, make_scorer
# In[2]:
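# Compute the Pearson correlation of every numeric feature (except 'cod_municipio') with the 'ideb' target.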
def calculate_pearson(df):
correlations = {}
numerical_features = df.select_dtypes(exclude = ["object"]).columns
numerical_features = numerical_features.drop("cod_municipio")
for i in numerical_features:
corr = stats.pearsonr(df[i], df['ideb'])[0]
correlations[i] = corr
df_corr = pd.DataFrame(list(correlations.items()), columns=['feature', 'correlation_with_ideb'])
df_corr = df_corr.dropna()
return df_corr
# In[3]:
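# Note: despite its name, this helper currently only returns the categorical column names; no correlation is computed.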
def calculate_categorical_correlation(df):
categorical_features = df.select_dtypes(include = ["object"]).columns
return categorical_features
# # Pull data from each group member's CSV
# ### Alexandre's data
# In[4]:
path = '../../data/'
# In[5]:
# Initial data (early grades)
alexandre_inicio_2015 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2015_ai.csv')
alexandre_inicio_2017 = | pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2017_ai.csv') | pandas.read_csv |
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
pdt.assert_frame_equal(expected, output)
def test_numeric_masking(anonym_small):
output = anonym_small.numeric_masking('age', inplace=False)
expected = pd.Series([7.5, -7.5], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_masking(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': [-4954.900676201789, 4954.900676201798],
'salary': [5.840670901327418e-15,
5.840670901327409e-15]})
pdt.assert_frame_equal(expected, output)
def test_numeric_rounding(anonym_small):
output = anonym_small.numeric_rounding('salary', inplace=False)
expected = pd.Series([60000.0, 50000.0], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_rounding(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': {0: 30, 1: 50}, 'salary': {0: 60000.0,
1: 50000.0}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_fake(anonym_small):
output = anonym_small.categorical_fake('name',
locale=['en_US'],
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake(['name', 'email'],
locale=['en_GB'],
seed=42,
inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
output = anonym_small.categorical_fake({'name': 'name_female'},
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake({'ssn': 'ssn', 'web': 'url'},
seed=42,
inplace=False)
expected = pd.DataFrame({'ssn': {0: '655-15-0410', 1: '760-36-4013'},
'web': {0: 'http://www.hill.net/',
1: 'http://johnson.com/'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_fake_auto(anonym_small):
output = anonym_small.categorical_fake_auto(seed=42, inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'},
'ssn': {0: '655-15-0410', 1: '760-36-4013'}})
| pdt.assert_frame_equal(expected, output) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: deep_ml_curriculum
# language: python
# name: deep_ml_curriculum
# ---
# # Time Series Forecasting
#
# In time series forecasting (TSF) the goal is to predict future values using the behaviour of the data in the past. We can use some of the techniques we learned about in the last notebook. For instance, Holt-Winters methods can be used for forecasting as well as analysis.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
plt.rcParams["figure.figsize"] = [12,5]
warnings.simplefilter("ignore")
# We will load a subset of London Smart meters dataset. This dataset shows electricity consumption of 5,567 houses in London. We will only use the data for a single block.
#
# The data shows daily consumption of each house and various statistics regarding their daily consumption. The original data is from [UK Power Networks](https://data.london.gov.uk/dataset/smartmeter-energy-use-data-in-london-households)
# Load data
df = block0 = pd.read_csv("../../data/processed/smartmeter/block_0.csv", parse_dates=['day'], index_col=['day'])[['energy_sum']]
# Get the mean over all houses, by day
df = df.groupby('day').mean().iloc[:-1]
# Rename energy to target
df = df.rename(columns={'energy_sum':'target'})
df.plot()
df
# In forecasting we try to predict the next step, therefore it is essential that we specify the frequency of the data so the model knows what we mean by the next step.
#
# Pandas data frames have frequency property, which we need to set
df.index
# You can see at the bottom that `freq` is set to `None`. We need to specify that the data is daily, so we use `freq = "1D"`.
df.index.freq = "1D"
# __Note:__ Most of the algorithms have ways of inferring the frequency if it is not set. But it is always safer to set it ourselves rather than leave it for the algorithms to figure out.
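# As a quick sketch of that inference (assuming the index is a regular daily `DatetimeIndex`), pandas can report the frequency it would infer:
# pandas returns a frequency string such as 'D' when the spacing is regular, or None when it cannot infer one
print(pd.infer_freq(df.index))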
# To measure whether our predictions are any good, we commonly split the data into two parts: one for training the model and the other for evaluating the forecasting quality. In time series we train on the past and predict the future, so the validation set needs to be in the future.
#
# The part that is used for training is called the training set, and for time series it is usually the data from the beginning up to a certain point in time. The part that is used for evaluation may be called the validation set, test set, or evaluation set. The validation set comes right after the training set, because we use the training set to understand the behaviour of the data and then want to know what happens right after that.
#
#
# Let's split our data into training and validation set. Let's split in a way so that last 30% is in validation set and the rest in training set.
# +
# We are forecasting, so split into past and future
n_split = -int(len(df)*0.7)
df_train = df[:-n_split]
df_valid = df[-n_split:]
ax = df_train['target'].plot(legend=True, label="Train")
df_valid['target'].plot(ax=ax, legend=True, label="Validation")
# -
# ## Stationarity
#
# A time series is considered stationary when its properties (mean and standard deviation) do not change with time. Therefore, any time series with trend or seasonality is not stationary. An example of stationary data is white noise:
plt.figure(figsize=(12, 8))
plt.plot(range(100), np.arange(100)/50, ls=':', c='b', label='line - not stationary')
plt.plot(range(100),np.sin(np.arange(100)/5)-2, c='b', label='sin - not stationary')
plt.plot(range(100), np.zeros(100), c='r', label='zeros - stationary')
plt.plot(range(100), np.random.randn(100)+4, ls='--', c='r', label='random noise - stationary')
plt.legend()
plt.xlabel('time [days]')
plt.title('examples of non/stationary series')
# Why is random noise stationary?
# The std and mean are constant
np.random.seed(42)
random_noise = pd.Series(np.random.randn(200))
plt.plot(random_noise, label='random noise')
random_noise.rolling(30).mean().plot(label='mean')
random_noise.rolling(30).std().plot(label='std')
plt.legend()
# Sin - this is not stationary
# The std and mean are not constant
np.random.seed(42)
series_sin = pd.Series(np.sin(np.arange(200)/5))
plt.plot(series_sin, label='sin(x/5)')
series_sin.rolling(50).mean().plot(label='mean')
series_sin.rolling(50).std().plot(label='std')
plt.legend()
# While it is easy to tell if a time series is not stationary when there is a clear trend, in some cases it might be pretty difficult to decide whether a time series is stationary or not. Therefore, we use statistical tests to make a decision.
#
# __Why is it important if a time series is stationary or not?__<br>
# We know that in a stationary time series the characteristics will remain constant. This makes it easier to predict its future behaviour as we expect it to behave similarly. But when the series is not stationary we don't know how it is going to behave in the future. In reality, most of the time series we are going to work with are not stationary. But using various techniques we might be able to transform them into a stationary time series. This is exactly what we will do below: use STL to remove the trend and seasonality and get a stationary time series.
# #### Augmented Dickey-Fuller test
#
# [Augmented Dickey-Fuller test](https://en.wikipedia.org/wiki/Augmented_Dickey%E2%80%93Fuller_test) (ADF) is a statistical test for stationarity. We are not going to discuss the statistical details of this test, but what matters to us is the result.
#
# The null hypothesis of ADF is that the series is not stationary (it has a unit root), so a small p-value lets us reject it and conclude the series is stationary.
#
# Let's test it on our data.
#
# +
from statsmodels.tsa.stattools import adfuller
def adf_p_value(data):
p = adfuller(data)[1]
# If p-value is lower than a threshold (commonly 0.05),
if p<0.05:
# it means the null hypothesis is rejected and therefore the time series is stationary.
return 'stationary (p={:2.2g})'.format(p)
else:
return 'not stationary (p={:2.2g})'.format(p)
# -
adf_p_value(df["target"])
# The underlying `adfuller` function returns many values, but the one that we are interested in is the p-value, which is the second value. If it is less than 0.05, it means the time series is stationary. In this case it is far from 0.05, which is what we expected, as the data has a clear trend.<br>
# Our `adf_p_value` helper wraps this check and reports the verdict along with the p-value. Now let's run the test on white noise.
adf_p_value(random_noise)
# The value is very small, which suggests we can reject the null hypothesis and therefore the series is stationary.
# ## Decomposing
# What if we remove trend and seasonality from the data using STL method?
from statsmodels.tsa.seasonal import seasonal_decompose
res = seasonal_decompose(df[:100], model="mul")
res.plot()
''
# If we remove the seasonal and trend component what is left is the residuals.<br>
# The residuals might have `NaN` in it. If so, we need to remove them before performing the test.
adf_p_value(res.resid.dropna().values[:, 0])
# The residual is stationary since the p value is lower than 0.05.
df.plot()
df.diff().plot()
df.diff(2).plot()
# Another technique to make a time series stationary is differencing. Differencing means that we calculate the difference between two consecutive points in time. Then we use the differences for forecasting.<br>
# Let's see how differencing will affect our data. Pandas has a built-in method for differencing (`.diff()`):
df.diff()
# We need to get rid of `NaN` so we can run the test.
adf_p_value(df.diff().dropna()["target"])
# As we can see the p-value is below the 0.05 threshold, which means differencing helped to convert the data into a stationary time series.<br>
# In some cases you might need to perform differencing multiple times to reach stationary results.
adf_p_value(df.diff(2).dropna()["target"])
# ## Autocorrelation
# Another characteristics of a time series is autocorrelation. Autocorrelation is simply the correlation between the points in the time series and the points before them (sometimes called lagged values).
#
# The shaded area is the confidence threshold on the correlation, based on Bartlett's formula $1/\sqrt{N}$, which assumes a Gaussian distribution. If a correlation is below this threshold, it is likely to be a coincidence.
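# As a rough sketch, the half-width of that shaded band for white noise is approximately $1.96/\sqrt{N}$ at the usual 95% level (the 1.96 normal quantile is an assumption added here, not something computed above):
# approximate 95% confidence band for the autocorrelation of white noise
threshold = 1.96 / np.sqrt(len(df))
print(f"correlations smaller than ~{threshold:.3f} are within the noise band")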
from statsmodels.graphics.tsaplots import plot_acf
df.plot()
plot_acf(df)
plt.xlabel('Lag (day)')
plt.ylabel('Correlation coeffecient')
''
# The points closer together in time have higher correlation compared to the points further apart. This is expected behaviour. However, how quickly the correlation decreases is important.
# ## Autoregressive models (AR)
# An [autoregressive model](https://en.wikipedia.org/wiki/Autoregressive_model) is a time series model which assumes a linear relationship between each point in time and its past $p$ points.
#
# $$y_t=c+\sum_{i=1}^{p}\phi_iy_{t-i}$$
# For instance a first order AR (also shown as AR(1)) can be written as:<br>
# $$y_t=c+\phi_1 y_{t-1}$$
# This model can be found in statsmodels in ar_model submodule.
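# Before fitting one, here is a minimal sketch (with made-up coefficients) of what an AR(1) process looks like when generated directly from the equation above:
# +
# simulate y_t = c + phi * y_{t-1} + noise with arbitrary example values
np.random.seed(0)
c, phi = 0.5, 0.8
y = np.zeros(200)
for t in range(1, 200):
    y[t] = c + phi * y[t - 1] + np.random.randn()
pd.Series(y).plot(title="Simulated AR(1) process")
# -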
# This is to avoid some warning messages from statsmodels
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
from statsmodels.tsa.ar_model import AR, ARResults
# Let's try an AR model on our data.
# +
model = AR(df_train)
# Then we train the model, specifying the maximum order of the AR terms. Let's start by trying `2`.
trained_model = model.fit(
maxlag=2,
trend='nc',
)
# Now the model is trained. We can view model's values:
print('params\n', trained_model.params)
# -
# More importantly, we can forecast using the trained model.
# +
# specify at which time-step in the training data the model should start and at which time-step it should stop
start = len(df_train)
end = len(df_train) + len(df_valid) - 1
forecast = trained_model.predict(start, end)
fig = plt.figure()
ax = fig.gca()
df_train['target'].plot(ax=ax, legend=True, label="Train")
df_valid['target'].plot(ax=ax, legend=True, label="Actual")
forecast.plot(ax=ax, legend=True, label="Forecast")
# -
# ## Metrics
# It's not very close. But how close? We need to put a value on the goodness of the result. To do this, we can use metrics. There are various metrics which can be used here, such as root mean squared error (RMSE), mean squared error (MSE), mean absolute error (MAE), $R^2$, and many more. Sometimes for a certain application you might need to use a particular metric.<br>
#
# We will use Mean Absolute Percentage Error.
#
# $$MAPE=\frac{\lvert y_{true}-y_{pred}\rvert}{y_{true}}$$
#
# There is a package called Scikit-Learn which is commonly used for machine learning and data science. This package contains many useful functions and algorithms. One of them is the metrics submodule where various types of metrics are available.
#
#
# +
from sklearn.metrics.regression import _check_reg_targets
def mape(y_true, y_pred, epsilon=1e-3):
"""
Mean absolute percentage error
This function already exists in newer versions of sklearn.
https://scikit-learn.org/dev/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, 'uniform_average')
# This is the important line
mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
return np.average(mape)
# -
# That's not so good! Let's calculate the mean absolute percentage error:
mape(df_valid, forecast)
# Now let's try a larger model by increasing the order of the AR model, so it looks at a longer-term trend.
model = AR(df_train)
model = model.fit(maxlag=7, trend='nc')
start = len(df_train)
end = len(df_train) + len(df_valid) - 1
forecast = model.predict(start, end)
fig = plt.figure()
ax = fig.gca()
df_train['target'].plot(ax=ax, legend=True, label="Train")
df_valid['target'].plot(ax=ax, legend=True, label="Actual")
forecast.plot(ax=ax, legend=True, label="Forecast")
mape(df_valid, forecast)
# Note that the MAPE is lower, meaning it is a better fit
# <div class="alert alert-success">
# <h2>Exercise</h2>
#
# Try a few other values yourself and see if you get a better/lower result than mape=0.4
#
# - try trend='nc', which makes it return to the mean.
# - try a great lag, which gives it more parameters
#
#
# Does it *look* better as well? Is MAPE capturing your intuition about a good fit?
#
#
# <details>
# <summary><b>→ Hints</b></summary>
#
# * try `model.fit(maxlag=30, trend='c')`
#
# </details>
#
# <br/>
# <br/>
# <details>
# <summary>
# <b>→ Solution</b>
# </summary>
#
# ```python
# model = AR(df_train)
# model = model.fit(maxlag=30, trend='nc')
# start = len(df_train)
# end = len(df_train) + len(df_valid) - 1
# forecast = model.predict(start, end)
# fig = plt.figure()
# ax = fig.gca()
# df_train['target'].plot(ax=ax, legend=True, label="Train")
# df_valid['target'].plot(ax=ax, legend=True, label="Actual")
# forecast.plot(ax=ax, legend=True, label="Forecast")
# mape(df_valid, forecast)
# ```
#
# </details>
#
# </div>
#
#
# # Prophet
# Prophet is a time series analysis and forecasting package developed by Facebook. Prophet allows you to train forecasting models with minimal need to adjust the model's parameters. Prophet is particularly useful when you are dealing with data that has multiple levels of seasonality.
#
# Let's start by importing the library. The name of the package is `fbprophet`.
from fbprophet import Prophet
# +
# Load data
df = block0 = pd.read_csv("../../data/processed/smartmeter/block_0.csv", parse_dates=['day'], index_col=['day'])[['energy_sum']]
# Get the mean over all houses, by day
df = df.groupby('day').mean()
# Rename energy to target
df = df.rename(columns={'energy_sum':'target'}).iloc[:-1]
n_split = -int(len(df)*0.85)
df_train = df[:-n_split]
df_valid = df[-n_split:]
ax = df_train['target'].plot(legend=True, label="Train")
df_valid['target'].plot(ax=ax, legend=True, label="Validation")
# df.plot()
# -
# Prophet needs the input data to be in a very specific format. The data needs to have a column containing the dates called `"ds"`, and a column containing the values named `"y"`. So we create a new data frame and use the required column names.
# +
df_trainp = pd.DataFrame({"ds": df_train.index, "y": df_train["target"]}).reset_index(drop=True)
df_trainp
df_validp = pd.DataFrame({"ds": df_valid.index, "y": df_valid["target"]}).reset_index(drop=True)
df_validp
# -
# Now the data is ready. We need to create a Prophet model and train it on the data.
# %%time
model = Prophet(holidays_prior_scale=0.01)
model.fit(df_trainp)
# And that's it! The model is trained and ready to be used.<br>
# Let's forecast the next year using the model. To forecast using Prophet we need to first create an empty dataframe for future values. This data frame contains the future dates. Then we feed this dataframe to `.predict()` and will get the forecasted values.
future = model.make_future_dataframe(periods=365)
future.head()
# __Note:__ this data frame only contains dates (no values); by default it includes the historical dates as well as the new future ones.
forecast = model.predict(future)
forecast.head()
# The result contains various components of the time series. The forecasted values can be found on `yhat` column. It is difficult to see how model has performed, so let's plot the results. We can do that using Prophets built-in plot function.
fig = model.plot(forecast)
fig.gca().plot(df_validp.ds, df_validp['y'], 'k.', c='r', label='validation')
plt.legend()
''
# As you can see at some periods the predictions are poor and at some points they are pretty close. Let's have a closer look at the future.
fig = model.plot(forecast)
fig.gca().plot(df_validp.ds, df_validp['y'], 'k.', c='r', label='validation')
plt.xlim(pd.to_datetime(["2013-06-15", "2013-08-15"]))
plt.ylim([10, 24])
# The model has found annual and weekly seasonalities. We can have a closer look at these components using `.plot_components()`
model.plot_components(forecast)
1
# Now we can see which days of the week are associated with more energy consumption (it's not surprising to see Saturday and Sunday) and also how the time of the year affects the energy consumption.
# ## Cross_validation
# We created a model and forecasted the future. But we still don't know how good the model is.
#
# So like before we need a training and a validation set. We train a model on the training set, and then measure the accuracy of its predictions on the validation set using metrics.
#
#
# One issue with this approach is that even when we get a value for the prediction accuracy of a model, how do we know this value is reliable? Let's say we are comparing two models: the mean absolute error for model A is 0.5 and for model B is 0.45. How do we know that B is better than A and didn't just get lucky on this data set?
#
# One way to be more confident about which one is better is to compare them over multiple sections of the data. This approach is called `cross validation`. In Prophet, we start by training the model on the data from the beginning up to a certain point (the cut-off point) and then predict for a few time steps (the horizon). Then we move the cut-off point by a certain period and repeat the process. We can then calculate the metrics for each model over multiple sections of the data and have a better comparison at the end.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/b/b5/K-fold_cross_validation_EN.svg"/>
# You need to specify the following inputs:
# - initial: The initial length of training set.
# - period: How much the cut-off point is moved after each training process.
# - horizon: Length of forecasting period for which the metrics are calculated.
#
# +
from fbprophet.diagnostics import cross_validation
# Cross validation
cv = cross_validation(model, initial="365 days", period="90 days", horizon="30 days")
cv.head()
# -
# The cross validation data frame shows the forecasted value (yhat) and its confidence range (yhat_upper and yhat_lower). We can use the `performance_metrics` function to calculate the metrics.
# +
from fbprophet.diagnostics import performance_metrics
perf = performance_metrics(cv)
perf.index = pd.Index(perf.horizon.dt.days, name='days')
perf
# -
# The dataframe above has multiple metrics for the model's predictions.
#
# <font color = red>__Note:__ In some versions of Prophet the results are aggregated based on how far the forecasted point is from the cut-off point. If this is not the case, then you will see that the horizon column has repeated values (for instance multiple "10 days" entries) and you will need to use groupby.</font>
# +
# uncomment and run if performance metrics are not aggregated based on horizon days
# perf = perf.groupby('horizon').mean()
# perf
# -
# Before running the next cell look at the performance data frame and find the first and last horizon days and enter it in the next cell as `start` and `end`.
perf[['mape']][:-1].plot(ylim=[0, 0.3])
plt.title("Mean Absolute Percent Error of forecasts")
# This plot shows that the further we are from the cut-off point, the larger the error is, which is what we expect. Now, let's compare this model with another one.<br>
#
# ## Holidays
#
# Prophet has the ability to include the effect of holidays on the time series as well. Let's see whether adding public holidays to the model will make it any better.
holiday_df = pd.read_csv(
"../../data/processed/smartmeter/uk_bank_holidays.csv",
names=("ds", "holiday"),
header=0,
)
holiday_df.head()
# +
model2 = Prophet(holidays=holiday_df)
model2.fit(df_trainp)
# Cross validation
cv2 = cross_validation(model2, initial="365 days", period="90 days", horizon="30 days")
perf2 = performance_metrics(cv2)
perf2.index = pd.Index(perf2.horizon.dt.days, name='days')
perf2
# +
# uncomment and run if performance metrics are not aggregated based on horizon days
# perf2 = perf2.groupby('horizon').mean()
# perf2
# -
# Now let's compare the models.
ax=plt.gca()
perf['mape'][:-1].plot(ax=ax, label="+ holidays")
perf2['mape'][:-1].plot(ax=ax, label="- holidays")
plt.title("Mean Absolute Percent Error of forecasts")
plt.ylabel("mape")
plt.legend()
# It seems adding holidays slightly lowered the error for the first couple of weeks.
# We can separately plot the models including the errors for all the horizons.
# +
from fbprophet.plot import plot_cross_validation_metric
fig = plot_cross_validation_metric(cv, metric="mape")
plt.ylim(0, 1)
plt.xlim(0, 27)
# -
# ## Trends
# One interesting feature of Prophet is that it can identify when the trend of the data is changing. We can add these change points to the plot as well.
# +
from fbprophet.plot import add_changepoints_to_plot
model = Prophet()
model.fit(df_trainp)
future = model.make_future_dataframe(periods=len(df_validp))
forecast = model.predict(future)
fig = model.plot(forecast)
ax = fig.gca()
a = add_changepoints_to_plot(ax, model, forecast)
ax.plot(df_validp.ds, df_validp['y'], 'k.', c='r', label='validation')
# -
# We can change the sensitivity of the model to the changes by setting `changepoint_prior_scale`.
model = Prophet(
changepoint_range=0.90,
changepoint_prior_scale=0.2,
)
model.fit(df_trainp)
future = model.make_future_dataframe(periods=len(df_validp))
forecast = model.predict(future)
fig = model.plot(forecast)
ax = fig.gca()
a = add_changepoints_to_plot(ax, model, forecast)
ax.plot(df_validp.ds, df_validp['y'], 'k.', c='r', label='validation')
# Prophet has many other parameters you can set to improve your model, including seasonality, growth type, etc. You can find more information about Facebook Prophet [here](https://facebook.github.io/prophet/docs/diagnostics.html).
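# As a sketch of what tuning a few of those parameters looks like (the values below are illustrative, not recommendations):
# +
# seasonality_mode='multiplicative' lets seasonal swings scale with the trend,
# and the prior scales control how flexible seasonality and changepoints are
model_tuned = Prophet(
    seasonality_mode='multiplicative',
    seasonality_prior_scale=5.0,
    changepoint_prior_scale=0.1,
)
model_tuned.fit(df_trainp)
# -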
# # Exercise
# <div class="alert alert-success">
# <h2>Exercise</h2>
#
# Now that we have learned about various time series forecasting techniques, try to apply some of these techniques to another block of houses from electricity usage.
#
# ```python
# # Load data
# df = block1 = pd.read_csv("../../data/processed/smartmeter/block_1.csv", parse_dates=['day'], index_col=['day'])[['energy_sum']]
# # Get the mean over all houses, by day
# df = df.groupby('day').mean()
# # Rename energy to target
# df = df.rename(columns={'energy_sum':'target'}).iloc[:-1]
#
# n_split = -int(len(df)*0.85)
# df_train = df[:-n_split]
# df_valid = df[-n_split:]
#
# df_trainp = pd.DataFrame({"ds": df_train.index, "y": df_train["target"]}).reset_index(drop=True)
# df_validp = pd.DataFrame({"ds": df_valid.index, "y": df_valid["target"]}).reset_index(drop=True)
#
# # COPY PREVIOUS CELL HERE (And change parameters)
# ```
#
#
# <details>
# <summary><b>→ Hints</b></summary>
#
# * Copy the cell above, and enter the new dataframe
# * Perhaps try `Prophet( changepoint_range=0.90, changepoint_prior_scale=0.2, holidays=holiday_df,)`
#
# </details>
#
# <br/>
# <br/>
# <details>
# <summary>
# <b>→ Solution</b>
# </summary>
#
# ```python
# # Load data
# df = block1 = pd.read_csv("../../data/processed/smartmeter/block_1.csv", parse_dates=['day'], index_col=['day'])[['energy_sum']]
# # Get the mean over all houses, by day
# df = df.groupby('day').mean()
# # Rename energy to target
# df = df.rename(columns={'energy_sum':'target'}).iloc[:-1]
#
# n_split = -int(len(df)*0.85)
# df_train = df[:-n_split]
# df_valid = df[-n_split:]
#
# df_trainp = pd.DataFrame({"ds": df_train.index, "y": df_train["target"]}).reset_index(drop=True)
# df_validp = pd.DataFrame({"ds": df_valid.index, "y": df_valid["target"]}).reset_index(drop=True)
#
# # help(Prophet)
# model = Prophet(
# changepoint_range=0.90,
# changepoint_prior_scale=0.2,
# holidays=holiday_df,
# )
# model.fit(df_trainp)
# future = model.make_future_dataframe(periods=len(df_validp))
# forecast = model.predict(future)
# fig = model.plot(forecast)
# ax = fig.gca()
# a = add_changepoints_to_plot(ax, model, forecast)
# ax.plot(df_validp.ds, df_validp['y'], 'k.', c='r', label='validation')
# ```
#
# </details>
#
# </div>
# +
# Load data
df = block1 = pd.read_csv("../../data/processed/smartmeter/block_1.csv", parse_dates=['day'], index_col=['day'])[['energy_sum']]
# Get the mean over all houses, by day
df = df.groupby('day').mean()
# Rename energy to target
df = df.rename(columns={'energy_sum':'target'}).iloc[:-1]
n_split = -int(len(df)*0.85)
df_train = df[:-n_split]
df_valid = df[-n_split:]
df_trainp = pd.DataFrame({"ds": df_train.index, "y": df_train["target"]}).reset_index(drop=True)
df_validp = pd.DataFrame({"ds": df_valid.index, "y": df_valid["target"]}).reset_index(drop=True)
# COPY PREVIOUS CELL HERE (And change parameters)
# -
# # (Advanced) Custom Seasonality
#
# This library was made by Facebook for tracking user trends. That means it is set up for growing, human-driven series with holidays and weekly seasonality. What if we have data that has a different seasonality?
#
# Here we use current speed from the [IMOS - Australian National Mooring Network (ANMN) Facility - Current velocity time-series](https://catalogue-imos.aodn.org.au/geonetwork/srv/api/records/ae86e2f5-eaaf-459e-a405-e654d85adb9c). We will use tidal periods related to the Sun and Moon instead of human calendar periods related to weeks and holidays.
# +
# from https://catalogue-imos.aodn.org.au/geonetwork/srv/api/records/ae86e2f5-eaaf-459e-a405-e654d85adb9c
import xarray as xr
xd = xr.open_dataset("../../data/processed/IMOS_ANMN/IMOS_ANMN-WA_AETVZ_20111221T060300Z_WATR20_FV01_WATR20-1112-Continental-194_END-20120704T050500Z_C-20200916T043212Z.nc")
name='CSPD'
df = xd.isel(HEIGHT_ABOVE_SENSOR=0)['CSPD'].isel(TIME=slice(0, -1000)).to_dataframe()[['CSPD']]
# Take the log, and smooth it by resampling to 4 hours
df['CSPD'] = np.log(df['CSPD'])
df = df.resample('4H').mean()
# Format for prophet
df = pd.DataFrame({"ds": df.index, "y": df['CSPD']})
# Split
n_split = -int(len(df)*0.7)
df_trainp = df[:-n_split]
df_validp = df[-n_split:]
ax = df_trainp['y'].plot(legend=True, label="Train")
df_validp['y'].plot(ax=ax, legend=True, label="Validation")
plt.ylabel('Current Speed')
# +
# %%time
# First let's try it with the default calendar/holiday seasonalities
model = Prophet()
model.fit(df_trainp)
forecast = model.predict(df_validp)
forecast.index = forecast.ds
fig = model.plot(forecast)
a = add_changepoints_to_plot(plt.gca(), model, forecast)
fig.gca().plot(df_validp.index, df_validp['y'], 'k.', c='r', label='validation')
plt.show()
''
# -
# %%time
# Cross validation
cv = cross_validation(model, horizon="7 days", period="4 days", initial="60 days", parallel='threads')
perf = performance_metrics(cv)
perf.index = pd.Index(perf.horizon.dt.days, name='days')
print('mape', perf.mape.mean())
# perf.mape.plot()
# perf
model.plot_components(forecast)
1
# This is tidal data, and the default (daily, weekly, yearly) seasonalities don't capture the dominant monthly seasonality in the tides. Let's add tidal frequencies and see if it does better.
#
# Also note that we have made growth flat, since tides tend to return to the mean.
# +
# %%time
model = Prophet(
growth='flat', # Recent addition https://github.com/facebook/prophet/pull/1466
# Disable default seasons
yearly_seasonality=False,
holidays=None,
daily_seasonality=False,
weekly_seasonality=False,
holidays_prior_scale=0.001,
)
# Add periods from the theory of tides https://en.wikipedia.org/wiki/Theory_of_tides (additive)
# Period is in days
# Fourier order is how many Fourier terms are used; higher is more complex and less stable
# Short
# model.add_seasonality(name='M4', period=6.21/24, fourier_order=1)
# model.add_seasonality(name='M6', period=4.14/24, fourier_order=1)
# model.add_seasonality(name='M6', period=8.17/24, fourier_order=1)
# Semi-diurnal
model.add_seasonality(name='M2', period=12.4206012/24, fourier_order=1)
model.add_seasonality(name='S2', period=12/24, fourier_order=1)
# model.add_seasonality(name='K2', period=12.65834751/24, fourier_order=1)
# diurnal
model.add_seasonality(name='K1', period=23.93447213/24, fourier_order=2)
# model.add_seasonality(name='O1', period=25.81933871/24, fourier_order=2)
# Monthly and higher
model.add_seasonality(name='Mm', period=27.554631896, fourier_order=2)
# model.add_seasonality(name='quarterly', period=91.25, fourier_order=5)
# model.add_seasonality(name='Ssa', period=182.628180208, fourier_order=1)
# model.add_seasonality(name='Sa', period=365.256360417, fourier_order=1)
model.fit(df_trainp)
forecast = model.predict(df_validp)
forecast.index = forecast.ds
fig = model.plot(forecast)
fig.gca().plot(df_validp.index, df_validp['y'], 'k.', c='r', label='validation')
a = add_changepoints_to_plot(plt.gca(), model, forecast)
plt.show()
''
# -
# Cross validation
cv = cross_validation(model, horizon="7 days", period="4 days", initial="60 days", parallel='threads')
perf = performance_metrics(cv)
perf.index = | pd.Index(perf.horizon.dt.days, name='days') | pandas.Index |
#!/usr/bin/env python3
import os
import json
import h5py
import argparse
import pandas as pd
import numpy as np
import tinydb as db
from tinydb.storages import MemoryStorage
from pprint import pprint
import matplotlib.pyplot as plt
plt.style.use('../clint.mpl')
from matplotlib.colors import LogNorm
from pygama import DataGroup
import pygama.io.lh5 as lh5
import pygama.analysis.histograms as pgh
import pygama.analysis.peak_fitting as pgf
def main():
doc="""
analysis of Aug 2020 OPPI+CAGE commissioning runs (138-141)
tasks:
- load calibration from energy_cal
- show 1460 peak stability
- show removal of low-e retrigger noise
- look at waveforms near 5 MeV, confirm they're muon-related
- look at low-e waveforms, examine noise
- determine pz correction value
"""
rthf = argparse.RawTextHelpFormatter
par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
arg, st, sf = par.add_argument, 'store_true', 'store_false'
arg('-q', '--query', nargs=1, type=str,
help="select file group to calibrate: -q 'run==1' ")
args = par.parse_args()
# load main DataGroup, select files from cmd line
dg = DataGroup('cage.json', load=True)
if args.query:
que = args.query[0]
dg.fileDB.query(que, inplace=True)
else:
dg.fileDB = dg.fileDB[-1:]
view_cols = ['runtype', 'run', 'cycle', 'startTime', 'runtime', 'threshold']
print(dg.fileDB[view_cols])
# -- run routines --
# show_raw_spectrum(dg)
# show_cal_spectrum(dg)
# show_wfs(dg)
# data_cleaning(dg)
# peak_drift(dg)
# pole_zero(dg)
label_alpha_runs(dg)
def show_raw_spectrum(dg):
"""
show spectrum w/ onbd energy and trapE
- get calibration constants for onbd energy and 'trapE' energy
- TODO: fit each expected peak and get resolution vs energy
"""
# get file list and load energy data (numpy array)
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
dsp_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB['dsp_file']
edata = lh5.load_nda(dsp_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/dsp')
rt_min = dg.fileDB['runtime'].sum()
u_start = dg.fileDB.iloc[0]['startTime']
t_start = pd.to_datetime(u_start, unit='s') # str
print('Found energy data:', [(et, len(ev)) for et, ev in edata.items()])
print(f'Runtime (min): {rt_min:.2f}')
elo, ehi, epb, etype = 0, 25000, 10, 'trapEmax'
ene_uncal = edata[etype]
hist, bins, _ = pgh.get_hist(ene_uncal, range=(elo, ehi), dx=epb)
# normalize by runtime
hist_rt = np.divide(hist, rt_min * 60)
plt.plot(np.nan, np.nan, '-w', lw=1, label=t_start)
plt.semilogy(bins[1:], hist_rt, ds='steps', c='b', lw=1,
label=f'{etype}, {rt_min:.2f} mins')
plt.xlabel(etype, ha='right', x=1)
plt.ylabel('cts / sec', ha='right', y=1)
plt.legend()
plt.tight_layout()
plt.show()
def show_cal_spectrum(dg):
"""
apply calibration to dsp file
"""
# get file list and load energy data (numpy array)
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
dsp_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB['dsp_file']
edata = lh5.load_nda(dsp_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/dsp')
rt_min = dg.fileDB['runtime'].sum()
u_start = dg.fileDB.iloc[0]['startTime']
t_start = | pd.to_datetime(u_start, unit='s') | pandas.to_datetime |
import time
import datetime
import numpy as np
import pandas as pd
import random
import re
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from nltk.stem.snowball import SnowballStemmer
random.seed(1729)
start_time = time.time()
date = str(datetime.datetime.now().strftime(format='%m%d'))
print("::Start time- ", datetime.datetime.now())
snowball = SnowballStemmer('english')
print('### Importing...%s minutes ###' % (round((time.time() - start_time) / 60, 2)))
train = pd.read_csv('input/train.csv', encoding="ISO-8859-1")#[:1000]
test = | pd.read_csv('input/test.csv', encoding="ISO-8859-1") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 19:51:21 2018
@author: Bob
"""
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sqlalchemy import create_engine
from config import config
import pandas as pd
import numpy as np
import unidecode
import psycopg2
import re
import click
from tqdm import tqdm
from mpproj.routefinder.StyleInformation import *
def MPAnalyzer():
'''Finishes cleaning routes using formulas that require information about
the whole database.
The Bayesian rating system, route clustering algorithm and calculation of
TFIDF values require information about all routes, and not just one that is
of interest. Therefore, this file must be run after all data collection
has finished. This function is a handler for six functions:
- bayesian_rating: Calculates the weighted quality rating for each
route
- route_clusters: Groups routes together based on geographic distance
- idf: Calculates inverse-document-frequency for words in the route
descriptions
- tfidf: Calclates term-frequency-inverse-document-frequency for words
in route descriptions
- normalize: Normalizes vectors for TFIDF values
- find_route_styles: Compares routes to the ideal to help categorize
Returns:
Updated SQL Database
'''
print('Connecting to the PostgreSQL database...', end='')
engine = create_engine(
'postgresql+psycopg2://postgres:postgres@localhost:5432/routes')
params = config.config()
conn = psycopg2.connect(**params)
cursor = conn.cursor()
print('Connected')
tqdm.pandas()
def tfidf(min_occur=0.001, max_occur=0.9):
''' Calculates Term-Frequency-Inverse-Document-Frequency for a body of
documents.
Term-Frequency-Inverse-Document-Frequency(TFIDF) is a measure of the
importance of words in a body of work measured by how well they help to
distinguish documents. Words that appear frequently in documents score
high on the Term-Frequency metric, but if they are common across the
corpus, they will have low Inverse-Document-Frequency scores. TFIDF
can then be used to compare documents to each other, or, in this case,
to documents with known topics.
TFIDF = TF * IDF
TF = Term Frequency
IDF = Inverse Document Frequency
Args:
min_occur(int): The minimum number of documents that a word has to
appear in to be counted. Included to ignore words that only
appear in a few documents, and are therefore not very useful
for categorization.
max_occur(int): The maximum number of documents that a word can
appear in to be counted. This is included to ignore highly
common words that don't help with categorization.
Returns:
routes(pandas Dataframe): Holds route-document information,
including term-frequency, inverse-document-frequency, TFIDF,
and normalized TFIDF values
Updated SQL Database: Updates the TFIDF table on main DB with the
routes dataframe
'''
print('Getting number of routes', end=' ', flush=True)
cursor.execute('SELECT COUNT(route_id) FROM Routes')
num_docs = cursor.fetchone()[0]
print(num_docs)
print('Getting route text data', flush=True)
min_occur *= num_docs
max_occur *= num_docs
query = 'SELECT route_id, word, tf FROM Words'
routes = pd.read_sql(query, con=conn, index_col='route_id')
print('Removing non-essential words.', flush=True)
routes = routes.groupby('word', group_keys=False)
routes = routes.progress_apply(
weed_out,
min_occur=min_occur,
max_occur=max_occur)\
.set_index('route_id')
print('Getting IDF', flush=True)
routes = routes.groupby('word', group_keys=False)
routes = routes.progress_apply(
idf,
num_docs=num_docs).set_index('route_id')
print('Calculating TFIDF', flush=True)
routes['tfidf'] = routes['tf'] * routes['idf']
print('Normalizing TFIDF values', flush=True)
routes = routes.groupby(routes.index, group_keys=False)
routes = routes.progress_apply(lambda x: normalize('tfidf', table=x))
print('Writing TFIDF scores to SQL', flush=True)
routes = routes.set_index('route_id')
routes = routes[['word', 'idf', 'tfidfn']]
# This will take a long time
routes.to_sql('TFIDF', con=engine, if_exists='replace', chunksize=1000)
def weed_out(table, min_occur, max_occur):
'''Removes words that are too common or too rare
Args:
table(Series): Instances of a word
min_occur: Fewest number acceptable
max_occur: Greatest number acceptable
Returns:
table: updated series'''
if min_occur < len(table) < max_occur:
return table.reset_index()
def idf(word, num_docs):
''' Finds inverse document frequency for each word in the selected
corpus.
Inverse document frequency(IDF) is a measure of how often a word
appears in a body of documents. The value is calculated by:
IDF = 1 + log(N / dfj)
N = Total number of documents in the corpus
dfj = Document frequency of a certain word, i.e., the number of
documents that the word appears in.
Args:
word(pandas dataframe): A dataframe composed of all instances of a
word in a corpus.
num_docs(int): The total number of documents in the corpus
Returns:
word(pandas dataframe): The same document with the calculated IDF
score appended.
'''
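# Worked example (hypothetical numbers): with num_docs = 100,000 and a word
# that appears in 1,000 of them, idf = 1 + ln(100) ~= 5.6, while a word that
# appears in every document gets idf = 1.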
word['idf'] = 1 + np.log(num_docs / len(word))
return word.reset_index()
def normalize(*columns, table, inplace=False):
''' Normalizes vector length.
Vector values must be normalized to a unit vector to control for
differences in length. This process is done by calculating the length
of a vector and dividing each term by that value. The resulting
'unit-vector' will have a length of 1.
Args:
table(pandas dataframe): Table hosting vector to be normalized
*columns(str): Names of columns to be normalized
inplace(Boolean, default = False):
If inplace=False, adds new columns with normalized values.
If inplace=True, replaces the columns.
Returns:
table(pandas dataframe): Updated dataframe with normalized values.
'''
for column in columns:
if not inplace:
column_name = column + 'n'
elif inplace:
column_name = column
length = np.sqrt(np.sum(table[column] ** 2))
table[column_name] = table[column] / length
return table.reset_index()
def fill_null_loc():
"""Fills empty route location data.
Not all routes have latitude and longitude coordinates, so we must use
the coordinates of their parent area instead as a rough estimate. This
function first grabs all routes with no data, then fills in the data
with the lowest level area it can, going up as many areas as needed
until it finds one with proper coordinates.
Returns:
Updated SQL Database
"""
print('Filling in empty locations', flush=True)
# Select a route without location data
cursor.execute('''
SELECT route_id, area_id, name FROM Routes
WHERE latitude is Null OR longitude is Null
LIMIT 1''')
route = cursor.fetchone()
while route is not None:
# Route ID
rid = route[0]
# From ID
fid = route[1]
name = route[2]
print(f'Finding location information for {name}')
# Loops until it finds proper data
lat, long = None, None
while lat == None or long == None:
# Gets latitude and longitude from parent area
cursor.execute(f'''
SELECT
latitude,
longitude,
from_id
FROM Areas
WHERE id = {fid}
LIMIT 1''')
loc = cursor.fetchone()
lat, long = loc[0], loc[1]
fid = loc[2]
# Updates DB
cursor.execute(f'''
UPDATE Routes
SET
latitude = {lat},
longitude = {long}
WHERE route_id = {rid}''')
conn.commit()
cursor.execute('''
SELECT
route_id,
area_id,
name
FROM Routes
WHERE
latitude is Null
OR longitude is Null
LIMIT 1''')
route = cursor.fetchone()
def route_clusters(routes):
''' Clusters routes into area groups that are close enough to travel
between when finding climbing areas.
Routes can be sorted into any number of sub-areas below the 'region'
parent. By clustering the routes based on latitude and longitude
instead of the name of the areas and parent areas, the sorting
algorithm will be able to more accurately determine which routes are
close together. This function uses scikit-learn's DBSCAN density-based
clustering algorithm. The algorithm works by grouping points together
in space based on upper-limits of distance and minimum numbers of
members of a cluster. More generally, the algorithm first finds the
epsilon neighborhood of a point. This is the set of all points whose
distance from a given point is less than a specified value epsilon.
Then, it finds the connected core-points, which are the points that
have at least the minimum number of connected points in its
neighborhood. Non-core points are ignored here. Finally, the
algorithm assigns each non-core point to a nearby cluster if it is within
epsilon, or assigns it to noise if it is not.
The advantages of this are that the scan clusters data of any shape, has
a robust response to outliers and noise, and that the epsilon and min
points variables can be adjusted.
This function returns the label/name for the cluster that a route
appears in, as well as the number of other routes in that same cluster.
This will allow the sorting algorithm to more heavily weight routes
that are clustered near others.
Args:
routes(pandas df): Pulled from cleaned route SQL DB with columns:
- route_id (int, unique): Unique route identifiers
- latitude (float)
- longitude (float)
Returns:
routes(pandas df): Updated with clustered area group number:
- route_id (int, unique): Unique route identifiers
- area_group (int): Cluster id
'''
# Route location
lats = routes['latitude']
longs = routes['longitude']
locs = []
for x in range(len(lats)):
locs.append((lats.iloc[x], longs.iloc[x]))
# Converted into df
locs = StandardScaler().fit_transform(locs)
# Max distance in latitude
epsilon = 0.0007
# Min number of routes in a cluster
min_routes = 3
# Distance-based scan
db = DBSCAN(eps=epsilon, min_samples=min_routes).fit(locs)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
# Cluster names
labels = db.labels_
unique, counts = np.unique(labels, return_counts=True)
counts = dict(zip(unique, counts))
# Number of routes in the same cluster as a given route
area_counts = []
for label in labels:
if label >= 0:
# Counts number of routes
area_counts.append(counts[label])
# Areas are given a cluster id of -1 if they are not part of a
# cluster
elif label == -1:
# If so, there is only 1 route in their 'cluster'
area_counts.append(1)
routes['area_group'] = labels
routes['area_counts'] = area_counts
routes = routes[['area_group', 'area_counts']]
return routes
def bayesian_rating(routes):
''' Updates route quality with weighted average.
The Bayesian average rating system helps to mitigate the effects of
user ratings for routes that only have a few reviews. The weighted
rating works by first finding the average rating for all routes, and
using that to bring low-rated routes up and high-rated routes down.
The result - the Bayes rating - is an updated rating weighted by the
average number of stars across all routes. The weight decreases
according to the number of votes cast.
Bayesian rating = ((r * v) + (a * 10)) / (v + 10)
r = Route rating
v = Number of votes
a = Average rating across all routes
Essentially, the function gives each route phantom-users who all give
the route the average score. For routes with a high number of ratings
the effect of the additional phantom users is minimal, but for routes
with only one or two actual user ratings, the effect is large. This
keeps 4-star rated routes from dominating the sorting algorithm if they
only have a few votes, and helps promote unrated routes that may be of
high quality.
Args:
routes(pandas df): Pulled from cleaned route SQL DB with columns:
- route_id (int, unique): Unique route identifiers
- stars (float): Raw average rating
- votes (int): Number of user ratings
Returns:
routes(pandas df): Updated dataframe with Bayes rating and columns:
- route_id (int, unique): Unique route identifiers
- bayes (float): Weighted average rating
'''
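# Worked example (hypothetical numbers): a route rated 4.0 stars from only
# 2 votes, in a corpus averaging 2.5 stars, gets
# ((4.0 * 2) + 2.5 * 10) / (2 + 10) = 2.75, pulling the small-sample rating
# toward the corpus mean.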
# Average rating of all routes
stars = pd.read_sql('SELECT stars FROM Routes', con=conn)
avg_stars = np.mean(stars)['stars']
# Weighted Bayesian rating
routes['bayes'] = round((((routes['votes'] * routes['stars'])
+ avg_stars * 10) / (routes['votes'] + 10)), 1)
return routes['bayes'].to_frame()
def find_route_styles(*styles, path='Descriptions/'):
''' Returns weighted scores that represent a route's likelihood of
containing any of a series of features, e.g., a roof, arete, or crack.
Route names, descriptions, and user comments can indicate the presence
of rock and route features. Term-Frequency-Inverse-Document-Frequency
(TFIDF) values for the blocks of text gathered for each route can be
compared to 'archetypal' routes to glean insight into these features.
This comparison is further clarified using Bayesian statistics to
measure the credibility of the comparison, and is then adjusted to
reflect that. At present, each route is compared against archetypal
routes with the following features:
Aretes - A sharp vertical edge of a block, cliff or boulder
Chimney - A large vertical crack that a climber can fit in and
climb using opposing pressure
Crack - Smaller cracks ranging from finger-sized to a few inches
wide (off-width)
Slab - Low-angle rock faces (less than vertical)
Overhang - Roofs, caves or more-than-vertical rock faces
More styles or archetypes can be added in the future by creating .txt
files and adding them to the 'Descriptions' sub-folder, then adding the
style to the *styles argument.
Args:
*styles(str): The name of the files that each route will be
compared against.
path(str): Folder location of the Database
Returns:
Updated SQL Database with weighted route scores
'''
def text_splitter(text):
'''Splits text into words and removes punctuation.
Once the text has been scraped it must be split into individual
words for further processing. The text is all put in lowercase,
then stripped of punctuation and accented letters. Tokenizing helps
to further standardize the text, then converts it to a list of
words. Each word is then stemmed using a Porter stemmer. This
removes suffixes that make similar words look different, turning,
for example, 'walking' or 'walked' into 'walk'. Stop words are
also filtered out at this stage.
Args:
text(str): Single string of text to be handled
Returns:
text(list): List of processed words.'''
# Converts to lowercase
text = text.lower()
# Strips punctuation and converts accented characters to unaccented
text = re.sub(r"[^\w\s]", '', text)
text = unidecode.unidecode(text)
# Tokenizes words and returns a list
text = word_tokenize(text)
# Remove stopwords
stop_words = set(stopwords.words('english'))
# Stems each word in the list
ps = PorterStemmer()
text = [ps.stem(word) for word in text if word not in stop_words]
return text
def archetypal_tf(*styles, path):
''' Returns term-frequency data for descriptions of archetypal
climbing routes and styles. This will be used later to categorize
routes.
Term-Frequency = t / L
t = Number of appearances for a word in a document
L = Number of total words in the document
Args:
*styles(str): Name of .txt file to parse. Can either be the
plain name or have the .txt suffix
path(str): Path to folder with route descriptions
Returns:
tf.csv(CSV File): CSV File of term frequency for each style.
This will help determine if TF values are what is expected
when adding new styles.
archetypes(Pandas Dataframe): Holds words term-frequency values
for words in the files.'''
# Initializes Dataframe
archetypes = pd.DataFrame()
for style in styles:
# Formats suffix
if style.endswith('.txt'):
# Opens .txt file
try:
file = open(path + style)
style = style[:-4]
# Returns errors
except OSError as e:
return e
else:
try:
file = open(path + style + '.txt')
except OSError as e:
return e
# Creates single block of text
text = ''
for line in file:
text += line
# Splits and processes text
text = text_splitter(text)
# Length of document in words
length = len(text)
# Counts appearances of each word
text = pd.DataFrame({'word': text})['word']\
.value_counts()\
.rename('counts')\
.to_frame()
# Calculates Term-Frequency
text[style] = text['counts'].values / length
text = text[style]
# Creates master Dataframe of Termfrequency data for each style
archetypes = pd.concat([archetypes, text], axis=1, sort=True)
archetypes.to_csv(path + 'TF.csv')
return archetypes
def archetypal_idf(words):
''' Finds inverse document frequency (IDF) for each word in the
archetypal style documents.
The archetypal documents should not be included in the calculation
of IDF values, so this function just pulls the IDF values from the
database after they are calculated. IDF is a measure of how often a
word appears in a body of documents. The value is calculated by:
IDF = 1 + log(N / dfj)
N = Total number of documents in the corpus
dfj = Document frequency of a certain word, i.e., the number
of documents that the word appears in.
Args:
word(list): All unique words in all the archetype documents
Returns:
archetypes(pandas dataframe): IDF values for each word pulled
from the Database.'''
# Formats query to include list of unique words
query = f'''
SELECT
DISTINCT(word),
idf
FROM "TFIDF"
WHERE word IN {words}'''
# Pulls SQL data into Pandas dataframe
archetypes = pd.read_sql(query, con=conn, index_col='word')
return archetypes
def get_routes(route_ids=None):
'''Creates Pandas Dataframe of normalized TFIDF values for each
word in each route description.
Args:
route_ids: Optional. Allows for a slice to be parsed.
Returns:
routes(Pandas Series): MultiIndex series with indexes
'route_id' and 'word' and column 'tfidfn' - Normalized TFIDF'''
# Pulls route_id, word, and normalized TFIDF value
if route_ids is None:
query = '''
SELECT
route_id,
word,
tfidfn
FROM "TFIDF"'''
else:
route_ids = tuple(route_ids)
query = f'''
SELECT
route_id,
word,
tfidfn
FROM "TFIDF"
WHERE route_id in {route_ids}'''
# Creates Pandas Dataframe
routes = pd.read_sql(
query,
con=engine,
index_col=['route_id', 'word'])
routes = routes.squeeze()
return routes
def get_word_count(route_ids=None):
'''Finds length of route description in words.
Args:
route_ids: Optional. Allows for a slice to be parsed
Returns:
word_count(Pandas dataframe): Dataframe with index route_id and
column 'word_count' - length of a route description in
words'''
# Pulls route_id and word_count for each route
if route_ids is None:
query = 'SELECT route_id, word_count FROM Words'
else:
route_ids = tuple(route_ids)
query = f'''
SELECT
route_id,
word_count
FROM Words
WHERE route_id in {route_ids}'''
# Calculates document length
word_count = pd.read_sql(query,
con=conn,
index_col='route_id').groupby(level=0)
# We will take the log of the word count later, so we cannot leave
# zeroes in the series
word_count = word_count.progress_apply(lambda x: np.sum(x) + 0.01)
word_count.fillna(0.01, inplace=True)
return word_count
def cosine_similarity(route, archetypes):
'''Compares routes to archetypes to help categorize route style.
Cosine similarity is the angle between two vectors. Here, the
normalized TFIDF values for each word in the route description and
archetype documents serve as the coordinates of the vector. Finding
the cosine similarity is therefore simply their dot-product.
Cosine Similarity = Σ(ai * bi)
ai = TFIDF for a word in the route description
bi = TFIDF for the same word in the archetype document.
The similarity will range between 0 and 1, 1 being identical and 0
having no similarity.
Args:
route(Pandas dataframe): MultiIndex frame with indexes route_id
and word and columns normalized TFDIF values
archetypes(Pandas dataframe): Frame with index word and columns
normalized TFIDF values.
Returns:
terrain(Pandas dataframe): Frame with columns for each style,
holding cosine similarity values.'''
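# Because both the route and archetype vectors were already normalized to
# unit length, summing the element-wise products below gives the cosine of
# the angle between them directly.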
try:
rid = route.index[0][0]
except:
return
route = archetypes.multiply(route, axis=0)
terrain = pd.DataFrame(index=[rid])
for column in route:
cosine_sim = np.sum(route[column])
terrain[column] = cosine_sim
return terrain
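    # Worked example (comments only, made-up numbers): suppose a route's
    # normalized TFIDF vector is {'overhang': 0.8, 'roof': 0.6} and the
    # 'overhang' archetype's vector is {'overhang': 0.6, 'roof': 0.8}.
    # Both have unit length (0.8**2 + 0.6**2 == 1), so the cosine similarity
    # is just the dot product: 0.8*0.6 + 0.6*0.8 = 0.96, a very close match.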
def score_routes(*styles, word_count, path, routes):
'''Gets TF, IDF data for archetypes, then finds TFIDF and cosine
similarity for each route/style combination.
Finding the raw cosine similarity scores requires the functions
archetypal_tf, archetypal_idf, and normalize. This function helps
organize the retrieval and processing of the data for those functions.
    Args:
        *styles(str): Names of the style archetypes
        word_count(Pandas dataframe): Dataframe with index route_id and
            column 'word_count' - length of a route description in
            words
        path(str): Location of the folder holding the archetype
            documents and the TFIDF.csv output
        routes(Pandas Series): Normalized TFIDF values for each word in
            each route description
    Returns:
        TFIDF.csv(CSV file): TFIDF for each word in each style. This
            helps users determine if the TFIDF values are what they
            would expect when adding new styles.
        routes(Pandas dataframe): Holds cosine similarity for each
            route/style combination'''
if click.confirm('Rescore archetypes?'):
# Gets Term-Frequency data for words in archetype documents
archetypes = archetypal_tf(*styles, path=path)
# Gets list of unique words in archetype documents
words = tuple(archetypes.index.tolist())
# Gets IDF Values for those words from the Database
idf = archetypal_idf(words)
        # Selects words from the archetype documents that have a corresponding
# IDF value in the database
archetypes = archetypes[archetypes.index.isin(idf.index)]
# Multiplies TF by IDF values to get TFIDF score
archetypes = archetypes.mul(idf['idf'], axis=0)
# Normalizes TFIDF scores
archetypes = normalize(
table=archetypes,
inplace=True,
*styles)
archetypes = archetypes.rename(
columns={'index': 'word'}
).set_index('word')
# Writes to CSV
archetypes.to_csv(path + 'TFIDF.csv')
archetypes = pd.read_csv(path + 'TFIDF.csv', index_col='word')
# Groups words by route_id, then finds cosine similarity for each
# route-style combination
routes = routes.groupby('route_id').progress_apply(
cosine_similarity,
archetypes=archetypes)
# Reformats routes dataframe
routes.index = routes.index.droplevel(1)
routes = pd.concat([routes, word_count], axis=1, sort=False)
routes.fillna(0, inplace=True)
return routes
def weighted_scores(*styles, table, inplace=False):
'''Weights cosine similarity based on credibility.
The cosine similarity between a route and a style archetype
measures how close the two documents are. Depending on the score
and the word count of the route, however, this score can be more or
    less believable. Weighting the scores by a credibility term, and
    shrinking low-evidence scores toward the mean (a Bayesian-style
    adjustment), accounts for this.
We can plot word count and cosine similarity in two dimensions.
Normalizing each so that the maximum value is one results in a
plane with four edge cases:
cosine similarity | word count
0 0
1 0
0 1
1 1
    When both word count and cosine similarity are high, the
    believability of the cosine score is at its highest. This is
    analogous to a route that scores well with the 'overhang' document,
therefore mentioning words like 'overhang' or 'roof' frequently,
that also has a lot of words.
    If the word count is high and the cosine similarity is low, the
    believability of the score is high, but not as high as before.
    This is analogous to a route that never mentions words associated
with 'overhang' despite a high word count. We can be reasonably
sure in this case that the route does not have an overhang.
If the word count of a route is low but the cosine score is high,
we can be reasonably sure that the score is somewhat accurate. This
is a result of a route called, for instance, 'Overhang Route'.
Despite the low word count, it is highly likely that the route has
an overhang on it.
Finally, for routes that have both low word count and cosine score,
we have no way to be sure of the presence (or absence) of a
feature. In this case, our best guess is that the route is at
chance of featuring a given style of climbing.
If we chart word count, cosine similarity, and the credibility of
the cosine score, we are left with a cone with a point at the
origin, reaching up at a 45 degree angle along the credibility (z)
axis. Each route will exist somewhere on the surface of the cone.
To make use of this, we need to calculate this position. The height
    of the cone at that point gives us the credibility, and can be calculated with:
Credibility = sqrt(W ** 2 + C ** 2) * tan(45 degrees)
Since tan(45 degrees) is 1, this simplifies to:
Credibility = sqrt(W ** 2 + C ** 2)
W = Word count
C = Cosine similarity
The credibility of a route's score can be fed back into the score
to find a weighted route score. As the word count and cosine score
get close to zero, the average score should play more of a role in
the outcome. Therefore:
Score = C * sqrt(W ** 2 + C ** 2) + (1 - C)(1 - W) * Cm
W = word count
C = cosine Similarity
Cm = Average cosine similarity across routes
Finally, the scores are processed with a Sigmoid function,
specifically the logistic function.
    f(x) = L / (1 + e^(-k(x - x')))
L = upper bound
e = Euler's constant
k = logistic growth rate
x' = Sigmoid midpoint
By manipulating the constants in this function, we can find a
continuous threshold-like set of values that are bounded by 0 and
1. The midpoint of the threshold is the mean value of the scores
    plus one standard deviation. Therefore, the function used here is:
    f(x) = 1 / (1 + e^(-100(x - x')))
x' = mean + sigma
e = Euler's constant
Args:
*styles(str): Names of the style archetypes
table(Pandas dataframe): Master dataframe of cosine scores for
each route
inplace(Boolean, default = False):
If inplace=False, adds new columns with weighted values.
If inplace=True, replaces the columns.
Returns:
        table(Pandas dataframe): The input table with weighted, thresholded scores for each style'''
# Gets name for the columns to write data
if inplace:
count = 'word_count'
else:
count = 'word_count_norm'
# As the word count increases, the credibility increases as a
# logarithmic function
table[count] = np.log10(table['word_count'])
table_min = table[count].min()
table_max = table[count].max()
table_diff = table_max - table_min
table[count] = (table[count].values - table_min) / table_diff
# Gets weighted scores for each style
for style in styles:
# Stores name to write data on
if inplace:
column_name = style
else:
column_name = style + '_weighted'
# Find average cosine similarity across routes
style_avg = table[style].mean()
# Calculate weighted rating
table[column_name] = (
table[style].values * np.sqrt(
table[style].values ** 2 + table[count].values ** 2)
+ (1 - table[count].values) * (1 - table[style].values)
* style_avg)
threshold = table[column_name].mean() + table[column_name].std()
# Calculates final score using Sigmoid function
table[column_name] = (
1 / (1 + np.e ** (-100 *
(table[column_name]
- threshold))))
return table
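    # Worked example (comments only, made-up numbers): for a route with
    # normalized word count W = 0.9, cosine score C = 0.8, and an average
    # cosine score Cm = 0.3 across routes, the weighted score before the
    # logistic step is
    #     0.8 * sqrt(0.9**2 + 0.8**2) + (1 - 0.9) * (1 - 0.8) * 0.3 ~= 0.969.
    # If the threshold (mean + one standard deviation) were 0.5, the logistic
    # step 1 / (1 + e**(-100 * (0.969 - 0.5))) would push the final score to
    # ~1.0, while a route with W = 0.1 and C = 0.2 would be pushed toward 0.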
# Run functions
print('Getting route information')
routes = get_routes()
print('Getting word count')
word_count = get_word_count()
print('Scoring routes')
routes = score_routes(
*styles,
word_count=word_count,
path=path,
routes=routes)
print('Getting weighted scores')
routes = weighted_scores(*styles, table=routes, inplace=True)
# Collects the full database
query = 'SELECT * FROM Routes'
all_routes = | pd.read_sql(query, conn, index_col='route_id') | pandas.read_sql |
# -*- coding: utf-8 -*-
#
import logging
logger = logging.getLogger(__name__)
import sys, os, time
from datetime import datetime
from timeit import default_timer as timer
try:
from humanfriendly import format_timespan
except ImportError:
def format_timespan(seconds):
return "{:.2f} seconds".format(seconds)
from .config import Config
from .util import load_random_state, prepare_directory, load_spark_dataframe, save_pandas_dataframe_to_pickle, remove_seed_papers_from_test_set, remove_missing_titles, year_lowpass_filter, predict_ranks_from_data, load_pandas_dataframe
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.metrics import classification_report
from .transformers import ItemSelector, DataFrameColumnTransformer, ClusterTransformer, AverageTfidfCosSimTransformer
from .pipeline_autoreview import PipelineExperiment
class Autoreview(object):
"""Toplevel Autoreview object"""
def __init__(self, outdir, id_list=None, citations=None, papers=None, sample_size=None, random_seed=None, id_colname='UID', citing_colname=None, cited_colname='cited_UID', title_colname='title', use_spark=True, config=None, citations_data_preloaded=None, paper_data_preloaded=None):
"""
:outdir: output directory.
:random_seed: integer
The following are needed for collecting the paper sets.
They can either be set on initialization, or using prepare_for_collection()
:id_list: list of strings: IDs for the seed set
:citations: path to citations data
:papers: path to papers data
:sample_size: integer: size of the seed set to split off from the initial. the rest will be used as target papers
:id_colname: default is 'UID'
        :citing_colname: default is None, in which case id_colname is used
:cited_colname: default is 'cited_UID'
:use_spark: whether to use spark to get seed and candidate paper sets
:citations_data_preloaded: use this to supply citations data already in memory, instead of having to load it within the Autoreview object. Should be a pandas dataframe. Only do this if not using Spark.
:paper_data_preloaded: use this to supply papers data already in memory, instead of having to load it within the Autoreview object. Should be a pandas dataframe. Only do this if not using Spark.
"""
self.outdir = outdir
self.random_state = load_random_state(random_seed)
self.prepare_for_collection(id_list, citations, papers, sample_size, id_colname, citing_colname, cited_colname, citations_data_preloaded, paper_data_preloaded)
# if these are unspecified, the user will have to overwrite them later by calling prepare_for_collection() manually
self.title_colname = title_colname
if config is not None:
# TODO: be less strict when checking that the config is valid (currently only supports config objects from autoreview module)
assert isinstance(config, Config)
self._config = config
else:
self._config = Config()
self.use_spark = use_spark
if self.use_spark is True:
self.spark = self._config.spark
self.best_model_pipeline_experiment = None
def prepare_for_collection(self, id_list, citations, papers, sample_size, id_colname='UID', citing_colname=None, cited_colname='cited_UID', citations_data_preloaded=None, paper_data_preloaded=None):
"""Provide arguments for paper collection (use this if these arguments were not already provided at initialization)
:id_list: list of strings: IDs for the seed set
:citations: path to citations data
:papers: path to papers data
:sample_size: integer: size of the seed set to split off from the initial. the rest will be used as target papers
:id_colname: default is 'UID'
        :citing_colname: default is None, in which case id_colname is used
:cited_colname: default is 'cited_UID'
:citations_data_preloaded: use this to supply citations data already in memory, instead of having to load it within the Autoreview object. Should be a pandas dataframe.
:paper_data_preloaded: use this to supply papers data already in memory, instead of having to load it within the Autoreview object. Should be a pandas dataframe.
"""
self.id_list = id_list
self.citations = citations
self.papers = papers
self.sample_size = sample_size
self.id_colname = id_colname
if citing_colname is not None:
self.citing_colname = citing_colname
else:
self.citing_colname = id_colname
self.cited_colname = cited_colname
self.df_citations = citations_data_preloaded
self.df_papers = paper_data_preloaded
def follow_citations(self, df, df_citations, use_spark=True):
"""follow in- and out-citations
:df: a dataframe with one column `ID` that contains the ids to follow in- and out-citations
:df_citations: dataframe with citation data. columns are `ID` and `cited_ID`
:returns: dataframe with one column `ID` that contains deduplicated IDs for in- and out-citations
"""
if use_spark is True:
return self._follow_citations_spark(df, df_citations)
df_outcitations = df_citations.merge(df, on='ID', how='inner')
df_incitations = df_citations.merge(df.rename(columns={'ID': 'cited_ID'}, errors='raise'), on='cited_ID', how='inner')
combined = np.append(df_outcitations.values.flatten(), df_incitations.values.flatten())
combined = np.unique(combined)
return pd.DataFrame(combined, columns=['ID'])
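    # Worked example (comments only, made-up IDs): with df = {'ID': ['A']} and
    # df_citations containing rows (ID='A', cited_ID='B') and (ID='C', cited_ID='A'),
    # the out-citation merge keeps A->B and the in-citation merge keeps C->A, so
    # the returned frame holds the deduplicated IDs {'A', 'B', 'C'}.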
def _follow_citations_spark(self, sdf, sdf_citations):
sdf_outcitations = sdf_citations.join(sdf, on='ID', how='inner')
_sdf_renamed = sdf.withColumnRenamed('ID', 'cited_ID')
sdf_incitations = sdf_citations.join(_sdf_renamed, on='cited_ID')
sdf_combined = self.combine_ids([sdf_incitations, sdf_outcitations])
return sdf_combined
    def combine_ids(self, sdfs):
        """Given a list of spark dataframes with columns ['ID', 'cited_ID'],
        return a dataframe with one column 'ID' containing all of the IDs in both columns of the input dataframes
"""
sdf_combined = self.spark.createDataFrame([], schema='ID string')
for sdf in sdfs:
            # add the 'ID' column
sdf_combined = sdf_combined.union(sdf.select(['ID']))
            # add the 'cited_ID' column (renamed to 'ID' first)
sdf_combined = sdf_combined.union(sdf.select(['cited_ID']).withColumnRenamed('cited_ID', 'ID'))
return sdf_combined.drop_duplicates()
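    # Worked example (comments only): for an input frame with rows
    # (ID='A', cited_ID='B') and (ID='C', cited_ID='A'), unioning the 'ID' and
    # renamed 'cited_ID' columns yields ['A', 'C', 'B', 'A'], and
    # drop_duplicates() reduces this to the distinct IDs {'A', 'B', 'C'}.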
def get_papers_2_degrees_out(self, use_spark=True):
"""For a list of paper IDs (in `self.id_list`),
get all papers citing or cited by those, then repeat for
all these new papers.
For the three sets of papers---seed, target, and test (candidate) papers---
save a pickled pandas dataframe.
:returns: pandas dataframes for seed, target, and test papers
"""
df_id_list = | pd.DataFrame(self.id_list, columns=['ID'], dtype=str) | pandas.DataFrame |
import pandas as pd
import pickle
import json
import seaborn as sns
import pprint
import numpy as np
import math
def get_builds_from_commits(_commits):
_build_ids = jobs[jobs.commitsha.isin(_commits)].buildid
return builds[(builds.id.isin(_build_ids))]
def get_builds_from_ids(_builds, _build_ids):
return _builds[(_builds.id.isin(_build_ids))]
def get_commits_from_comparison_row(row, commitsDf):
_x = commitsDf
return _x[(_x.from_tag == row.from_tag) & (_x.to_tag == row.to_tag)].commitsha
def detect_build_bursts(_builds, gap_size, burst_size, states):
print(len(_builds))
positive_count = 0
negative_count = 0
n_bursts = 0
burst_sizes = []
i = 0
for index, row in _builds.sort_values(by="started_at").iterrows():
i+=1
        if (i == len(_builds)) or (row.state not in states):
negative_count+=1
if(negative_count == gap_size):
if(positive_count >= burst_size):
n_bursts+=1
burst_sizes.append(positive_count)
negative_count = 0
positive_count = 0
if(row.state in states):
positive_count+=1
return n_bursts, burst_sizes
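# Illustrative usage sketch (not used by the analysis below): a hand-made build
# history run through detect_build_bursts. With gap_size=1 and burst_size=2,
# the first two failing builds form one burst of size 2; the later lone failure
# is not long enough to count. Column names match what the function expects.
def _detect_build_bursts_example():
    toy_builds = pd.DataFrame({
        'started_at': pd.date_range('2020-01-01', periods=5, freq='H'),
        'state': ['failed', 'errored', 'passed', 'failed', 'passed'],
    })
    # Expected result: (1, [2])
    return detect_build_bursts(toy_builds, gap_size=1, burst_size=2,
                               states=["errored", "failed", "canceled"])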
def build_burst_metrics(row):
gap_size = 1
burst_size = 2
states = ["errored","failed","canceled"]
_commits = get_commits_from_comparison_row(row, tags_iterative_pr_commits)
_builds = get_builds_from_commits(_commits)
_builds = builds_commitref[builds_commitref.id.isin(_builds.id)]
data=[]
for commitref in _builds.commitref.unique():
_ref_builds = _builds[_builds.commitref==commitref]
data.append(detect_build_bursts(_ref_builds, gap_size, burst_size, states))
bursts = np.array([])
bursts_size = np.array([])
for d in data:
bursts = np.append(bursts,d[0])
for v in d[1]:
bursts_size = np.append(bursts_size,v)
return (bursts.mean(), bursts_size.mean())
if __name__ == "__main__":
csv_folder = "csv"
tags_comparison = pd.read_csv(f"{csv_folder}/tags_comparison_final_updated_no_rc_and_milestones.csv", index_col=0)
tags_comparison.from_commit_date = pd.to_datetime(tags_comparison.from_commit_date)
tags_comparison.to_commit_date = pd.to_datetime(tags_comparison.to_commit_date)
tags_comparison.from_author_date = pd.to_datetime(tags_comparison.from_author_date)
tags_comparison.to_author_date = pd.to_datetime(tags_comparison.to_author_date)
tags_comparison = tags_comparison[2:]
tags_iterative_pr_commits = pd.read_csv(f"{csv_folder}/commits_for_tags/tags_pairs_iterative_commits.csv", index_col=0)
builds = pd.read_csv(f"{csv_folder}/builds_cleaned.csv", index_col=0)
jobs = | pd.read_csv(f"{csv_folder}/allJobs.csv", index_col=0) | pandas.read_csv |
import sys
import time
import numpy as np
import pandas as pd
from scipy.special import softmax
train_path = sys.argv[1]
test_path = sys.argv[2]
def f(pred,Y_train):
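    # Mean negative log-likelihood (cross-entropy) of the one-hot targets
    # Y_train under the predicted class probabilities `pred`; since each
    # log-probability is <= 0, abs() returns a positive value, lower is better.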
v = np.log(np.sum(Y_train*pred,axis=1))
#print(np.sum(v))
return abs(np.sum(v)/Y_train.shape[0])
def read_and_encode(train_path,test_path):
train = pd.read_csv(train_path, index_col = 0)
test = pd.read_csv(test_path, index_col = 0)
Y_df = train['Length of Stay']
train = train.drop(columns = ['Length of Stay'])
#Ensuring consistency of One-Hot Encoding
data = pd.concat([train, test], ignore_index = True)
cols = train.columns
cols = cols[:-1]
data = | pd.get_dummies(data, columns=cols, drop_first=True) | pandas.get_dummies |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.caltrack.usage_per_day import (
CalTRACKUsagePerDayCandidateModel,
CalTRACKUsagePerDayModelResults,
DataSufficiency,
_caltrack_predict_design_matrix,
fit_caltrack_usage_per_day_model,
caltrack_usage_per_day_predict,
caltrack_sufficiency_criteria,
get_intercept_only_candidate_models,
get_too_few_non_zero_degree_day_warning,
get_total_degree_day_too_low_warning,
get_parameter_negative_warning,
get_parameter_p_value_too_high_warning,
get_cdd_only_candidate_models,
get_hdd_only_candidate_models,
get_cdd_hdd_candidate_models,
select_best_candidate,
)
from eemeter.exceptions import MissingModelParameterError, UnrecognizedModelTypeError
from eemeter.features import (
compute_time_features,
compute_temperature_features,
compute_usage_per_day_feature,
merge_features,
)
from eemeter.metrics import ModelMetrics
from eemeter.warnings import EEMeterWarning
from eemeter.transform import day_counts, get_baseline_data
def test_candidate_model_minimal():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status"
)
assert candidate_model.model_type == "model_type"
assert candidate_model.formula == "formula"
assert candidate_model.status == "status"
assert candidate_model.model_params == {}
assert candidate_model.warnings == []
assert str(candidate_model).startswith("CalTRACKUsagePerDayCandidateModel")
assert candidate_model.json() == {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
}
def test_candidate_model_json_with_warning():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
warnings=[eemeter_warning],
)
assert candidate_model.json() == {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_candidate_model_json_none_and_nan_values():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
warnings=[eemeter_warning],
r_squared_adj=None,
)
assert candidate_model.json()["r_squared_adj"] is None
candidate_model.r_squared_adj = np.nan
assert candidate_model.json()["r_squared_adj"] is None
def test_data_sufficiency_minimal():
data_sufficiency = DataSufficiency(status="status", criteria_name="criteria_name")
assert data_sufficiency.status == "status"
assert data_sufficiency.criteria_name == "criteria_name"
assert data_sufficiency.warnings == []
assert data_sufficiency.settings == {}
assert str(data_sufficiency).startswith("DataSufficiency")
assert data_sufficiency.json() == {
"criteria_name": "criteria_name",
"data": {},
"settings": {},
"status": "status",
"warnings": [],
}
def test_data_sufficiency_json_with_warning():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
data_sufficiency = DataSufficiency(
status="status", criteria_name="criteria_name", warnings=[eemeter_warning]
)
assert data_sufficiency.json() == {
"criteria_name": "criteria_name",
"settings": {},
"status": "status",
"data": {},
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_model_results_minimal():
model_results = CalTRACKUsagePerDayModelResults(
status="status", method_name="method_name"
)
assert model_results.status == "status"
assert model_results.method_name == "method_name"
assert model_results.model is None
assert model_results.r_squared_adj is None
assert model_results.candidates == []
assert model_results.warnings == []
assert model_results.metadata == {}
assert model_results.settings == {}
assert str(model_results).startswith("CalTRACKUsagePerDayModelResults")
assert model_results.json() == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": None,
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
def test_model_results_json_with_objects():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status"
)
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
candidates=[candidate_model],
warnings=[eemeter_warning],
)
assert model_results.json(with_candidates=True) == {
"candidates": [
{
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
}
],
"metadata": {},
"interval": None,
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_model_results_json_with_nan_r_squared_adj():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
r_squared_adj=np.nan,
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
r_squared_adj=np.nan,
)
assert model_results.json() == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
def test_model_results_json_with_model_metrics():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status", r_squared_adj=0.5
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
r_squared_adj=np.nan,
)
model_metrics = ModelMetrics(
observed_input=pd.Series([0, 1, 2]), predicted_input=pd.Series([1, 0, 2])
)
json_result = model_results.json()
json.dumps(json_result) # just make sure it's valid json
assert "totals_metrics" in json_result
assert "avgs_metrics" in json_result
json_result["totals_metrics"] = {} # overwrite because of floats
json_result["avgs_metrics"] = {} # overwrite because of floats
assert json_result == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": {},
"avgs_metrics": {},
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": 0.5,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
@pytest.fixture
def utc_index():
return pd.date_range("2011-01-01", freq="H", periods=365 * 24 + 1, tz="UTC")
@pytest.fixture
def temperature_data(utc_index):
series = pd.Series(
[
30.0 * ((i % (365 * 24.0)) / (365 * 24.0)) # 30 * frac of way through year
+ 50.0 # range from 50 to 80
for i in range(len(utc_index))
],
index=utc_index,
)
return series
@pytest.fixture
def prediction_index(temperature_data):
return temperature_data.resample("D").mean().index
@pytest.fixture
def candidate_model_no_model_none():
return CalTRACKUsagePerDayCandidateModel(
model_type=None,
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
def test_caltrack_predict_no_model_none(
candidate_model_no_model_none, prediction_index, temperature_data
):
with pytest.raises(ValueError):
candidate_model_no_model_none.predict(prediction_index, temperature_data)
@pytest.fixture
def candidate_model_intercept_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
def test_caltrack_predict_intercept_only(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert prediction["predicted_usage"].sum() == 365
assert sorted(prediction.columns) == ["predicted_usage"]
def test_caltrack_predict_intercept_only_with_disaggregated(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert prediction["base_load"].sum() == 365.0
assert prediction["cooling_load"].sum() == 0.0
assert prediction["heating_load"].sum() == 0.0
assert prediction["predicted_usage"].sum() == 365.0
assert sorted(prediction.columns) == [
"base_load",
"cooling_load",
"heating_load",
"predicted_usage",
]
def test_caltrack_predict_intercept_only_with_design_matrix(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0.0
assert prediction.n_days_kept.sum() == 365
assert prediction.predicted_usage.sum() == 365.0
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_missing_params():
return CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={},
)
def test_caltrack_predict_missing_params(
candidate_model_missing_params, prediction_index, temperature_data
):
with pytest.raises(MissingModelParameterError):
candidate_model_missing_params.predict(prediction_index, temperature_data)
@pytest.fixture
def candidate_model_cdd_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="cdd_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1, "beta_cdd": 1, "cooling_balance_point": 65},
)
def test_caltrack_predict_cdd_only(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data
)
prediction_df = model_prediction.result
assert round(prediction_df.predicted_usage.sum()) == 1733
def test_caltrack_predict_cdd_only_with_disaggregated(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1733
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 0.0
assert round(prediction.cooling_load.sum()) == 1368.0
def test_caltrack_predict_cdd_only_with_design_matrix(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_65",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.cdd_65.sum()) == 1368.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1733
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_hdd_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="hdd_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1, "beta_hdd": 1, "heating_balance_point": 65},
)
def test_caltrack_predict_hdd_only(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1734
def test_caltrack_predict_hdd_only_with_disaggregated(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1734
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 1369.0
assert round(prediction.cooling_load.sum()) == 0.0
def test_caltrack_predict_hdd_only_with_design_matrix(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"hdd_65",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.hdd_65.sum()) == 1369.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1734
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_cdd_hdd():
return CalTRACKUsagePerDayCandidateModel(
model_type="cdd_hdd",
formula="formula",
status="QUALIFIED",
model_params={
"intercept": 1,
"beta_hdd": 1,
"heating_balance_point": 60,
"beta_cdd": 1,
"cooling_balance_point": 70,
},
)
def test_caltrack_predict_cdd_hdd(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1582.0
def test_caltrack_predict_cdd_hdd_disaggregated(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1582.0
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 609.0
assert round(prediction.cooling_load.sum()) == 608.0
def test_caltrack_predict_cdd_hdd_with_design_matrix(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_70",
"hdd_60",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.cdd_70.sum()) == 608.0
assert round(prediction.hdd_60.sum()) == 609.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1582.0
assert round(prediction.temperature_mean.mean()) == 65.0
def test_caltrack_predict_cdd_hdd_with_design_matrix_missing_temp_data(
candidate_model_cdd_hdd, il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
prediction_index = meter_data.index[2:4]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temp_data = temperature_data["2015-11":"2016-03"]
temp_data_greater_90perc_missing = temp_data[
~(
(pd.Timestamp("2016-01-27T12:00:00", tz="utc") < temp_data.index)
& (temp_data.index < pd.Timestamp("2016-01-31T12:00:00", tz="utc"))
)
].reindex(temp_data.index)
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temp_data_greater_90perc_missing, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_70",
"hdd_60",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert prediction.shape == (0, 7)
@pytest.fixture
def candidate_model_bad_model_type():
return CalTRACKUsagePerDayCandidateModel(
model_type="unknown", formula="formula", status="QUALIFIED", model_params={}
)
def test_caltrack_predict_bad_model_type(
candidate_model_bad_model_type, temperature_data, prediction_index
):
with pytest.raises(UnrecognizedModelTypeError):
candidate_model_bad_model_type.predict(prediction_index, temperature_data)
def test_caltrack_predict_empty(
candidate_model_bad_model_type, temperature_data, prediction_index
):
model_prediction_obj = candidate_model_bad_model_type.predict(
prediction_index[:0], temperature_data[:0]
)
assert model_prediction_obj.result.empty is True
@pytest.fixture
def cdd_hdd_h54_c67_billing_monthly_totals(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[54],
cooling_balance_points=[67],
use_mean_daily_values=False,
)
data = merge_features([meter_data, temperature_features])
return data
def test_caltrack_predict_design_matrix_input_avg_false_output_avg_true(
cdd_hdd_h54_c67_billing_monthly_totals
):
data = cdd_hdd_h54_c67_billing_monthly_totals
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=False,
output_averages=True,
)
assert round(prediction.mean(), 3) == 28.253
def test_caltrack_predict_design_matrix_input_avg_false_output_avg_false(
cdd_hdd_h54_c67_billing_monthly_totals
):
data = cdd_hdd_h54_c67_billing_monthly_totals
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=False,
output_averages=False,
)
assert round(prediction.mean(), 3) == 855.832
@pytest.fixture
def cdd_hdd_h54_c67_billing_monthly_avgs(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[54],
cooling_balance_points=[67],
use_mean_daily_values=True,
)
meter_data_feature = compute_usage_per_day_feature(meter_data)
data = merge_features([meter_data_feature, temperature_features])
return data
def test_caltrack_predict_design_matrix_input_avg_true_output_avg_false(
cdd_hdd_h54_c67_billing_monthly_avgs
):
data = cdd_hdd_h54_c67_billing_monthly_avgs
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=False,
)
assert round(prediction.mean(), 3) == 855.832
def test_caltrack_predict_design_matrix_input_avg_true_output_avg_true(
cdd_hdd_h54_c67_billing_monthly_avgs
):
data = cdd_hdd_h54_c67_billing_monthly_avgs
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
assert round(prediction.mean(), 3) == 28.253
def test_caltrack_predict_design_matrix_n_days(cdd_hdd_h54_c67_billing_monthly_totals):
# This makes sure that the method works with n_days when
# DatetimeIndexes are not available.
data = cdd_hdd_h54_c67_billing_monthly_totals
data = data.reset_index(drop=True)
data["n_days"] = 1
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
assert prediction.mean() is not None
def test_caltrack_predict_design_matrix_no_days_fails(
cdd_hdd_h54_c67_billing_monthly_totals
):
# This makes sure that the method fails if neither n_days nor
# a DatetimeIndex is available.
data = cdd_hdd_h54_c67_billing_monthly_totals
data = data.reset_index(drop=True)
with pytest.raises(ValueError):
_caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
def test_get_too_few_non_zero_degree_day_warning_ok():
warnings = get_too_few_non_zero_degree_day_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
degree_days=pd.Series([1, 1, 1]),
minimum_non_zero=2,
)
assert warnings == []
def test_get_too_few_non_zero_degree_day_warning_fail():
warnings = get_too_few_non_zero_degree_day_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
degree_days=pd.Series([0, 0, 3]),
minimum_non_zero=2,
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.model_type.too_few_non_zero_xdd"
)
assert warning.description == (
"Number of non-zero daily XDD values below accepted minimum."
" Candidate fit not attempted."
)
assert warning.data == {
"minimum_non_zero_xdd": 2,
"n_non_zero_xdd": 1,
"xdd_balance_point": 65,
}
def test_get_total_degree_day_too_low_warning_ok():
warnings = get_total_degree_day_too_low_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
avg_degree_days=pd.Series([1, 1, 1]),
period_days=pd.Series([3, 1, 2]),
minimum_total=4,
)
assert warnings == []
def test_get_total_degree_day_too_low_warning_fail():
warnings = get_total_degree_day_too_low_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
avg_degree_days=pd.Series([0.5, 0.5, 0.5]),
period_days=pd.Series([3, 1, 2]),
minimum_total=4,
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.model_type.total_xdd_too_low"
)
assert warning.description == (
"Total XDD below accepted minimum. Candidate fit not attempted."
)
assert warning.data == {
"total_xdd": 3.0,
"total_xdd_minimum": 4,
"xdd_balance_point": 65,
}
def test_get_parameter_negative_warning_ok():
warnings = get_parameter_negative_warning(
"intercept_only", {"intercept": 0}, "intercept"
)
assert warnings == []
def test_get_parameter_negative_warning_fail():
warnings = get_parameter_negative_warning(
"intercept_only", {"intercept": -1}, "intercept"
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.intercept_negative"
)
assert warning.description == (
"Model fit intercept parameter is negative. Candidate model rejected."
)
assert warning.data == {"intercept": -1}
def test_get_parameter_p_value_too_high_warning_ok():
warnings = get_parameter_p_value_too_high_warning(
"intercept_only", {"intercept": 0}, "intercept", 0.1, 0.1
)
assert warnings == []
def test_get_parameter_p_value_too_high_warning_fail():
warnings = get_parameter_p_value_too_high_warning(
"intercept_only", {"intercept": 0}, "intercept", 0.2, 0.1
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.intercept_p_value_too_high"
)
assert warning.description == (
"Model fit intercept p-value is too high. Candidate model rejected."
)
assert warning.data == {
"intercept": 0,
"intercept_maximum_p_value": 0.1,
"intercept_p_value": 0.2,
}
def test_get_intercept_only_candidate_models_fail():
# should be covered by ETL, but this ensures no negative values.
data = pd.DataFrame({"meter_value": np.arange(10) * -1})
candidate_models = get_intercept_only_candidate_models(data, weights_col=None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "intercept_only"
assert model.formula == "meter_value ~ 1"
assert model.status == "DISQUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == ["intercept"]
assert round(model.model_params["intercept"], 2) == -4.5
assert model.r_squared_adj == 0
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.intercept_negative"
)
def test_get_intercept_only_candidate_models_qualified(
prediction_index, temperature_data
):
data = pd.DataFrame({"meter_value": np.arange(10)})
candidate_models = get_intercept_only_candidate_models(data, weights_col=None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "intercept_only"
assert model.formula == "meter_value ~ 1"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == ["intercept"]
assert round(model.model_params["intercept"], 2) == 4.5
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1642.5
assert model.r_squared_adj == 0
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_intercept_only_candidate_models_qualified_with_weights(
prediction_index, temperature_data
):
data = pd.DataFrame({"meter_value": np.arange(10), "weights": np.arange(10)})
candidate_models = get_intercept_only_candidate_models(data, "weights")
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "intercept_only"
assert model.formula == "meter_value ~ 1"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == ["intercept"]
assert round(model.model_params["intercept"], 2) == 6.33
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 2311.67
assert model.r_squared_adj == 0
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_intercept_only_candidate_models_error():
data = pd.DataFrame({"meter_value": []})
candidate_models = get_intercept_only_candidate_models(data, weights_col=None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.model_results"
)
assert warning.description == (
"Error encountered in statsmodels.formula.api.ols method." " (Empty data?)"
)
assert list(sorted(warning.data.keys())) == ["traceback"]
assert warning.data["traceback"] is not None
def test_get_cdd_only_candidate_models_qualified(prediction_index, temperature_data):
data = pd.DataFrame({"meter_value": [1, 1, 1, 6], "cdd_65": [0, 0.1, 0, 5]})
candidate_models = get_cdd_only_candidate_models(data, 1, 1, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"cooling_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == 1.01
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.97
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1730.04
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_qualified_with_weights(
prediction_index, temperature_data
):
data = pd.DataFrame(
{
"meter_value": [1, 1, 1, 6],
"cdd_65": [0, 0.1, 0, 5],
"weights": [1, 100, 1, 1],
}
)
candidate_models = get_cdd_only_candidate_models(data, 1, 1, 0.1, "weights")
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"cooling_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == 1.02
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.9
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1723.19
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_not_attempted():
data = pd.DataFrame({"meter_value": [1, 1, 1, 6], "cdd_65": [0, 0.1, 0, 5]})
candidate_models = get_cdd_only_candidate_models(data, 10, 10, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "NOT ATTEMPTED"
assert model.model is None
assert model.result is None
assert model.model_params == {}
assert model.r_squared_adj is None
assert len(model.warnings) == 2
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_disqualified(prediction_index, temperature_data):
data = pd.DataFrame({"meter_value": [1, 1, 1, -4], "cdd_65": [0, 0.1, 0, 5]})
candidate_models = get_cdd_only_candidate_models(data, 1, 1, 0.0, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "DISQUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"cooling_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == -1.01
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 1.03
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == -1000.04
assert round(model.r_squared_adj, 2) == 1.00
assert len(model.warnings) == 2
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_error():
data = pd.DataFrame({"meter_value": [], "cdd_65": []})
candidate_models = get_cdd_only_candidate_models(data, 0, 0, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == ("eemeter.caltrack_daily.cdd_only.model_results")
assert warning.description == (
"Error encountered in statsmodels.formula.api.ols method." " (Empty data?)"
)
assert list(sorted(warning.data.keys())) == ["traceback"]
assert warning.data["traceback"] is not None
def test_get_hdd_only_candidate_models_qualified(prediction_index, temperature_data):
data = pd.DataFrame({"meter_value": [1, 1, 1, 6], "hdd_65": [0, 0.1, 0, 5]})
candidate_models = get_hdd_only_candidate_models(data, 1, 1, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "hdd_only"
assert model.formula == "meter_value ~ hdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_hdd",
"heating_balance_point",
"intercept",
]
assert round(model.model_params["beta_hdd"], 2) == 1.01
assert round(model.model_params["heating_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.97
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1730.67
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_hdd_only_candidate_models_qualified_with_weights(
prediction_index, temperature_data
):
data = pd.DataFrame(
{
"meter_value": [1, 1, 1, 6],
"hdd_65": [0, 0.1, 0, 5],
"weights": [1, 100, 1, 1],
}
)
candidate_models = get_hdd_only_candidate_models(data, 1, 1, 0.1, "weights")
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "hdd_only"
assert model.formula == "meter_value ~ hdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_hdd",
"heating_balance_point",
"intercept",
]
assert round(model.model_params["beta_hdd"], 2) == 1.02
assert round(model.model_params["heating_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.9
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1723.83
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_hdd_only_candidate_models_not_attempted():
data = pd.DataFrame({"meter_value": [1, 1, 1, 6], "hdd_65": [0, 0.1, 0, 5]})
candidate_models = get_hdd_only_candidate_models(data, 10, 10, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "hdd_only"
assert model.formula == "meter_value ~ hdd_65"
assert model.status == "NOT ATTEMPTED"
assert model.model is None
assert model.result is None
assert model.model_params == {}
assert model.r_squared_adj is None
assert len(model.warnings) == 2
assert json.dumps(model.json()) is not None
def test_get_hdd_only_candidate_models_disqualified(prediction_index, temperature_data):
data = pd.DataFrame({"meter_value": [1, 1, 1, -4], "hdd_65": [0, 0.1, 0, 5]})
candidate_models = get_hdd_only_candidate_models(data, 1, 1, 0.0, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "hdd_only"
assert model.formula == "meter_value ~ hdd_65"
assert model.status == "DISQUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_hdd",
"heating_balance_point",
"intercept",
]
assert round(model.model_params["beta_hdd"], 2) == -1.01
assert round(model.model_params["heating_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 1.03
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == -1000.67
assert round(model.r_squared_adj, 2) == 1.00
assert len(model.warnings) == 2
assert json.dumps(model.json()) is not None
def test_get_hdd_only_candidate_models_error():
data = pd.DataFrame({"meter_value": [], "hdd_65": []})
candidate_models = get_hdd_only_candidate_models(data, 0, 0, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == ("eemeter.caltrack_daily.hdd_only.model_results")
assert warning.description == (
"Error encountered in statsmodels.formula.api.ols method." " (Empty data?)"
)
assert list(sorted(warning.data.keys())) == ["traceback"]
assert warning.data["traceback"] is not None
def test_get_cdd_hdd_candidate_models_qualified(prediction_index, temperature_data):
data = pd.DataFrame(
{
"meter_value": [6, 1, 1, 6],
"cdd_65": [5, 0, 0.1, 0],
"hdd_65": [0, 0.1, 0.1, 5],
}
)
candidate_models = get_cdd_hdd_candidate_models(data, 1, 1, 1, 1, 0.1, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_hdd"
assert model.formula == "meter_value ~ cdd_65 + hdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"beta_hdd",
"cooling_balance_point",
"heating_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == 1.03
assert round(model.model_params["beta_hdd"], 2) == 1.03
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["heating_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.85
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 3130.31
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_cdd_hdd_candidate_models_qualified_with_weights(
prediction_index, temperature_data
):
data = pd.DataFrame(
{
"meter_value": [6, 1, 1, 6],
"cdd_65": [5, 0, 0.1, 0],
"hdd_65": [0, 0.1, 0.1, 5],
"weights": [1, 1, 100, 1],
}
)
candidate_models = get_cdd_hdd_candidate_models(
data, 1, 1, 1, 1, 0.1, 0.1, "weights"
)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_hdd"
assert model.formula == "meter_value ~ cdd_65 + hdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"beta_hdd",
"cooling_balance_point",
"heating_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == 1.04
assert round(model.model_params["beta_hdd"], 2) == 1.04
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["heating_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.79
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 3139.71
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_cdd_hdd_candidate_models_not_attempted():
data = pd.DataFrame(
{
"meter_value": [6, 1, 1, 6],
"cdd_65": [5, 0, 0.1, 0],
"hdd_65": [0, 0.1, 0, 5],
}
)
candidate_models = get_cdd_hdd_candidate_models(
data, 10, 10, 10, 10, 0.1, 0.1, None
)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_hdd"
assert model.formula == "meter_value ~ cdd_65 + hdd_65"
assert model.status == "NOT ATTEMPTED"
assert model.model is None
assert model.result is None
assert model.model_params == {}
assert model.r_squared_adj is None
assert len(model.warnings) == 4
assert json.dumps(model.json()) is not None
def test_get_cdd_hdd_candidate_models_disqualified(prediction_index, temperature_data):
data = pd.DataFrame(
{
"meter_value": [-4, 1, 1, -4],
"cdd_65": [5, 0, 0.1, 0],
"hdd_65": [0, 0.1, 0, 5],
}
)
candidate_models = get_cdd_hdd_candidate_models(data, 1, 1, 1, 1, 0.0, 0.0, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_hdd"
assert model.formula == "meter_value ~ cdd_65 + hdd_65"
assert model.status == "DISQUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"beta_hdd",
"cooling_balance_point",
"heating_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == -1.02
assert round(model.model_params["beta_hdd"], 2) == -1.02
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["heating_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 1.1
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == -2391.1
assert round(model.r_squared_adj, 2) == 1.00
assert len(model.warnings) == 4
assert json.dumps(model.json()) is not None
def test_get_cdd_hdd_candidate_models_error():
data = pd.DataFrame({"meter_value": [], "hdd_65": [], "cdd_65": []})
candidate_models = get_cdd_hdd_candidate_models(data, 0, 0, 0, 0, 0.1, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == ("eemeter.caltrack_daily.cdd_hdd.model_results")
assert warning.description == (
"Error encountered in statsmodels.formula.api.ols method." " (Empty data?)"
)
assert list(sorted(warning.data.keys())) == ["traceback"]
assert warning.data["traceback"] is not None
@pytest.fixture
def candidate_model_qualified_high_r2():
return CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula1", status="QUALIFIED", r_squared_adj=1
)
@pytest.fixture
def candidate_model_qualified_low_r2():
return CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula2", status="QUALIFIED", r_squared_adj=0
)
@pytest.fixture
def candidate_model_disqualified():
return CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula3",
status="DISQUALIFIED",
r_squared_adj=0.5,
)
def test_select_best_candidate_ok(
candidate_model_qualified_high_r2,
candidate_model_qualified_low_r2,
candidate_model_disqualified,
):
candidates = [
candidate_model_qualified_high_r2,
candidate_model_qualified_low_r2,
candidate_model_disqualified,
]
best_candidate, warnings = select_best_candidate(candidates)
assert warnings == []
assert best_candidate.status == "QUALIFIED"
assert best_candidate.formula == "formula1"
assert best_candidate.r_squared_adj == 1
def test_select_best_candidate_none(candidate_model_disqualified,):
candidates = [candidate_model_disqualified]
best_candidate, warnings = select_best_candidate(candidates)
assert best_candidate is None
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.select_best_candidate.no_candidates"
)
assert warning.description == ("No qualified model candidates available.")
assert warning.data == {"status_count:DISQUALIFIED": 1}
def test_fit_caltrack_usage_per_day_model_empty():
data = pd.DataFrame({"meter_value": [], "hdd_65": [], "cdd_65": []})
model_results = fit_caltrack_usage_per_day_model(data)
assert model_results.method_name == "caltrack_usage_per_day"
assert model_results.status == "NO DATA"
assert len(model_results.warnings) == 1
warning = model_results.warnings[0]
assert warning.qualified_name == ("eemeter.caltrack_usage_per_day.no_data")
assert warning.description == ("No data available. Cannot fit model.")
assert warning.data == {}
@pytest.fixture
def cdd_hdd_h60_c65(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_daily["blackout_start_date"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60],
cooling_balance_points=[65],
use_mean_daily_values=True,
)
meter_data_feature = compute_usage_per_day_feature(meter_data, "meter_value")
data = merge_features([meter_data_feature, temperature_features])
baseline_data, warnings = get_baseline_data(data, end=blackout_start_date)
return baseline_data
def test_fit_caltrack_usage_per_day_model_cdd_hdd(
cdd_hdd_h60_c65, prediction_index, temperature_data
):
model_results = fit_caltrack_usage_per_day_model(cdd_hdd_h60_c65)
assert len(model_results.candidates) == 4
assert model_results.candidates[0].model_type == "intercept_only"
assert model_results.candidates[1].model_type == "hdd_only"
assert model_results.candidates[2].model_type == "cdd_only"
assert model_results.candidates[3].model_type == "cdd_hdd"
assert model_results.model.status == "QUALIFIED"
assert model_results.model.model_type == "cdd_hdd"
model_prediction = model_results.model.predict(prediction_index, temperature_data)
prediction_df = model_prediction.result
assert round(prediction_df.predicted_usage.sum(), 2) == 7059.48
@pytest.fixture
def cdd_hdd_c65(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_daily["blackout_start_date"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[],
cooling_balance_points=[65],
use_mean_daily_values=True,
)
meter_data_feature = compute_usage_per_day_feature(meter_data, "meter_value")
data = merge_features([meter_data_feature, temperature_features])
baseline_data, warnings = get_baseline_data(data, end=blackout_start_date)
return baseline_data
def test_fit_caltrack_usage_per_day_model_cdd_only(
cdd_hdd_c65, prediction_index, temperature_data
):
model_results = fit_caltrack_usage_per_day_model(cdd_hdd_c65)
assert len(model_results.candidates) == 2
assert model_results.candidates[0].model_type == "intercept_only"
assert model_results.candidates[1].model_type == "cdd_only"
assert model_results.model.status == "QUALIFIED"
assert model_results.model.model_type == "cdd_only"
model_prediction = model_results.model.predict(prediction_index, temperature_data)
prediction_df = model_prediction.result
assert round(prediction_df.predicted_usage.sum(), 2) == 10192.0
def test_fit_caltrack_usage_per_day_model_cdd_hdd_use_billing_presets(
cdd_hdd_h60_c65, prediction_index, temperature_data
):
model_results = fit_caltrack_usage_per_day_model(
cdd_hdd_h60_c65, use_billing_presets=True, weights_col="n_days_kept"
)
assert len(model_results.candidates) == 4
assert model_results.candidates[0].model_type == "intercept_only"
assert model_results.candidates[1].model_type == "hdd_only"
assert model_results.candidates[2].model_type == "cdd_only"
assert model_results.candidates[3].model_type == "cdd_hdd"
assert model_results.model.status == "QUALIFIED"
assert model_results.model.model_type == "cdd_hdd"
model_prediction = model_results.model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 7059.48
def test_fit_caltrack_usage_per_day_model_cdd_hdd_use_billing_presets_no_weights(
cdd_hdd_h60_c65, prediction_index, temperature_data
):
with pytest.raises(ValueError) as exc_info:
fit_caltrack_usage_per_day_model(cdd_hdd_h60_c65, use_billing_presets=True)
assert "weights_col" in str(exc_info.value)
# When model is intercept-only, num_parameters should = 0 with cvrmse = cvrmse_adj
def test_fit_caltrack_usage_per_day_model_num_parameters_equals_zero():
data = pd.DataFrame(
{
"meter_value": [6, 1, 1, 6],
"cdd_65": [5, 0, 0.1, 0],
"hdd_65": [0, 0.1, 0.1, 5],
"start": pd.date_range(start="2016-01-02", periods=4, freq="D", tz="UTC"),
}
).set_index("start")
model_results = fit_caltrack_usage_per_day_model(data, fit_intercept_only=True)
    assert model_results.totals_metrics.num_parameters == 0
    assert model_results.avgs_metrics.num_parameters == 0
assert (
model_results.totals_metrics.cvrmse == model_results.totals_metrics.cvrmse_adj
)
assert model_results.avgs_metrics.cvrmse == model_results.avgs_metrics.cvrmse_adj
def test_fit_caltrack_usage_per_day_model_no_model():
data = pd.DataFrame(
{
"meter_value": [4, 1, 1, 4],
"cdd_65": [5, 0, 0.1, 0],
"hdd_65": [0, 0.1, 0, 5],
}
)
model_results = fit_caltrack_usage_per_day_model(
data,
fit_hdd_only=False,
fit_cdd_hdd=False,
fit_cdd_only=False,
fit_intercept_only=False,
)
assert model_results.method_name == "caltrack_usage_per_day"
assert model_results.status == "NO MODEL"
assert len(model_results.warnings) == 1
warning = model_results.warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.select_best_candidate.no_candidates"
)
assert warning.description == ("No qualified model candidates available.")
assert warning.data == {}
@pytest.fixture
def baseline_meter_data_billing():
index = pd.date_range("2011-01-01", freq="30D", periods=12, tz="UTC")
df = pd.DataFrame({"value": 1}, index=index)
df.iloc[-1] = np.nan
return df
@pytest.fixture
def baseline_temperature_data():
index = pd.date_range("2011-01-01", freq="H", periods=1095 * 24, tz="UTC")
series = pd.Series(np.random.normal(60, 5, len(index)), index=index)
return series
# CalTrack 2.2.3.2
def test_caltrack_merge_temperatures_insufficient_temperature_per_period(
baseline_meter_data_billing, baseline_temperature_data
):
baseline_temperature_data_missing = baseline_temperature_data.copy(deep=True)
baseline_temperature_data_missing.iloc[: (8 * 24)] = np.nan
# test without percent_hourly_coverage_per_billing_period constraint
temperature_features_no_constraint = compute_temperature_features(
baseline_meter_data_billing.index,
baseline_temperature_data_missing,
heating_balance_points=range(40, 81),
cooling_balance_points=range(50, 91),
data_quality=True,
keep_partial_nan_rows=False,
percent_hourly_coverage_per_billing_period=0,
)
assert temperature_features_no_constraint["n_days_kept"].isnull().sum() == 1
# test with default percent_hourly_coverage_per_billing_period=0.9 constraint
temperature_features_with_constraint = compute_temperature_features(
baseline_meter_data_billing.index,
baseline_temperature_data_missing,
heating_balance_points=range(40, 81),
cooling_balance_points=range(50, 91),
data_quality=True,
keep_partial_nan_rows=False,
)
assert temperature_features_with_constraint["n_days_kept"].isnull().sum() == 2
def test_caltrack_sufficiency_criteria_no_data():
data_quality = pd.DataFrame(
{"meter_value": [], "temperature_not_null": [], "temperature_null": []}
)
data_sufficiency = caltrack_sufficiency_criteria(data_quality, None, None)
assert data_sufficiency.status == "NO DATA"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
assert len(data_sufficiency.warnings) == 1
warning = data_sufficiency.warnings[0]
assert warning.qualified_name == ("eemeter.caltrack_sufficiency_criteria.no_data")
assert warning.description == "No data available."
assert warning.data == {}
def test_caltrack_sufficiency_criteria_pass():
data_quality = pd.DataFrame(
{
"meter_value": [1, np.nan],
"temperature_not_null": [1, 1],
"temperature_null": [0, 0],
"start": pd.date_range(start="2016-01-02", periods=2, freq="D", tz="UTC"),
}
).set_index("start")
requested_start = pd.Timestamp("2016-01-02").tz_localize("UTC").to_pydatetime()
requested_end = pd.Timestamp("2016-01-03").tz_localize("UTC")
data_sufficiency = caltrack_sufficiency_criteria(
data_quality,
requested_start,
requested_end,
num_days=1,
min_fraction_daily_coverage=0.9,
min_fraction_hourly_temperature_coverage_per_period=0.9,
)
assert data_sufficiency.status == "PASS"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
assert data_sufficiency.warnings == []
assert data_sufficiency.settings == {
"num_days": 1,
"min_fraction_daily_coverage": 0.9,
"min_fraction_hourly_temperature_coverage_per_period": 0.9,
}
def test_caltrack_sufficiency_criteria_pass_extreme_value_warning():
data_quality = pd.DataFrame(
{
"meter_value": [1, 1, 99999, 1, np.nan],
"temperature_not_null": np.ones(5),
"temperature_null": np.zeros(5),
"start": pd.date_range(start="2016-01-02", periods=5, freq="D", tz="UTC"),
}
).set_index("start")
requested_start = pd.Timestamp("2016-01-02").tz_localize("UTC").to_pydatetime()
requested_end = pd.Timestamp("2016-01-06").tz_localize("UTC")
data_sufficiency = caltrack_sufficiency_criteria(
data_quality,
requested_start,
requested_end,
num_days=4,
min_fraction_daily_coverage=0.9,
min_fraction_hourly_temperature_coverage_per_period=0.9,
)
assert data_sufficiency.status == "PASS"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
assert len(data_sufficiency.warnings) == 1
warning0 = data_sufficiency.warnings[0]
assert warning0.qualified_name == (
"eemeter.caltrack_sufficiency_criteria.extreme_values_detected"
)
assert warning0.data["n_extreme_values"] == 1
assert data_sufficiency.settings == {
"num_days": 4,
"min_fraction_daily_coverage": 0.9,
"min_fraction_hourly_temperature_coverage_per_period": 0.9,
}
def test_caltrack_sufficiency_criteria_fail_no_data():
data_quality = pd.DataFrame(
{
"meter_value": [np.nan, np.nan],
"temperature_not_null": [1, 5],
"temperature_null": [0, 5],
"start": pd.date_range(start="2016-01-02", periods=2, freq="D", tz="UTC"),
}
).set_index("start")
requested_start = pd.Timestamp("2016-01-02").tz_localize("UTC")
requested_end = pd.Timestamp("2016-01-04").tz_localize("UTC")
data_sufficiency = caltrack_sufficiency_criteria(
data_quality,
requested_start,
requested_end,
num_days=3,
min_fraction_daily_coverage=0.9,
min_fraction_hourly_temperature_coverage_per_period=0.9,
)
assert data_sufficiency.status == "NO DATA"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
def test_caltrack_sufficiency_criteria_fail_no_gap():
data_quality = pd.DataFrame(
{
"meter_value": [1, 1],
"temperature_not_null": [1, 5],
"temperature_null": [0, 5],
"start": pd.date_range(start="2016-01-02", periods=2, freq="D", tz="UTC"),
}
).set_index("start")
requested_start = pd.Timestamp("2016-01-02").tz_localize("UTC")
requested_end = pd.Timestamp("2016-01-04").tz_localize("UTC")
data_sufficiency = caltrack_sufficiency_criteria(
data_quality,
requested_start,
requested_end,
num_days=3,
min_fraction_daily_coverage=0.9,
min_fraction_hourly_temperature_coverage_per_period=0.9,
)
assert data_sufficiency.status == "FAIL"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
assert len(data_sufficiency.warnings) == 4
warning0 = data_sufficiency.warnings[0]
assert warning0.qualified_name == (
"eemeter.caltrack_sufficiency_criteria.incorrect_number_of_total_days"
)
assert warning0.description == (
"Total data span does not match the required value."
)
assert warning0.data == {"num_days": 3, "n_days_total": 2}
warning1 = data_sufficiency.warnings[1]
assert warning1.qualified_name == (
"eemeter.caltrack_sufficiency_criteria." "too_many_days_with_missing_data"
)
assert warning1.description == (
"Too many days in data have missing meter data or temperature data."
)
assert warning1.data == {"n_days_total": 2, "n_valid_days": 1}
warning2 = data_sufficiency.warnings[2]
assert warning2.qualified_name == (
"eemeter.caltrack_sufficiency_criteria." "too_many_days_with_missing_meter_data"
)
assert warning2.description == ("Too many days in data have missing meter data.")
# zero because nan value and last point dropped
assert warning2.data == {"n_days_total": 2, "n_valid_meter_data_days": 1}
warning3 = data_sufficiency.warnings[3]
assert warning3.qualified_name == (
"eemeter.caltrack_sufficiency_criteria."
"too_many_days_with_missing_temperature_data"
)
assert warning3.description == (
"Too many days in data have missing temperature data."
)
assert warning3.data == {"n_days_total": 2, "n_valid_temperature_data_days": 1}
def test_caltrack_sufficiency_criteria_pass_no_requested_start_end():
data_quality = pd.DataFrame(
{
"meter_value": [1, np.nan],
"temperature_not_null": [1, 1],
"temperature_null": [0, 0],
"start": pd.date_range(start="2016-01-02", periods=2, freq="D", tz="UTC"),
}
).set_index("start")
data_sufficiency = caltrack_sufficiency_criteria(
data_quality,
None,
None,
num_days=1,
min_fraction_daily_coverage=0.9,
min_fraction_hourly_temperature_coverage_per_period=0.9,
)
assert data_sufficiency.status == "PASS"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
assert len(data_sufficiency.warnings) == 0
def test_caltrack_sufficiency_criteria_fail_with_requested_start_end():
data_quality = pd.DataFrame(
{
"meter_value": [1, np.nan],
"temperature_not_null": [1, 1],
"temperature_null": [0, 0],
"start": pd.date_range(start="2016-01-02", periods=2, freq="D", tz="UTC"),
}
).set_index("start")
requested_start = pd.Timestamp("2016-01-01").tz_localize("UTC")
requested_end = pd.Timestamp("2016-01-04").tz_localize("UTC")
data_sufficiency = caltrack_sufficiency_criteria(
data_quality,
requested_start,
requested_end,
num_days=3,
min_fraction_daily_coverage=0.9,
min_fraction_hourly_temperature_coverage_per_period=0.9,
)
assert data_sufficiency.status == "FAIL"
assert data_sufficiency.criteria_name == ("caltrack_sufficiency_criteria")
assert len(data_sufficiency.warnings) == 3
warning0 = data_sufficiency.warnings[0]
assert warning0.qualified_name == (
"eemeter.caltrack_sufficiency_criteria." "too_many_days_with_missing_data"
)
assert warning0.description == (
"Too many days in data have missing meter data or temperature data."
)
assert warning0.data == {"n_days_total": 3, "n_valid_days": 1}
warning1 = data_sufficiency.warnings[1]
assert warning1.qualified_name == (
"eemeter.caltrack_sufficiency_criteria." "too_many_days_with_missing_meter_data"
)
assert warning1.description == ("Too many days in data have missing meter data.")
assert warning1.data == {"n_days_total": 3, "n_valid_meter_data_days": 1}
warning2 = data_sufficiency.warnings[2]
assert warning2.qualified_name == (
"eemeter.caltrack_sufficiency_criteria."
"too_many_days_with_missing_temperature_data"
)
assert warning2.description == (
"Too many days in data have missing temperature data."
)
assert warning2.data == {"n_days_total": 3, "n_valid_temperature_data_days": 1}
# CalTrack 2.2.4
def test_caltrack_sufficiency_criteria_too_much_data():
data_quality = pd.DataFrame(
{
"meter_value": [1, 1, np.nan],
"temperature_not_null": [1, 1, 1],
"temperature_null": [0, 0, 0],
"start": pd.date_range(start="2016-01-02", periods=3, freq="D", tz="UTC"),
}
).set_index("start")
requested_start = pd.Timestamp("2016-01-03").tz_localize("UTC")
    requested_end = pd.Timestamp("2016-01-03")
import streamlit as st
import altair as alt
from os import listdir
from os.path import isfile, join
from pydantic import BaseModel
import boto3
import json
import time
import pandas as pd
import numpy as np
import yfinance as yf
import datetime as dt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import random
import string
from datetime import datetime
from datetime import date
import requests
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from pytrends.request import TrendReq
import tweepy
from tweepy import OAuthHandler
import re
import textblob
from textblob import TextBlob
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import openpyxl
import tqdm
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
#To Hide Warnings
from urllib.request import urlopen, Request
import bs4
from bs4 import BeautifulSoup
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import plotly.express as px
from gensim.summarization import summarize
st.set_option('deprecation.showfileUploaderEncoding', False)
st.set_option('deprecation.showPyplotGlobalUse', False)
# data_dir = '/root/Assignment4/Assignment-Trial/Assignment-Trial/inference-data/'
# data_dir2 = '/root/Assignment4/Assignment-Trial/Assignment-Trial/fastAPIandStreamlit/awsdownload/'
data_dir3 = './awsdownloadstar/'
#companies = [f for f in listdir(data_dir) if isfile(join(data_dir, f))]
@st.cache
def load_data():
#df = data.cars()
return 0
def get_data(keyword):
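    """Fetch Google Trends interest-over-time for a single keyword and return
    it as a two-column DataFrame (ds, y) ready to feed into Prophet."""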
keyword = [keyword]
pytrend = TrendReq()
pytrend.build_payload(kw_list=keyword)
df = pytrend.interest_over_time()
df.drop(columns=['isPartial'], inplace=True)
df.reset_index(inplace=True)
df.columns = ["ds", "y"]
return df
# make forecasts for a new period
def make_pred(df, periods):
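    """Fit a basic Prophet model on df, forecast `periods` days ahead and
    return (forecast[['ds', 'yhat']], forecast figure, components figure)."""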
prophet_basic = Prophet()
prophet_basic.fit(df)
future = prophet_basic.make_future_dataframe(periods=periods)
forecast = prophet_basic.predict(future)
fig1 = prophet_basic.plot(forecast, xlabel="date", ylabel="trend", figsize=(10, 6))
fig2 = prophet_basic.plot_components(forecast)
forecast = forecast[["ds", "yhat"]]
return forecast, fig1, fig2
def main():
df = load_data()
page = st.sidebar.selectbox('Choose a page',('Homepage', 'SignUp','Logout'))
#page = st.sidebar.radio("Choose a page", ["Homepage", "SignUp"])
if page == "Homepage":
ACCESS_KEY_ID = 'xx'
ACCESS_SECRET_KEY = 'xx'
st.title('** Welcome to Team 3 CSYE !!!**')
st.header('User Authentication')
st.subheader('Please enter valid username password and Acess Token')
usrName = st.text_input('Username')
usrPassword = st.text_input('Password')
acesstoken = st.text_input('Enter your Token')
OTP = usrName + usrPassword
dynamodb = boto3.resource('dynamodb',
aws_access_key_id=ACCESS_KEY_ID,
aws_secret_access_key=ACCESS_SECRET_KEY,
region_name='us-east-1')
table = dynamodb.Table('users')
response = table.scan()
OTPD = response['Items']
userlist = []
toklist = []
i = 0
while i < len(OTPD):
#print(OTP[i])
x = OTPD[i]['login']
y = OTPD[i]['acesstoken']
#print(x)
userlist.append(x)
toklist.append(y)
i=i+1
if OTP in userlist and acesstoken in toklist :
verified = "True"
result = "Congratulations User Verified!!"
page = st.sidebar.radio("Choose a page", ["Star Masked Data","Live News","Company Profile","Technical","Google Trends","Twitter Trends","Stock Future Prediction", "Meeting Summarization"])
st.title(result)
if page == "Star Masked Data":
st.title("Star Data Using AWS Comprehend")
user_input = st.text_input("Enter the name of the Company")
user_input = user_input+".out"
time.sleep(1.4)
try:
with open(data_dir3 + user_input) as f:
st.text(f.read())
except:
st.text("Company Does not Exist")
elif page == "Google Trends":
st.sidebar.write("""
## Choose a keyword and a prediction period
""")
keyword = st.sidebar.text_input("Keyword", "Amazon")
periods = st.sidebar.slider('Prediction time in days:', 7, 365, 90)
details = st.sidebar.checkbox("Show details")
# main section
st.write("""
# Welcome to Trend Predictor App
### This app predicts the **Google Trend** you want!
""")
st.image('https://s3.eu-west-2.amazonaws.com/cdn.howtomakemoneyfromhomeuk.com/wp-content/uploads/2020/10/Google-Trends.jpg',width=350, use_column_width=200)
st.write("Evolution of interest:", keyword)
df = get_data(keyword)
forecast, fig1, fig2 = make_pred(df, periods)
st.pyplot(fig1)
if details:
st.write("### Details :mag_right:")
st.pyplot(fig2)
elif page == "Meeting Summarization":
symbols = ['./Audio Files/Meeting 1.mp3','./Audio Files/Meeting 2.mp3', './Audio Files/Meeting 3.mp3', './Audio Files/Meeting 4.mp3']
track = st.selectbox('Choose a the Meeting Audio',symbols)
st.audio(track)
data_dir = './inference-data/'
ratiodata = st.text_input("Please Enter a Ratio you want summary by: ")
if st.button("Generate a Summarized Version of the Meeting"):
time.sleep(2.4)
#st.success("This is the Summarized text of the Meeting Audio Files xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxgeeeeeeeeeeeeeee eeeeeeeeeeeeeehjjjjjjjjjjjjjjjsdbjhvsdk vjbsdkvjbsdvkb skbdv")
if track == "./Audio Files/Meeting 2.mp3":
user_input = "NKE"
time.sleep(1.4)
try:
with open(data_dir + user_input) as f:
st.success(summarize(f.read(), ratio=float(ratiodata)))
#print()
st.warning("Sentiment: Negative")
except:
st.text("Company Does not Exist")
else:
user_input = "AGEN"
time.sleep(1.4)
try:
with open(data_dir + user_input) as f:
st.success(summarize(f.read(), ratio=float(ratiodata)))
#print()
st.success("Sentiment: Positive")
except:
st.text("Company Does not Exist")
elif page == "Twitter Trends":
st.write("""
# Welcome to Twitter Sentiment App
### This app predicts the **Twitter Sentiments** you want!
""")
st.image('https://assets.teenvogue.com/photos/56b4f21327a088e24b967bb6/3:2/w_531,h_354,c_limit/twitter-gifs.gif',width=250, use_column_width=200)
#st.subheader("Select a topic which you'd like to get the sentiment analysis on :")
################# Twitter API Connection #######################
consumer_key = "xx"
consumer_secret = "xx"
access_token = "xx"
access_token_secret = "xx"
# Use the above credentials to authenticate the API.
auth = tweepy.OAuthHandler( consumer_key , consumer_secret )
auth.set_access_token( access_token , access_token_secret )
api = tweepy.API(auth)
################################################################
df = pd.DataFrame(columns=["Date","User","IsVerified","Tweet","Likes","RT",'User_location'])
# Write a Function to extract tweets:
def get_tweets(Topic,Count):
i=0
#my_bar = st.progress(100) # To track progress of Extracted tweets
for tweet in tweepy.Cursor(api.search, q=Topic,count=100, lang="en",exclude='retweets').items():
#time.sleep(0.1)
#my_bar.progress(i)
df.loc[i,"Date"] = tweet.created_at
df.loc[i,"User"] = tweet.user.name
df.loc[i,"IsVerified"] = tweet.user.verified
df.loc[i,"Tweet"] = tweet.text
df.loc[i,"Likes"] = tweet.favorite_count
df.loc[i,"RT"] = tweet.retweet_count
df.loc[i,"User_location"] = tweet.user.location
#df.to_csv("TweetDataset.csv",index=False)
#df.to_excel('{}.xlsx'.format("TweetDataset"),index=False) ## Save as Excel
i=i+1
if i>Count:
break
else:
pass
# Function to Clean the Tweet.
def clean_tweet(tweet):
return ' '.join(re.sub('(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)|([RT])', ' ', tweet.lower()).split())
# Funciton to analyze Sentiment
def analyze_sentiment(tweet):
analysis = TextBlob(tweet)
if analysis.sentiment.polarity > 0:
return 'Positive'
elif analysis.sentiment.polarity == 0:
return 'Neutral'
else:
return 'Negative'
#Function to Pre-process data for Worlcloud
def prepCloud(Topic_text,Topic):
Topic = str(Topic).lower()
Topic=' '.join(re.sub('([^0-9A-Za-z \t])', ' ', Topic).split())
Topic = re.split("\s+",str(Topic))
stopwords = set(STOPWORDS)
stopwords.update(Topic) ### Add our topic in Stopwords, so it doesnt appear in wordClous
###
text_new = " ".join([txt for txt in Topic_text.split() if txt not in stopwords])
return text_new
# Collect Input from user :
Topic = str()
Topic = str(st.sidebar.text_input("Enter the topic you are interested in (Press Enter once done)"))
if len(Topic) > 0 :
# Call the function to extract the data. pass the topic and filename you want the data to be stored in.
with st.spinner("Please wait, Tweets are being extracted"):
get_tweets(Topic , Count=200)
st.success('Tweets have been Extracted !!!!')
# Call function to get Clean tweets
df['clean_tweet'] = df['Tweet'].apply(lambda x : clean_tweet(x))
# Call function to get the Sentiments
df["Sentiment"] = df["Tweet"].apply(lambda x : analyze_sentiment(x))
# Write Summary of the Tweets
st.write("Total Tweets Extracted for Topic '{}' are : {}".format(Topic,len(df.Tweet)))
st.write("Total Positive Tweets are : {}".format(len(df[df["Sentiment"]=="Positive"])))
st.write("Total Negative Tweets are : {}".format(len(df[df["Sentiment"]=="Negative"])))
st.write("Total Neutral Tweets are : {}".format(len(df[df["Sentiment"]=="Neutral"])))
# See the Extracted Data :
if st.button("See the Extracted Data"):
#st.markdown(html_temp, unsafe_allow_html=True)
st.success("Below is the Extracted Data :")
st.write(df.head(50))
# get the countPlot
if st.button("Get Count Plot for Different Sentiments"):
st.success("Generating A Count Plot")
st.subheader(" Count Plot for Different Sentiments")
st.write(sns.countplot(df["Sentiment"], palette="Blues"))
st.pyplot()
# Piechart
if st.button("Get Pie Chart for Different Sentiments"):
st.success("Generating A Pie Chart")
a=len(df[df["Sentiment"]=="Positive"])
b=len(df[df["Sentiment"]=="Negative"])
c=len(df[df["Sentiment"]=="Neutral"])
d=np.array([a,b,c])
explode = (0.1, 0.0, 0.1)
st.write(plt.pie(d,shadow=True,explode=explode,labels=["Positive","Negative","Neutral"],autopct='%1.2f%%'))
st.pyplot()
# get the countPlot Based on Verified and unverified Users
if st.button("Get Count Plot Based on Verified and unverified Users"):
st.success("Generating A Count Plot (Verified and unverified Users)")
st.subheader(" Count Plot for Different Sentiments for Verified and unverified Users")
st.write(sns.countplot(df["Sentiment"],hue=df.IsVerified))
st.pyplot()
## Points to add 1. Make Backgroud Clear for Wordcloud 2. Remove keywords from Wordcloud
# Create a Worlcloud
if st.button("Get WordCloud for all things said about {}".format(Topic)):
st.success("Generating A WordCloud for all things said about {}".format(Topic))
text = " ".join(review for review in df.clean_tweet)
stopwords = set(STOPWORDS)
text_newALL = prepCloud(text,Topic)
wordcloud = WordCloud(stopwords=stopwords,max_words=800,max_font_size=75, colormap="Blues", background_color="black").generate(text_newALL)
st.write(plt.imshow(wordcloud, interpolation='bilinear'))
st.pyplot()
#Wordcloud for Positive tweets only
if st.button("Get WordCloud for all Positive Tweets about {}".format(Topic)):
st.success("Generating A WordCloud for all Positive Tweets about {}".format(Topic))
text_positive = " ".join(review for review in df[df["Sentiment"]=="Positive"].clean_tweet)
stopwords = set(STOPWORDS)
text_new_positive = prepCloud(text_positive,Topic)
#text_positive=" ".join([word for word in text_positive.split() if word not in stopwords])
wordcloud = WordCloud(stopwords=stopwords,max_words=800,max_font_size=75, colormap="Greens", background_color="black").generate(text_new_positive)
st.write(plt.imshow(wordcloud, interpolation='bilinear'))
st.pyplot()
#Wordcloud for Negative tweets only
if st.button("Get WordCloud for all Negative Tweets about {}".format(Topic)):
st.success("Generating A WordCloud for all Positive Tweets about {}".format(Topic))
text_negative = " ".join(review for review in df[df["Sentiment"]=="Negative"].clean_tweet)
stopwords = set(STOPWORDS)
text_new_negative = prepCloud(text_negative,Topic)
#text_negative=" ".join([word for word in text_negative.split() if word not in stopwords])
wordcloud = WordCloud(stopwords=stopwords,max_words=800,max_font_size=75, colormap="Reds", background_color="black").generate(text_new_negative)
st.write(plt.imshow(wordcloud, interpolation='bilinear'))
st.pyplot()
#st.sidebar.subheader("Scatter-plot setup")
#box1 = st.sidebar.selectbox(label= "X axis", options = numeric_columns)
#box2 = st.sidebar.selectbox(label="Y axis", options=numeric_columns)
#sns.jointplot(x=box1, y= box2, data=df, kind = "reg", color= "red")
#st.pyplot()
elif page == "Stock Future Prediction":
snp500 = pd.read_csv("./Datasets/SP500.csv")
symbols = snp500['Symbol'].sort_values().tolist()
ticker = st.sidebar.selectbox(
'Choose a S&P 500 Stock',
symbols)
START = "2015-01-01"
TODAY = date.today().strftime("%Y-%m-%d")
st.title('Stock Forecast App')
st.image('https://media2.giphy.com/media/JtBZm3Getg3dqxK0zP/giphy-downsized-large.gif',width=250, use_column_width=200)
# stocks = ('GOOG', 'AAPL', 'MSFT', 'GME', 'W', 'TSLA')
# selected_stock = st.selectbox('Select dataset for prediction', stocks)
n_years = st.slider('Years of prediction:', 1, 4)
period = n_years * 365
st.title('Stock Forecast App To Do part in stockapp.py')
data_load_state = st.text('Loading data...')
data = yf.download(ticker, START, TODAY)
data.reset_index(inplace=True)
data_load_state.text('Loading data... done!')
st.subheader('Raw data')
st.write(data.tail())
# Plot raw data
def plot_raw_data():
fig = go.Figure()
fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
plot_raw_data()
# Predict forecast with Prophet.
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
# Show and plot forecast
st.subheader('Forecast data')
st.write(forecast.tail())
st.write(f'Forecast plot for {n_years} years')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write("Forecast components")
fig2 = m.plot_components(forecast)
st.write(fig2)
elif page == "Technical":
snp500 = pd.read_csv("./Datasets/SP500.csv")
symbols = snp500['Symbol'].sort_values().tolist()
ticker = st.sidebar.selectbox(
'Choose a S&P 500 Stock',
symbols)
stock = yf.Ticker(ticker)
def calcMovingAverage(data, size):
df = data.copy()
df['sma'] = df['Adj Close'].rolling(size).mean()
df['ema'] = df['Adj Close'].ewm(span=size, min_periods=size).mean()
df.dropna(inplace=True)
return df
def calc_macd(data):
df = data.copy()
df['ema12'] = df['Adj Close'].ewm(span=12, min_periods=12).mean()
df['ema26'] = df['Adj Close'].ewm(span=26, min_periods=26).mean()
df['macd'] = df['ema12'] - df['ema26']
df['signal'] = df['macd'].ewm(span=9, min_periods=9).mean()
df.dropna(inplace=True)
return df
def calcBollinger(data, size):
df = data.copy()
df["sma"] = df['Adj Close'].rolling(size).mean()
df["bolu"] = df["sma"] + 2*df['Adj Close'].rolling(size).std(ddof=0)
df["bold"] = df["sma"] - 2*df['Adj Close'].rolling(size).std(ddof=0)
df["width"] = df["bolu"] - df["bold"]
df.dropna(inplace=True)
return df
st.title('Technical Indicators')
st.subheader('Moving Average')
coMA1, coMA2 = st.beta_columns(2)
with coMA1:
numYearMA = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=0)
with coMA2:
windowSizeMA = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=1)
start = dt.datetime.today()-dt.timedelta(numYearMA * 365)
end = dt.datetime.today()
dataMA = yf.download(ticker,start,end)
df_ma = calcMovingAverage(dataMA, windowSizeMA)
df_ma = df_ma.reset_index()
figMA = go.Figure()
figMA.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['Adj Close'],
name = "Prices Over Last " + str(numYearMA) + " Year(s)"
)
)
figMA.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['sma'],
name = "SMA" + str(windowSizeMA) + " Over Last " + str(numYearMA) + " Year(s)"
)
)
figMA.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['ema'],
name = "EMA" + str(windowSizeMA) + " Over Last " + str(numYearMA) + " Year(s)"
)
)
figMA.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))
figMA.update_layout(legend_title_text='Trend')
figMA.update_yaxes(tickprefix="$")
st.plotly_chart(figMA, use_container_width=True)
st.subheader('Moving Average Convergence Divergence (MACD)')
numYearMACD = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=2)
startMACD = dt.datetime.today()-dt.timedelta(numYearMACD * 365)
endMACD = dt.datetime.today()
dataMACD = yf.download(ticker,startMACD,endMACD)
df_macd = calc_macd(dataMACD)
df_macd = df_macd.reset_index()
figMACD = make_subplots(rows=2, cols=1,
shared_xaxes=True,
vertical_spacing=0.01)
figMACD.add_trace(
go.Scatter(
x = df_macd['Date'],
y = df_macd['Adj Close'],
name = "Prices Over Last " + str(numYearMACD) + " Year(s)"
),
row=1, col=1
)
figMACD.add_trace(
go.Scatter(
x = df_macd['Date'],
y = df_macd['ema12'],
name = "EMA 12 Over Last " + str(numYearMACD) + " Year(s)"
),
row=1, col=1
)
figMACD.add_trace(
go.Scatter(
x = df_macd['Date'],
y = df_macd['ema26'],
name = "EMA 26 Over Last " + str(numYearMACD) + " Year(s)"
),
row=1, col=1
)
figMACD.add_trace(
go.Scatter(
x = df_macd['Date'],
y = df_macd['macd'],
name = "MACD Line"
),
row=2, col=1
)
figMACD.add_trace(
go.Scatter(
x = df_macd['Date'],
y = df_macd['signal'],
name = "Signal Line"
),
row=2, col=1
)
figMACD.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1,
xanchor="left",
x=0
))
figMACD.update_yaxes(tickprefix="$")
st.plotly_chart(figMACD, use_container_width=True)
st.subheader('Bollinger Band')
coBoll1, coBoll2 = st.beta_columns(2)
with coBoll1:
numYearBoll = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=6)
with coBoll2:
windowSizeBoll = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=7)
startBoll= dt.datetime.today()-dt.timedelta(numYearBoll * 365)
endBoll = dt.datetime.today()
dataBoll = yf.download(ticker,startBoll,endBoll)
df_boll = calcBollinger(dataBoll, windowSizeBoll)
df_boll = df_boll.reset_index()
figBoll = go.Figure()
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bolu'],
name = "Upper Band"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['sma'],
name = "SMA" + str(windowSizeBoll) + " Over Last " + str(numYearBoll) + " Year(s)"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bold'],
name = "Lower Band"
)
)
figBoll.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1,
xanchor="left",
x=0
))
figBoll.update_yaxes(tickprefix="$")
st.plotly_chart(figBoll, use_container_width=True)
elif page == "Live News":
st.image('https://www.visitashland.com/files/latestnews.jpg',width=250, use_column_width=200)
snp500 = pd.read_csv("./Datasets/SP500.csv")
symbols = snp500['Symbol'].sort_values().tolist()
ticker = st.sidebar.selectbox(
'Choose a S&P 500 Stock',
symbols)
if st.button("See Latest News about "+ticker+""):
st.header('Latest News')
def newsfromfizviz(temp):
# time.sleep(5)
finwiz_url = 'https://finviz.com/quote.ashx?t='
news_tables = {}
tickers = [temp]
for ticker in tickers:
url = finwiz_url + ticker
req = Request(url=url,headers={'user-agent': 'my-app/0.0.1'})
response = urlopen(req)
# Read the contents of the file into 'html'
html = BeautifulSoup(response)
# Find 'news-table' in the Soup and load it into 'news_table'
news_table = html.find(id='news-table')
# Add the table to our dictionary
news_tables[ticker] = news_table
parsed_news = []
# Iterate through the news
for file_name, news_table in news_tables.items():
# Iterate through all tr tags in 'news_table'
for x in news_table.findAll('tr'):
# read the text from each tr tag into text
# get text from a only
text = x.a.get_text()
# splite text in the td tag into a list
date_scrape = x.td.text.split()
# if the length of 'date_scrape' is 1, load 'time' as the only element
if len(date_scrape) == 1:
time = date_scrape[0]
# else load 'date' as the 1st element and 'time' as the second
else:
date = date_scrape[0]
time = date_scrape[1]
# Extract the ticker from the file name, get the string up to the 1st '_'
ticker = file_name.split('_')[0]
# Append ticker, date, time and headline as a list to the 'parsed_news' list
parsed_news.append([ticker, date, time, text])
# Instantiate the sentiment intensity analyzer
vader = SentimentIntensityAnalyzer()
# Set column names
columns = ['ticker', 'date', 'time', 'headline']
# Convert the parsed_news list into a DataFrame called 'parsed_and_scored_news'
parsed_and_scored_news = pd.DataFrame(parsed_news, columns=columns)
# Iterate through the headlines and get the polarity scores using vader
scores = parsed_and_scored_news['headline'].apply(vader.polarity_scores).tolist()
# Convert the 'scores' list of dicts into a DataFrame
scores_df = pd.DataFrame(scores)
# Join the DataFrames of the news and the list of dicts
parsed_and_scored_news = parsed_and_scored_news.join(scores_df, rsuffix='_right')
# Convert the date column from string to datetime
parsed_and_scored_news['date'] = pd.to_datetime(parsed_and_scored_news.date).dt.date
parsed_and_scored_news['Sentiment'] = np.where(parsed_and_scored_news['compound'] > 0, 'Positive', (np.where(parsed_and_scored_news['compound'] == 0, 'Neutral', 'Negative')))
return parsed_and_scored_news
df = newsfromfizviz(ticker)
df_pie = df[['Sentiment','headline']].groupby('Sentiment').count()
fig = px.pie(df_pie, values=df_pie['headline'], names=df_pie.index, color=df_pie.index, color_discrete_map={'Positive':'green', 'Neutral':'darkblue', 'Negative':'red'})
st.subheader('Dataframe with Latest News')
st.dataframe(df)
st.subheader('Latest News Sentiment Distribution using Pie Chart')
st.plotly_chart(fig)
plt.rcParams['figure.figsize'] = [11, 5]
# Group by date and ticker columns from scored_news and calculate the mean
mean_scores = df.groupby(['ticker','date']).mean()
# Unstack the column ticker
mean_scores = mean_scores.unstack()
# Get the cross-section of compound in the 'columns' axis
mean_scores = mean_scores.xs('compound', axis="columns").transpose()
# Plot a bar chart with pandas
mean_scores.plot(kind = 'bar')
plt.grid()
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('Sentiments over Time')
st.pyplot()
elif page == "Company Profile":
snp500 = pd.read_csv("./Datasets/SP500.csv")
symbols = snp500['Symbol'].sort_values().tolist()
ticker = st.sidebar.selectbox(
'Choose a S&P 500 Stock',
symbols)
stock = yf.Ticker(ticker)
stock = yf.Ticker(ticker)
info = stock.info
st.title('Company Profile')
st.subheader(info['longName'])
st.markdown('** Sector **: ' + info['sector'])
st.markdown('** Industry **: ' + info['industry'])
st.markdown('** Phone **: ' + info['phone'])
st.markdown('** Address **: ' + info['address1'] + ', ' + info['city'] + ', ' + info['zip'] + ', ' + info['country'])
st.markdown('** Website **: ' + info['website'])
st.markdown('** Business Summary **')
st.info(info['longBusinessSummary'])
fundInfo = {
'Enterprise Value (USD)': info['enterpriseValue'],
'Enterprise To Revenue Ratio': info['enterpriseToRevenue'],
'Enterprise To Ebitda Ratio': info['enterpriseToEbitda'],
'Net Income (USD)': info['netIncomeToCommon'],
'Profit Margin Ratio': info['profitMargins'],
'Forward PE Ratio': info['forwardPE'],
'PEG Ratio': info['pegRatio'],
'Price to Book Ratio': info['priceToBook'],
'Forward EPS (USD)': info['forwardEps'],
'Beta ': info['beta'],
'Book Value (USD)': info['bookValue'],
'Dividend Rate (%)': info['dividendRate'],
'Dividend Yield (%)': info['dividendYield'],
'Five year Avg Dividend Yield (%)': info['fiveYearAvgDividendYield'],
'Payout Ratio': info['payoutRatio']
}
                fundDF = pd.DataFrame.from_dict(fundInfo, orient='index')
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 2 17:13:01 2017
@author: kcarnold
"""
import hashlib
import random
import pickle
import numpy as np
import pandas as pd
#%%
#data_file = 'data/analysis_study4_2017-04-02T17:14:44.194603.pkl'
#data_file = 'data/analysis_study4_2017-04-02T20:37:11.374099.pkl'
#data_file = 'data/analysis_study4_2017-04-02T21:09:39.528242.pkl'
#data_file = 'data/analysis_study4_2017-04-04T13:11:10.932814.pkl'
data_file = 'data/analysis_funny_2017-04-07T09:58:07.316857.pkl'
log_data, survey_data = pickle.load(open(data_file, 'rb'))
participants = sorted(log_data.keys())
#%%
def split_randomly_without_overlap(remaining_views, chunk_size, rs):
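    """Greedily split items into chunks of `chunk_size` without repeating an
    item within a chunk: at each step pick at random (via `rs`) among the
    items with the most remaining views and decrement their counters."""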
chunks = []
while sum(remaining_views) >= chunk_size:
chunk = []
for i in range(chunk_size):
mrv = max(remaining_views)
opts = [i for i, rv in enumerate(remaining_views) if rv == mrv and i not in chunk]
# item = np.argmax(remaining_views)
item = rs.choice(opts)
assert item not in chunk
chunk.append(item)
remaining_views[item] -= 1
chunks.append(chunk)
return chunks
#split_randomly_without_overlap([1]*10, 4, rs=random.Random(0))
if False:
CHUNK_SIZE = 4
VIEWS_PER_ITEM = 3
splits = split_randomly_without_overlap(len(participants), CHUNK_SIZE, VIEWS_PER_ITEM, rs=random.Random(0))
data = [{
"pages": [[
dict(participant_id=participants[idx], cond=block['condition'], text=block['finalText']) for block in all_log_analyses[participants[idx]]['blocks']]
for idx in chunk],
"attrs": ["food", "drinks", "atmosphere", "service", "value"],
} for chunk in splits]
pd.DataFrame(dict(data=[json.dumps(d) for d in data])).to_csv(f'analyzed_{"_".join(participants)}.csv', index=False)
#%%
# to_rate = [['852f7a', '88d3ad', '0cb74f', 'f31d92'], ['4edc26', '885dae', 'a997ed', '8c01ef'], ['773fa0', '43cd2c', '706d74', '7d5d97']]
to_rate = [participants]#list(cytoolz.partition(4, non_excluded_participants))
data = [{
"pages": [[
dict(participant_id=participant_id, cond=block['condition'], text=block['finalText']) for block in all_log_analyses[participant_id]['blocks']]
for participant_id in chunk],
"attrs": ["food", "drinks", "atmosphere", "service", "value"],
} for chunk in to_rate]
#%%
pd.DataFrame(dict(data=[json.dumps(d) for d in data])).to_csv(f'to_rate_{run_id}.csv', index=False)
#%%
# Dump for spreadsheet raters
def should_flip(participant_id):
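    """Deterministic per-participant coin flip: seed a RandomState from the
    SHA-256 digest of the id, so the same id always gives the same answer."""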
return np.random.RandomState(
np.frombuffer(hashlib.sha256(participant_id.encode('utf8')).digest(), dtype=np.uint32)
).rand() < .5
def make_participant_hash(participant_id):
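    """Short, stable identifier: first 4 hex chars of the SHA-256 of the id."""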
return hashlib.sha256(participant_id.encode('utf8')).hexdigest()[:4]
#%%
rate_round_1 = ['8ddf8b', '6a8a4c', '8e4d93', 'a178d3']
rate_round_2 =['10317e', '3822a7', '42a2d1', '51aa50', '60577e', '72b6f6', '83ada3', '993876', 'aae8e4', 'ec0620']
rate_round_3 = ['10f0dc', 'ac1341', 'b2d633', 'c8963d']
rate_round_4 = ['7939c9', '8a8a64', 'bb9486', 'c7ffcb']
rate_round_5 = ['ab938b']
all_rated = set(rate_round_1 + rate_round_2 + rate_round_3 + rate_round_4 + rate_round_5)
#%%
sorted(set(participants) - set(all_rated))
#%%
texts = {pid: [block['finalText'] for block in data['blocks']] for pid, data in log_data.items()}
#texts = {pid: [block['condition'] for block in data['blocks']] for pid, data in log_data.items()}
#%%
import contextlib
def dump_rating_task(basename, participants, texts_by_participant_id):
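    """Write '{basename}-reviews.txt' with each participant's texts in A/B
    order (flipped for roughly half of the authors) and '{basename}-results.csv'
    with one blank rating row per participant hash and attribute."""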
participant_hashes = []
with open(f'{basename}-reviews.txt', 'w') as f, contextlib.redirect_stdout(f):
for participant_id in participants:
texts = texts_by_participant_id[participant_id]
if should_flip(participant_id):
texts = texts[::-1]
participant_hash = make_participant_hash(participant_id)
participant_hashes.append(participant_hash)
print()
print(participant_hash)
print('----')
for i, text in enumerate(texts):
# text_hash = hashlib.sha256(text.encode('utf8')).hexdigest()[:2]
name_str = 'AB'[i]
print(f"{participant_hash}-{name_str}")
print(text.replace('\n',' '))
print()
with open(f'{basename}-results.csv', 'w') as f, contextlib.redirect_stdout(f):
for participant_hash in participant_hashes:
for attr in ["food", "drinks", "atmosphere", "service", "value", "detailed", "written", "quality"]:
print(f"{participant_hash},{attr},,")
#%%
dump_rating_task('data/detail_ratings/input batches/funny1', list(participants), texts)
#%%
participants = sorted(list(log_data.keys()))
conditions = []
for author_id in participants:
author_conds = log_data[author_id]['conditions']
if should_flip(author_id):
rating_conds = author_conds[::-1]
else:
rating_conds = author_conds
conditions.append([author_id, rating_conds[0], rating_conds[1], ','.join(author_conds)])
conditions_as_rated = pd.DataFrame(conditions, columns=['author_id', 'cond_A', 'cond_B', 'author_conds'])
import requests
from model.parsers import model as m
import pandas as pd
import datetime
dataset = m.initialize()
unique_dates = list()
raw_data = requests.get('https://api.covid19india.org/states_daily.json')
raw_json = raw_data.json()
for item in raw_json['states_daily']:
if item['date'] not in unique_dates:
unique_dates.append(item['date'])
for date in unique_dates:
for item in raw_json['states_daily']:
if date == item['date']:
for state in dataset:
if date not in state:
state[date] = dict()
state[date][item['status']] = item[state['code']]
def date_validate(date_text):
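    """Return 1 if date_text parses as dd-Mmm-yy (e.g. 26-Mar-20); otherwise
    print a message and return 0."""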
    try:
        datetime.datetime.strptime(date_text, '%d-%b-%y')
        return 1
    except ValueError:
        print("Incorrect date format, should be dd-Mmm-yy")
        return 0
def state_code_validate(state_code):
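    """Return 1 if state_code matches one of the codes in the dataset,
    otherwise print a message and return 0."""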
unique_states = list()
for item in dataset:
unique_states.append(item['code'])
if state_code in unique_states:
return 1
else:
print('Please enter a valid state code')
return 0
def needs_patch(date_to_fetch, state_code):
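    """Return True for the hard-coded date/state combinations whose values
    are overridden locally via apply_patch instead of the API data."""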
if (date_to_fetch == '26-Mar-20' and state_code == 'ap') or (date_to_fetch == '16-Mar-20' and state_code == 'mp'):
return True
return False
def apply_patch(date_to_fetch, state_code):
if date_to_fetch == '26-Mar-20' and state_code == 'ap':
return {'Confirmed': '1', 'Recovered': '0', 'Deceased': '0'}
if date_to_fetch == '16-Mar-20' and state_code == 'mp':
return {'Confirmed': '0', 'Recovered': '0', 'Deceased': '0'}
def fetch_by_date_and_code(date_to_fetch, state_code):
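    """Return the raw {'Confirmed', 'Recovered', 'Deceased'} record reported
    on date_to_fetch for the given state code, applying the local patches
    first; prints a message if the date is unknown."""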
if(needs_patch(date_to_fetch, state_code)):
return apply_patch(date_to_fetch, state_code)
if date_to_fetch in unique_dates:
for state in dataset:
if state['code'] == state_code:
if date_to_fetch in state:
return state[date_to_fetch]
else :
print('date does not exist')
def cumulative_datewise_data(date_to_fetch, state_code):
should_stop = False
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
print(unique_date, fetch_by_date_and_code(unique_date, state_code))
if should_stop:
break
def cumulative_data(date_to_fetch, state_code):
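    """Sum the daily Confirmed/Recovered/Deceased counts for state_code over
    all dates up to and including date_to_fetch; returns 0 for unknown dates."""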
should_stop = False
cumulative_dict = dict()
if date_to_fetch in unique_dates:
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
returned_dict = fetch_by_date_and_code(unique_date, state_code)
for key in returned_dict:
if key in cumulative_dict:
cumulative_dict[key] += int(returned_dict[key])
else:
cumulative_dict[key] = int(returned_dict[key])
if should_stop:
break
return cumulative_dict
else:
return 0
def cumulative_series_datewise_data(date_to_fetch, state_code):
should_stop = False
cumulative_series_datewise_dict = dict()
if date_to_fetch in unique_dates:
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
cumulative_series_datewise_dict[unique_date] = cumulative_data(unique_date, state_code)
if should_stop:
break
return cumulative_series_datewise_dict
else:
print('date does not exist')
def cumulative_last_3_days(state_code, should_print = False):
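    """Return (and optionally print) the cumulative totals for the last three
    reported dates for the given state code."""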
    resultset = dict()
    for unique_date in unique_dates[-3:]:
        resultset[unique_date] = cumulative_data(unique_date, state_code)
        if should_print:
            print(unique_date, resultset[unique_date])
    return resultset
def cumulative_last_3_days_all_states(choice):
resultset = dict()
for state in dataset:
resultset[state['name']] = cumulative_last_3_days(state['code'], False)
return resultset
def total_count(state_code):
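    """Return cumulative Confirmed/Recovered/Deceased totals for state_code
    across all reported dates."""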
cumulative_dict = dict()
for unique_date in unique_dates:
returned_dict = fetch_by_date_and_code(unique_date, state_code)
for key in returned_dict:
if key in cumulative_dict:
cumulative_dict[key] += int(returned_dict[key])
else:
cumulative_dict[key] = int(returned_dict[key])
return cumulative_dict
def make_data_frame():
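    """Build a DataFrame with one row per state/UT (last catalogue entry
    excluded) and its total Confirmed, Recovered and Deceased counts."""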
unique_states = list()
confirmed_list = list()
recovery_list = list()
deceased_list = list()
for state in dataset[:-1]:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset[:-1]:
status = total_count(state['code'])
confirmed_list.append(status['Confirmed'])
recovery_list.append(status['Recovered'])
deceased_list.append(status['Deceased'])
data = {'STATE/UT':unique_states, 'Confirmed':confirmed_list, 'Recovered':recovery_list, 'Deceased':deceased_list}
df = pd.DataFrame(data, columns = ['STATE/UT', 'Confirmed', 'Recovered', 'Deceased'])
return df
def cumulative_last_3_days_confirmed_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Confirmed'])
data = {'STATE/UT':unique_states, unique_dates[0]:dates[unique_dates[0]], unique_dates[1]:dates[unique_dates[1]], unique_dates[2]:dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def cumulative_last_3_days_recovered_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Recovered'])
data = {'STATE/UT':unique_states, unique_dates[0]:dates[unique_dates[0]], unique_dates[1]:dates[unique_dates[1]], unique_dates[2]:dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def cumulative_last_3_days_deceased_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Deceased'])
data = {'STATE/UT':unique_states, unique_dates[0]:dates[unique_dates[0]], unique_dates[1]:dates[unique_dates[1]], unique_dates[2]:dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def all_data_confirmed():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Confirmed'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def all_data_recovered():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Recovered'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def all_data_deceased():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Deceased'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def cumulative_all_data_confirmed():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
cumulative_dict = cumulative_data(date, state['code'])
dates[date].append(cumulative_dict['Confirmed'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def cumulative_all_data_recovered():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
cumulative_dict = cumulative_data(date, state['code'])
dates[date].append(cumulative_dict['Recovered'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def cumulative_all_data_deceased():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
cumulative_dict = cumulative_data(date, state['code'])
dates[date].append(cumulative_dict['Deceased'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
    df = pd.DataFrame(data)
    return df
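# Illustrative note (a sketch, not from the original script): the helpers above assume a
# module-level `dataset` that is a list of per-state dicts shaped roughly like
#
#   dataset = [
#       {"name": "Kerala", "code": "KL",
#        "2020-03-01": {"Confirmed": 1, "Recovered": 0, "Deceased": 0},
#        "2020-03-02": {"Confirmed": 2, "Recovered": 1, "Deceased": 0}},
#       ...
#   ]
#
# so every key other than "name" and "code" is a date whose value holds the three daily
# counts, which is why the loops above skip those two keys.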
import os
import pytest
import pandas as pd
import numpy as np
from collections import OrderedDict
from ..catalog_matching import (crossmatch,
select_min_dist,
post_k2_clean,
find_campaigns,
match_k2_epic_campaign,
extract_extensions,
assign_PMem_mean,
merge_k2_info_and_protocol,
crossmatch_multiple_catalogs,
pick_members_and_produce_k2_search_input,
)
ra = 'RAJ2000'
dec = 'DEJ2000'
def test_pick_members_and_produce_k2_search_input():
#---------------------------------------------------------
# Produce fake data
cross = pd.DataFrame(dict(zip(["RAJ2000_1", "DEJ2000_1",
"somecolumn", "PMem_1",
"PMem_2", "PMem_3"],
[[20, 20, 20],
[20, 20, 20],
["rolf", "hagen", "busch"],
[.1, .8, .9],
[.1, .8, .9],
[.9, np.nan, .9]],
)))
sname, name = "test", "Test"
coords = "1"
series = cross.loc[2,:] #this row should be preserved in result
outfile = ('catalog_matching/matched_catalogs/'
'membership_matches/radec/{}_radec.csv'
.format(sname))
#---------------------------------------------------------
# Call function
res = pick_members_and_produce_k2_search_input(cross, sname,
name, coords=coords)
df = pd.read_csv(outfile, header=None)
#---------------------------------------------------------
# Check if the RA/Dec file is correct:
assert df.loc[0,0] == 20
assert df.loc[0,1] == 20
assert df.shape[0] == 1
assert df.shape[1] == 2
# Remove output file
os.remove(outfile)
# Check if the DataFrame was processed correctly
assert res.shape[0] == 1 # only one member is left
assert (res.loc[2,series.index] == series).all() # input columns are preserved
def test_crossmatch_multiple_catalogs():
#-----------------------------------------------------------
# Create a fake data set
diff = 1.49/3600 # 1.5 arcsec distance
c1 = pd.DataFrame(dict(zip(["RAJ2000_1","DEJ2000_1","PMem_1"],
[[21,20,19],[10,10,10],[.9,.8,.7]])))
c2 = pd.DataFrame(dict(zip(["RAJ2000_2","DEJ2000_2","PMem_2","binary_2"],
[[21,20+diff,19+3*diff],[10,10,10],
[.75,.85,.3],[.1,.02,.11]])))
c3 = pd.DataFrame(dict(zip(["RAJ2000_3","DEJ2000_3","PMem_3"],
[[np.nan,20-diff,19],[10,10,10],[.9,.9,.9]])))
d = {"1":c1, "2":c2, "3":c3}
renamed_catalogs = OrderedDict(sorted(d.items(), key=lambda t: t[0])) # order the dicts, not necessary for performance but helpful for testing
name = "Test"
sname = "test"
#-----------------------------------------------------------
# Call the function
res = crossmatch_multiple_catalogs(renamed_catalogs, name, sname,
arcsec=3., plot=True, union=True,
bijective=True)
#-----------------------------------------------------------
# Do some checks
# Check that the table size is correct
assert res.shape[0] == 5
assert res.shape[1] == 16
# Check that relevant columns are created with the right names/values
assert "DEJ2000_1_2_3" in res.columns.tolist()
assert set(c1.columns.values).issubset(set(res.columns.values))
assert set(c2.columns.values).issubset(set(res.columns.values))
assert set(c3.columns.values).issubset(set(res.columns.values))
# Check that the distance calculation was done correctly
assert res.loc[1, "dist_1_2_3"] == pytest.approx(2.235, rel=.1)
assert res.loc[2, "dist_1_2_3"] == 0.
# Check that NaNs stay NaNs:
assert np.isnan(res.loc[4, "RAJ2000_3"])
# Check individual values and NaNs:
assert res.loc[2, "RAJ2000_1_2_3"] == 19.
assert (res.DEJ2000_1_2_3.values == 10.).all()
assert res.dist_1_2.dropna().shape[0] == 2
assert res.dist_1_2_3.dropna().shape[0] == 2
def test_merge_k2_info_and_protocol():
# Fake data
folder = "catalog_matching/tests/exfiles/"
sname = "tiny"
df = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union_k2.csv')
racols = list(filter(lambda k: ra in k, df.columns.values))
deccols = list(filter(lambda k: dec in k, df.columns.values))
df["{}_mean".format(ra)] = df[racols].mean(axis=1)
df["{}_mean".format(dec)] = df[deccols].mean(axis=1)
# Call function
merge_k2_info_and_protocol(df, "tiny", "mean", folder=folder)
data = pd.read_csv("{}members.txt".format(folder))
assert data.shape[1] == 6
assert data.shape[0] == 1
assert data.nlikelymembers[0] == 7
assert data.LCs[0] == 5
assert data.cluster[0] == "tiny"
# Remove the test file, as it would get longer and longer over time.
os.remove("{}members.txt".format(folder))
def test_assign_PMem_mean():
# Test a working example:
df = pd.DataFrame(index=np.arange(10),
columns=["RAJ2000_1","DEJ2000_1","PMem_1",
"RAJ2000","DEJ2000_holla","PMem_holla",
"RAJ2000_2","DEJ2000_BRm2","PMem_dd",])
df["PMem_dd"] = np.arange(0,1.0,.1)
df["PMem_holla"] = np.arange(0,1.0,.1)
df["PMem_1"] = [.1] * 5 + [np.nan] * 5
df = assign_PMem_mean(df)
assert (df.PMem_mean.tolist()
== pytest.approx([.1/3, .1, .5/3, .7/3, .3, .5, .6, .7, .8, .9]))
def test_extract_extensions():
#---------------------------
# Create fake data
df = pd.DataFrame(columns=["RAJ2000_1","DEJ2000_1","PMem_1",
"RAJ2000","DEJ2000_holla","PMem_holla",
"RAJ2000_2","DEJ2000_BRm2","PMem_dd",])
# Test output as extensions
assert extract_extensions(df, prefix="RAJ2000") == ["1","2"]
assert extract_extensions(df, prefix="DEJ2000") == ["1","holla","BRm2"]
assert extract_extensions(df, prefix="PMem") == ["1","holla","dd"]
# Test output as column names
assert extract_extensions(df, prefix="PMem", retcolnames=True) == ['PMem_1', 'PMem_holla', 'PMem_dd']
assert extract_extensions(df, prefix="RAJ2000", retcolnames=True) == ["RAJ2000_1","RAJ2000_2"]
assert extract_extensions(df, prefix="DEJ2000", retcolnames=True) == ["DEJ2000_1","DEJ2000_holla","DEJ2000_BRm2"]
# Must pass a string-convertible prefix
with pytest.raises(TypeError):
extract_extensions(df, prefix=None)
with pytest.raises(TypeError):
extract_extensions(df, prefix=1.44)
assert set(extract_extensions(df, prefix="", retcolnames=True)) == set(df.columns.tolist()) - {"RAJ2000"}
def test_match_k2_epic_campaign():
# set up fake data
testdf = pd.DataFrame({"RAJ2000":[57.13268195367, 132.8329500, 59.],
"DEJ2000":[24.03288651412, 11.7834400, -25.],
"smh":["blo","blo","blo"]})
# run function
resdf = match_k2_epic_campaign(testdf)
# test some values from the results
assert resdf.Campaign[0] == 4.
assert resdf.Campaign[1] == 5.
assert resdf.Campaign[2] == 16.
assert resdf.Campaign[3] == 18.
assert np.isnan(resdf.Campaign[4])
assert (resdf.smh.values == "blo").all()
assert resdf.loc[4,:].DEJ2000 == -25.
assert resdf.loc[4,:].RAJ2000 == 59.
assert resdf.loc[0,:].EPIC == '211066477'
assert resdf.loc[1,:].EPIC == '211409345'
assert resdf.loc[2,:].EPIC == '211409345'
assert resdf.loc[3,:].EPIC == '211409345'
def test_find_campaigns():
# success modes are tested in match_k2_epic_campaign
# test failure modes
for t1 in [find_campaigns("blo ddd"), find_campaigns("67777 9888"),
find_campaigns("0 nan")]:
assert np.isnan(t1[0][0])
assert np.isnan(t1[1])
def test_crossmatch():
name = 'Tiny Cluster'
sname ='tiny'
a = pd.read_csv('catalog_matching/tests/exfiles/tiny_a.csv')
b = pd.read_csv('catalog_matching/tests/exfiles/tiny_b.csv')
d = pd.read_csv('catalog_matching/tests/exfiles/tiny_d.csv')#modified b
keys = ['a','b']
df = crossmatch(a, b, keys, name, sname, arcsec=5., plot=False)
t = pd.read_csv('catalog_matching/tests/exfiles/crossmatch_result.csv')
assert t.equals(df)
df = crossmatch(a, b, keys, name, sname, arcsec=5., plot=False, union=True)
t = pd.read_csv('catalog_matching/tests/exfiles/crossmatch_result_union.csv')
assert t.equals(df)
df = crossmatch(b, a, keys[::-1], name, sname, arcsec=5., plot=False, union=True)
t = pd.read_csv('catalog_matching/tests/exfiles/crossmatch_result_union_reverse.csv')
assert t.equals(df)
df = crossmatch(a, d, ['a','d'], name, sname, arcsec=5., plot=False, union=True, bijective=True)
    t = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union.csv')
    assert t.equals(df)
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import numpy as np
def compareCounts(fileList, column):
df = pd.DataFrame()
for i in fileList:
path =Path(i)
name = path.stem
src = gpd.read_file(i)
#print(src)
src = src.loc[src['BU_CODE'].str.contains('BU0363')]
if 'BU_CODE' not in df.columns:
df['BU_CODE'] = src['BU_CODE']
df['{}'.format(name)] = src['{}'.format(column)]
#df = df.join(src.set_index('BU_CODE'), lsuffix='_l')
else:
df['{}'.format(name)] = src['{}'.format(column)]
print(df.head(3))
for col in df.columns:
if col!="ams_l1_totalpop_2018" and col!= "BU_CODE":
print(col)
df['{}'.format(col)] = df['{}'.format(col)].fillna(0).astype(np.int64)
df["dif_{}".format(col)] = df[col] - df["ams_l1_totalpop_2018"]
df["Error_{}".format(col)] = (df[col] - df["ams_l1_totalpop_2018"]) / df["ams_l1_totalpop_2018"].sum() * 100
df["Accuracy_{}".format(col)] = (df[col] / df["ams_l1_totalpop_2018"]) * 100
df["PrE_{}".format(col)] = (df[col] - df["ams_l1_totalpop_2018"]) * (df["ams_l1_totalpop_2018"] / df["ams_l1_totalpop_2018"].sum()) * 100#
#frame['dif_sum_2018_l1_totalpop'][frame.sum_2018_l1_totalpop == 0] = 0
#frame['Error_sum_2018_l1_totalpop'][frame.sum_2018_l1_totalpop == 0] = 0
return df
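# Illustrative call (file names and column are placeholders): every input layer is read,
# restricted to BU_CODEs starting with "BU0363", and its `column` values are compared
# against the layer whose file stem is "ams_l1_totalpop_2018":
#   files = ["ams_l1_totalpop_2018.geojson", "estimate_a.geojson", "estimate_b.geojson"]
#   summary = compareCounts(files, "sum")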
def BoxPlotCBS(directory, df, xLegend):
sns.boxplot(x=xLegend, y="Type", data=pd.melt(df, var_name='Type', value_name=xLegend), linewidth=1.0)
#plt.show()
plt.savefig(directory, dpi=300, bbox_inches='tight',)
plt.cla()
plt.close()
def BoxPlotCBS_NO(directory, df, xLegend):
for column in df.columns:
Q1 = df['{}'.format(column)].quantile(0.25)
Q3 = df['{}'.format(column)].quantile(0.75)
IQR = Q3 - Q1 #IQR is interquartile range.
filter = (df['{}'.format(column)] >= Q1 - 1.5 * IQR) & (df['{}'.format(column)] <= Q3 + 1.5 *IQR)
df =df.loc[filter]
sns.boxplot(x=xLegend, y="Type", data=pd.melt(df, var_name='Type', value_name=xLegend), linewidth=1.0)
#sns.swarmplot(x="Accuracy", y="Type", data=pd.melt(df, var_name='Type', value_name='Accuracy'), color=".15")
#plt.show()
plt.savefig(directory, dpi=300, bbox_inches='tight',)
plt.cla()
plt.close()
def BoxPlot(directory, fileList, column):
df = pd.DataFrame()
for i in fileList:
path =Path(i)
name = path.stem
src = gpd.read_file(i)
df['{}'.format(name)] = src['{}'.format(column)]
print(df.head(3))
    sns.boxplot(x="Accuracy", y="Type", data=pd.melt(df, var_name='Type', value_name='Accuracy'), linewidth=1.0)
    plt.savefig(directory, dpi=300, bbox_inches='tight',)
    plt.cla()
    plt.close()
import streamlit as st
import pandas as pd
import joblib
from PIL import Image
model = open("Knn_Classifier.pkl","rb")
model = joblib.load(model)
st.title("Iris flower species Classification App")
setosa= Image.open("setosa.jpg")
versicolor= Image.open('versiclor.jpg')
virginica = Image.open('virginia.jpg')
virginica = virginica.resize((500,500))
setosa = setosa.resize((500,500))
versicolor = versicolor.resize((500,500))
st.sidebar.title("Features")
parameter_list=['Sepal length (cm)','Sepal Width (cm)','Petal length (cm)','Petal Width (cm)']
parameter_input_values=[]
parameter_default_values=['5.2','3.2','4.2','1.2']
values=[]
#Display
for parameter, parameter_df in zip(parameter_list, parameter_default_values):
values= st.sidebar.slider(label=parameter, key=parameter,value=float(parameter_df), min_value=0.0, max_value=8.0, step=0.1)
parameter_input_values.append(values)
input_variables = pd.DataFrame([parameter_input_values], columns=parameter_list, dtype=float)
from __future__ import annotations
from collections.abc import MutableMapping
from typing import (
Any,
Callable,
ItemsView,
Iterable,
Iterator,
KeysView,
List,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
ValuesView,
)
import numpy as np
from pandas import DataFrame, Index, MultiIndex, Series
from linearmodels.typing import AnyArray, Label
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
_VT_co = TypeVar("_VT_co", covariant=True)
class SupportsKeysAndGetItem(Protocol[_KT, _VT_co]):
def keys(self) -> Iterable[_KT]:
...
def __getitem__(self, __k: _KT) -> _VT_co:
...
def _new_attr_dict_(*args: Iterable[Tuple[Any, Any]]) -> AttrDict:
attr_dict = AttrDict()
for k, v in args:
attr_dict[k] = v
return attr_dict
class AttrDict(MutableMapping):
"""
Ordered dictionary-like object that exposes keys as attributes
"""
def update(
self,
*args: Union[SupportsKeysAndGetItem[Any, Any], Iterable[Tuple[Any, Any]]],
**kwargs: Any,
) -> None:
"""
Update AD from dictionary or iterable E and F.
If E is present and has a .keys() method, then does: for k in E: AD[k] = E[k]
If E is present and lacks a .keys() method, then does: for k, v in E: AD[k] = v
In either case, this is followed by: for k in F: AD[k] = F[k]
"""
self.__private_dict__.update(*args, **kwargs)
def clear(self) -> None:
"""Remove all items from AD."""
self.__private_dict__.clear()
def copy(self) -> AttrDict:
"""Create a shallow copy of AD"""
ad = AttrDict()
for key in self.__private_dict__.keys():
ad[key] = self.__private_dict__[key]
return ad
def keys(self) -> KeysView[Any]:
"""Return an ordered list-like object providing a view on AD's keys"""
return self.__private_dict__.keys()
def items(self) -> ItemsView[Any, Any]:
"""Return an ordered list-like object providing a view on AD's items"""
return self.__private_dict__.items()
def values(self) -> ValuesView[Any]:
"""Return an ordered list-like object object providing a view on AD's values"""
return self.__private_dict__.values()
def pop(self, key: Label, default: Any = None) -> Any:
"""
Remove specified key and return the corresponding value.
If key is not found, default is returned if given, otherwise KeyError is raised
"""
return self.__private_dict__.pop(key, default)
def __reduce__(
self,
) -> Tuple[
Callable[[Iterable[Tuple[Any, Any]]], "AttrDict"], Tuple[Tuple[Any, Any], ...]
]:
return _new_attr_dict_, tuple((k, v) for k, v in self.items())
def __len__(self) -> int:
return self.__private_dict__.__len__()
def __repr__(self) -> str:
out = self.__private_dict__.__str__()
return "AttrDict" + out
def __str__(self) -> str:
return self.__repr__()
def __init__(
self, *args: Union[Mapping[Any, Any], Sequence[Tuple[Any, Any]]], **kwargs: Any
) -> None:
self.__dict__["__private_dict__"] = dict(*args, **kwargs)
def __contains__(self, item: Label) -> bool:
return self.__private_dict__.__contains__(item)
def __getitem__(self, item: Label) -> Any:
return self.__private_dict__[item]
def __setitem__(self, key: Label, value: Any) -> None:
if key == "__private_dict__":
raise KeyError("__private_dict__ is reserved and cannot be set.")
self.__private_dict__[key] = value
def __delitem__(self, key: Label) -> None:
del self.__private_dict__[key]
def __getattr__(self, key: Label) -> Any:
if key not in self.__private_dict__:
raise AttributeError
return self.__private_dict__[key]
def __setattr__(self, key: Label, value: Any) -> None:
if key == "__private_dict__":
raise AttributeError("__private_dict__ is invalid")
self.__private_dict__[key] = value
def __delattr__(self, key: Label) -> None:
del self.__private_dict__[key]
def __dir__(self) -> Iterable[str]:
out = [str(key) for key in self.__private_dict__.keys()]
out += list(super(AttrDict, self).__dir__())
filtered = [key for key in out if key.isidentifier()]
return sorted(set(filtered))
def __iter__(self) -> Iterator[Label]:
return self.__private_dict__.__iter__()
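# Example usage (illustrative only; mirrors the behaviour implemented above):
#
#   >>> ad = AttrDict(alpha=1, beta=2)
#   >>> ad.alpha                 # keys are exposed as attributes
#   1
#   >>> ad["gamma"] = 3          # and remain accessible as mapping items
#   >>> sorted(ad.keys())
#   ['alpha', 'beta', 'gamma']
#   >>> ad.pop("beta")
#   2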
def ensure_unique_column(col_name: str, df: DataFrame, addition: str = "_") -> str:
while col_name in df:
col_name = addition + col_name + addition
return col_name
def panel_to_frame(
x: Optional[AnyArray],
items: Sequence[Label],
major_axis: Sequence[Label],
minor_axis: Sequence[Label],
swap: bool = False,
) -> DataFrame:
"""
Construct a multiindex DataFrame using Panel-like arguments
Parameters
----------
x : ndarray
        3-d array with size nitems, nmajor, nminor
items : list-like
List like object with item labels
major_axis : list-like
List like object with major_axis labels
minor_axis : list-like
List like object with minor_axis labels
swap : bool
        Swap the major and minor axes
Notes
-----
This function is equivalent to
Panel(x, items, major_axis, minor_axis).to_frame()
if `swap` is True, it is equivalent to
Panel(x, items, major_axis, minor_axis).swapaxes(1,2).to_frame()
"""
nmajor = np.arange(len(major_axis))
nminor = np.arange(len(minor_axis))
final_levels = [major_axis, minor_axis]
    mi = MultiIndex.from_product([nmajor, nminor])
import pandas as pd
from flask import Flask, redirect, request, url_for,render_template, Response, jsonify
from application import app
import requests
import hashlib
import json
@app.route('/')
@app.route('/home')
def home():
return render_template('homepage.html')+('<br><br> <a href="/signup_home" type="button">Create an account</a> </br>')
@app.route('/login', methods=['GET','POST'])
def login():
return render_template('login.html')
@app.route('/signup_home', methods=['GET','POST'])
def signup_home():
return '<h1>Create a new BChain account</h1><br>'+ render_template('signup.html')#,title='add_item')#+('<br><br> <a href="/products" type="button">Return to Products home</a> </br>')
@app.route('/signup', methods=['GET','POST'])
def signup():
if request.method == 'POST':
keypair = requests.get('http://keypair-generator:5001/keys_generator').json()
creds = requests.get('http://credentials:5002/accounts_database').json()
        current_data = pd.DataFrame.from_dict(creds, orient='index')
import numpy as np
import pandas as pd
import utils
class Indicators:
def __init__(self, stock, start_date, end_date):
self.stock = stock
self.start_date = start_date
self.end_date = end_date
self.data = utils.read_stock_data(stock)
def calculate_all_indicators(self):
indicators = [
self.adj_close_price(),
self.bollinger_bands(),
self.cci(4),
self.cci(12),
self.cci(20),
self.ema(2),
self.ema(6),
self.ema(10),
self.ema(12),
self.macd(),
self.mfi(14),
self.mfi(16),
self.mfi(18),
self.obv(),
self.px_volume(),
self.rsi(6),
self.rsi(12),
self.sma(3),
self.sma(10),
self.trix(),
self.volatility(2),
self.volatility(4),
self.volatility(6),
self.volatility(8),
self.volatility(10),
self.volatility(12),
self.volatility(14),
self.volatility(16),
self.volatility(18),
self.volatility(20),
self.willr()
]
dates = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)['Date']
df = pd.concat(indicators, axis=1)
return df.set_index(dates)
def adj_close_price(self):
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
return pd.DataFrame(df['Close'].values, columns=['Adj Close Price'])
def bollinger_bands(self):
window_len = 20
def Bollinger_Bands(stock_price, window_size, num_of_std):
rolling_mean = stock_price['Close'].rolling(window=window_size).mean()[window_size-1:]
rolling_std = stock_price['Close'].rolling(window=window_size).std()[window_size-1:]
upper_band = np.add(rolling_mean, rolling_std * num_of_std)
lower_band = np.subtract(rolling_mean, rolling_std * num_of_std)
return rolling_mean, upper_band, lower_band
prices = utils.include_n_days_before(self.data, window_len, self.start_date, self.end_date)
middle, upper, lower = Bollinger_Bands(prices, window_len, 2)
result_df = pd.DataFrame({'BB_Middle': middle.values, \
'BB_Upper': upper.values, 'BB_Lower': lower.values})
return result_df
def cci(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
df_after_start_date = df[num_days-1:]
def calculate_tp(t):
return(t['High']+t['Low']+t['Close'])/3
tp_df = df_after_start_date.apply(calculate_tp, 1)
# calculate TpAvg(t) where TpAvg(t,n)=Avg(Tp(t)) over [t, t-1, . . . , t-n+1];
tp_avg_df = df.apply(calculate_tp, 1)
tp_avg_df = tp_avg_df.rolling(window=num_days).mean()
tp_avg_df = tp_avg_df[(num_days-1):]
# calculate MD(t) where MD(t)=Avg(Abs(Tp(t)-TpAvg(t,n)));
md = np.mean(np.absolute(np.subtract(tp_df, tp_avg_df)))
        # calculate CCI(t) where CCI(t) = (Tp(t)-TpAvg(t,n))/(0.15*MD(t));
cci = np.subtract(tp_df, tp_avg_df)/(0.15*md)
return pd.DataFrame(cci.values, columns=['CCI_{0}'.format(num_days)])
def ema(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
ema = df['Close'].ewm(span=num_days).mean()
ema = ema[num_days-1:]
return pd.DataFrame(ema.values, columns=['EMA_{0}'.format(num_days)])
def macd(self):
n_slow = 26
n_fast = 12
n_signal = 9
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
# Calculate MACD
ema_slow = df['Close'].ewm(span=n_slow, min_periods=1).mean()
ema_fast = df['Close'].ewm(span=n_fast, min_periods=1).mean()
macd = np.subtract(ema_fast, ema_slow)
# Calculate MACD signal
macd_signal = macd.ewm(span=n_signal, min_periods=1).mean()
# Calculate MACD histogram
macd_hist = np.subtract(macd, macd_signal)
result_df = pd.DataFrame({'MACD': macd.values, \
'MACD_Sig': macd_signal.values, \
'MACD_Hist': macd_hist.values})
return result_df
def mfi(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
def Money_Flow_Index(window_df, tp_df, mf_df):
pos_mf = 0.0
neg_mf = 0.0
for i in range(len(window_df)):
tp = tp_df.iloc[i].item()
mf = mf_df.iloc[i].item()
if i == 0:
pos_mf += mf
else:
tp_before = tp_df.iloc[i-1].item()
if tp > tp_before:
pos_mf += mf
elif tp < tp_before:
neg_mf += mf
mfi = (pos_mf / (pos_mf + neg_mf)) * 100
return mfi
tp_df = (df['High']+df['Low']+df['Close'])/3
mf_df = tp_df * df['Volume']
col_name = 'MFI_{0}'.format(num_days)
mfi_df = pd.DataFrame(columns=[col_name])
for i in range(len(df)-num_days+1):
temp_df = df.iloc[i:i+num_days, :]
temp_tp_df = tp_df.iloc[i:i+num_days]
temp_mf_df = mf_df.iloc[i:i+num_days]
mfi = Money_Flow_Index(temp_df, temp_tp_df, temp_mf_df)
mfi_df = mfi_df.append(pd.DataFrame([mfi], columns=[col_name]), ignore_index=True)
return mfi_df
def momentum(self, num_days):
df = utils.include_n_days_before(self.data, num_days+1, self.start_date, self.end_date)
momentum = df['Close'].rolling(window=num_days+1)\
.apply(lambda t: t[num_days]-t[0])
momentum = momentum[num_days:]
return pd.DataFrame(momentum.values, columns=['MOM_{0}'.format(num_days)])
def obv(self):
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
obv_df = pd.DataFrame([0.0], columns=['OBV'])
obv = 0.0
for i in range(len(df)-1):
row_i = df.iloc[i]
row_i_1 = df.iloc[i+1]
volume = 0.0
if row_i_1['Close'] > row_i['Close']:
volume = row_i_1['Volume']
elif row_i_1['Close'] < row_i['Close']:
volume = row_i_1['Volume'] * -1
obv += volume
obv_df = obv_df.append(pd.DataFrame([obv], columns=['OBV']), ignore_index=True)
return obv_df
def px_volume(self):
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
df = df['Volume']
return pd.DataFrame(df.values, columns=['PX Volume'])
def rsi(self, num_days):
df = utils.include_n_days_before(self.data, num_days+1, self.start_date, self.end_date)
diff_df = df['Close'].diff()
diff_df = diff_df[1:]
avg_up = diff_df.where(lambda x: x > 0, other=0.0)\
.rolling(window=num_days, min_periods=num_days).mean()
avg_down = diff_df.where(lambda x: x < 0, other=0.0).abs()\
.rolling(window=num_days, min_periods=num_days).mean()
rsi = (avg_up / (avg_up + avg_down)) * 100
rsi = rsi[num_days-1:]
return pd.DataFrame(rsi.values, columns=['RSI_{0}'.format(num_days)])
def sma(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
sma = df['Close'].rolling(window=num_days).mean()
sma = sma[num_days-1:]
return pd.DataFrame(sma.values, columns=['SMA_{0}'.format(num_days)])
def trix(self):
current_df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
one_day_before_df = utils.include_n_days_before(self.data, 2, self.start_date, self.end_date)
def calculate_triple_ema(df):
i = 0
while i < 3:
df = df.ewm(span=12, min_periods=1).mean()
i += 1
return df
# TRIX(t) = TR(t)/TR(t-1) where TR(t)=EMA(EMA(EMA(Price(t)))) over n days period
tr_t = calculate_triple_ema(current_df['Close'])
tr_t_1 = calculate_triple_ema(one_day_before_df[0:-1]['Close'])
trix = np.divide(tr_t, tr_t_1)
        return pd.DataFrame(trix.values, columns=['TRIX'])
import pandas as pd
import numpy as np
# Lendo do data frame
df = pd.read_csv("https://pycourse.s3.amazonaws.com/bike-sharing.csv")
print(df.head())
print('\n************************************************************************************\n')
print(df.info())
print('\n************************************************************************************\n')
print(df.dtypes)
print('\n************************************************************************************\n')
df['datetime'] = pd.to_datetime(df['datetime'])
import os
import shutil
import zipfile
import torch
import torch.utils.data
from dataset import *
import pickle
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import numpy as np
import csv
from collections import Counter, defaultdict
import pandas as pd
from utils import check_and_mkdir
from scipy import stats
import re
import itertools
import functools
import random
import seaborn as sns
print = functools.partial(print, flush=True)
MAX_NO_UNBALANCED_FEATURE = 0
MIN_SUCCESS_RATE = 0.5 # 0.1
MIN_SUPPORT = MIN_SUCCESS_RATE * 100
# 5
# 5
print('Global MAX_NO_UNBALANCED_FEATURE: ', MAX_NO_UNBALANCED_FEATURE)
np.random.seed(0)
random.seed(0)
def IQR(s):
return [np.quantile(s, .5), np.quantile(s, .25), np.quantile(s, .75)]
def stringlist_2_list(s):
r = s.strip('][').replace(',', ' ').split()
r = list(map(float, r))
return r
def stringlist_2_str(s, percent=False, digit=-1):
r = s.strip('][').replace(',', ' ').split()
r = list(map(float, r))
if percent:
r = [x * 100 for x in r]
if digit == 0:
rr = ','.join(['{:.0f}'.format(x) for x in r])
elif digit == 1:
rr = ','.join(['{:.1f}'.format(x) for x in r])
elif digit == 2:
rr = ','.join(['{:.2f}'.format(x) for x in r])
elif digit == 3:
        rr = ','.join(['{:.3f}'.format(x) for x in r])
else:
rr = ','.join(['{}'.format(x) for x in r])
return rr
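# Quick illustration of the two string helpers above (values are made up):
#   stringlist_2_list("[0.1, 0.25, 0.5]")        -> [0.1, 0.25, 0.5]
#   stringlist_2_str("[0.123, 0.456]", True, 1)  -> '12.3,45.6'  (as percentages, 1 decimal)
#   stringlist_2_str("[0.123, 0.456]", False, 2) -> '0.12,0.46'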
def boot_matrix(z, B):
"""Bootstrap sample
Returns all bootstrap samples in a matrix"""
z = np.array(z).flatten()
n = len(z) # sample size
idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples
return z[idz]
def bootstrap_mean_ci(x, B=1000, alpha=0.05):
n = len(x)
# Generate boostrap distribution of sample mean
xboot = boot_matrix(x, B=B)
sampling_distribution = xboot.mean(axis=1)
quantile_confidence_interval = np.percentile(sampling_distribution, q=(100 * alpha / 2, 100 * (1 - alpha / 2)))
std = sampling_distribution.std()
# if plot:
# plt.hist(sampling_distribution, bins="fd")
return quantile_confidence_interval, std
def bootstrap_mean_pvalue(x, expected_mean=0., B=1000):
"""
Ref:
1. https://en.wikipedia.org/wiki/Bootstrapping_(statistics)#cite_note-:0-1
2. https://www.tau.ac.il/~saharon/StatisticsSeminar_files/Hypothesis.pdf
3. https://github.com/mayer79/Bootstrap-p-values/blob/master/Bootstrap%20p%20values.ipynb
4. https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_1samp.html?highlight=one%20sample%20ttest
Bootstrap p values for one-sample t test
    Returns bootstrap p value, test statistics and parametric p value"""
n = len(x)
orig = stats.ttest_1samp(x, expected_mean)
    # Generate bootstrap distribution of sample mean
x_boots = boot_matrix(x - x.mean() + expected_mean, B=B)
x_boots_mean = x_boots.mean(axis=1)
t_boots = (x_boots_mean - expected_mean) / (x_boots.std(axis=1, ddof=1) / np.sqrt(n))
p = np.mean(t_boots >= orig[0])
p_final = 2 * min(p, 1 - p)
# Plot bootstrap distribution
# if plot:
# plt.figure()
# plt.hist(x_boots_mean, bins="fd")
return p_final, orig
def bootstrap_mean_pvalue_2samples(x, y, equal_var=False, B=1000):
"""
Bootstrap hypothesis testing for comparing the means of two independent samples
Ref:
1. https://en.wikipedia.org/wiki/Bootstrapping_(statistics)#cite_note-:0-1
2. https://www.tau.ac.il/~saharon/StatisticsSeminar_files/Hypothesis.pdf
3. https://github.com/mayer79/Bootstrap-p-values/blob/master/Bootstrap%20p%20values.ipynb
4. https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_1samp.html?highlight=one%20sample%20ttest
    Bootstrap p values for the two-sample t test
    Returns bootstrap p value, test statistics and parametric p value"""
n = len(x)
orig = stats.ttest_ind(x, y, equal_var=equal_var)
pooled_mean = np.concatenate((x, y), axis=None).mean()
xboot = boot_matrix(x - x.mean() + pooled_mean,
B=B) # important centering step to get sampling distribution under the null
yboot = boot_matrix(y - y.mean() + pooled_mean, B=B)
sampling_distribution = stats.ttest_ind(xboot, yboot, axis=1, equal_var=equal_var)[0]
if np.isnan(orig[1]):
p_final = np.nan
else:
# Calculate proportion of bootstrap samples with at least as strong evidence against null
p = np.mean(sampling_distribution >= orig[0])
# RESULTS
# print("p value for null hypothesis of equal population means:")
# print("Parametric:", orig[1])
# print("Bootstrap:", 2 * min(p, 1 - p))
p_final = 2 * min(p, 1 - p)
return p_final, orig
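# Illustrative usage of the bootstrap helpers above (synthetic data, not study results):
#   rng = np.random.RandomState(0)
#   x, y = rng.normal(0.2, 1, 200), rng.normal(0.0, 1, 200)
#   ci, std = bootstrap_mean_ci(x, B=1000, alpha=0.05)        # 95% CI for the mean of x
#   p_boot, ttest = bootstrap_mean_pvalue(x, expected_mean=0.)
#   p2_boot, ttest2 = bootstrap_mean_pvalue_2samples(x, y)     # two-sample comparison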
def shell_for_ml(cohort_dir_name, model, niter=50, min_patients=500, stats=True, more_para=''):
cohort_size = pickle.load(open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
fo = open('shell_{}_{}.sh'.format(model, cohort_dir_name), 'w') # 'a'
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
# load others:
df = pd.read_excel(r'../data/repurposed_AD_under_trials_20200227.xlsx', dtype=str)
added_drug = []
for index, row in df.iterrows():
rx = row['rxcui']
gpi = row['gpi']
if pd.notna(rx):
rx = [x + '.pkl' for x in re.split('[,;+]', rx)]
added_drug.extend(rx)
if pd.notna(gpi):
gpi = [x + '.pkl' for x in re.split('[,;+]', gpi)]
added_drug.extend(gpi)
print('len(added_drug): ', len(added_drug))
print(added_drug)
fo.write('mkdir -p output_marketscan/{}/{}/log\n'.format(cohort_dir_name, model))
n = 0
for x in name_cnt:
k, v = x
if (v >= min_patients) or (k in added_drug):
drug = k.split('.')[0]
for ctrl_type in ['random', 'atc']:
for seed in range(0, niter):
cmd = "python main.py --data_dir ../ipreprocess/output_marketscan/{}/ --treated_drug {} " \
"--controlled_drug {} --run_model {} --output_marketscan_dir output_marketscan/{}/{}/ --random_seed {} " \
"--drug_coding rxnorm --med_code_topk 200 {} {} " \
"2>&1 | tee output_marketscan/{}/{}/log/{}_S{}D200C{}_{}.log\n".format(
cohort_dir_name, drug,
ctrl_type, model, cohort_dir_name, model, seed, '--stats' if stats else '', more_para,
cohort_dir_name, model, drug, seed, ctrl_type, model)
fo.write(cmd)
n += 1
fo.close()
print('In total ', n, ' commands')
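# Example (arguments are illustrative): generate the LR bash script for one cohort folder;
# the result can then be split for parallel submission with split_shell_file defined below.
#   shell_for_ml('save_cohort_all_loose', 'LR', niter=50, min_patients=500)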
def shell_for_ml_marketscan(cohort_dir_name, model, niter=50, min_patients=500, stats=True, more_para='', selected=False):
if not selected:
cohort_size = pickle.load(
open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
else:
d = pd.read_excel(r'../iptw/output_marketscan/{}/selected_drug_list.xlsx'.format(cohort_dir_name),
dtype={'drug':str})
name_cnt = []
for index, row in d.iterrows():
drug = row['drug']
n = row['n_treat']
name_cnt.append([drug, n])
fo = open('shell_{}_{}_marketscan.sh'.format(model, cohort_dir_name), 'w') # 'a'
fo.write('mkdir -p output_marketscan/{}/{}/log\n'.format(cohort_dir_name, model))
n_cmd = n_drug = 0
for x in name_cnt:
k, v = x
if (v >= min_patients):
drug = k.split('.')[0]
n_drug += 1
for ctrl_type in ['random', 'atc']:
for seed in range(0, niter):
cmd = "python main.py --data_dir ../ipreprocess/output_marketscan/{}/ --treated_drug {} " \
"--controlled_drug {} --run_model {} --output_dir output_marketscan/{}/{}/ --random_seed {} " \
"--drug_coding gpi --med_code_topk 200 {} {} " \
"2>&1 | tee output_marketscan/{}/{}/log/{}_S{}D200C{}_{}.log\n".format(
cohort_dir_name, drug,
ctrl_type, model, cohort_dir_name, model, seed, '--stats' if stats else '', more_para,
cohort_dir_name, model, drug, seed, ctrl_type, model)
fo.write(cmd)
n_cmd += 1
fo.close()
print('In total ', n_drug, 'dugs ', n_cmd, ' commands')
def shell_for_ml_marketscan_stats_exist(cohort_dir_name, model, niter=10, min_patients=500):
cohort_size = pickle.load(
open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
fo = open('shell_{}_{}_marketscan_stats_exist.sh'.format(model, cohort_dir_name), 'w') # 'a'
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
# load others:
df = pd.read_excel(r'../data/repurposed_AD_under_trials_20200227.xlsx', dtype=str)
added_drug = []
for index, row in df.iterrows():
rx = row['rxcui']
gpi = row['gpi']
if pd.notna(rx):
rx = [x + '.pkl' for x in re.split('[,;+]', rx)]
added_drug.extend(rx)
if pd.notna(gpi):
gpi = [x + '.pkl' for x in re.split('[,;+]', gpi)]
added_drug.extend(gpi)
print('len(added_drug): ', len(added_drug))
print(added_drug)
fo.write('mkdir -p output_marketscan/{}/{}/log_stats_exit\n'.format(cohort_dir_name, model))
n = 0
for x in name_cnt:
k, v = x
if (v >= min_patients) or (k in added_drug):
drug = k.split('.')[0]
for ctrl_type in ['random', 'atc']:
for seed in range(0, niter):
cmd = "python main.py --data_dir ../ipreprocess/output_marketscan/{}/ --treated_drug {} " \
"--controlled_drug {} --run_model {} --output_dir output_marketscan/{}/{}/ --random_seed {} " \
"--drug_coding gpi --med_code_topk 200 --stats --stats_exit " \
"2>&1 | tee output_marketscan/{}/{}/log_stats_exit/{}_S{}D200C{}_{}.log\n".format(
cohort_dir_name, drug,
ctrl_type, model, cohort_dir_name, model, seed,
cohort_dir_name, model, drug, seed, ctrl_type, model)
fo.write(cmd)
n += 1
fo.close()
print('In total ', n, ' commands')
def split_shell_file(fname, divide=2, skip_first=1):
f = open(fname, 'r')
content_list = f.readlines()
n = len(content_list)
n_d = np.ceil((n - skip_first) / divide)
seg = [0, ] + [int(i * n_d + skip_first) for i in range(1, divide)] + [n]
for i in range(divide):
fout_name = fname.split('.')
fout_name = ''.join(fout_name[:-1]) + '-' + str(i) + '.' + fout_name[-1]
fout = open(fout_name, 'w')
for l in content_list[seg[i]:seg[i + 1]]:
fout.write(l)
fout.close()
print('dump done')
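# Example (file name is hypothetical): split a generated script into 4 chunks, keeping the
# first `skip_first` line (the mkdir command) only in the first chunk:
#   split_shell_file('shell_LR_save_cohort_all_loose.sh', divide=4, skip_first=1)
#   # -> writes shell_LR_save_cohort_all_loose-0.sh ... shell_LR_save_cohort_all_loose-3.sh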
def results_model_selection_for_ml(cohort_dir_name, model, drug_name, niter=50):
cohort_size = pickle.load(open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
drug_list_all = [drug.split('.')[0] for drug, cnt in name_cnt]
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
drug_in_dir = set([x for x in os.listdir(dirname) if x.isdigit()])
drug_list = [x for x in drug_list_all if x in drug_in_dir] # in order
check_and_mkdir(dirname + 'results/')
for drug in drug_list:
results = []
for ctrl_type in ['random', 'atc']:
for seed in range(0, niter):
fname = dirname + drug + "/{}_S{}D200C{}_{}".format(drug, seed, ctrl_type, model)
try:
df = pd.read_csv(fname + '_ALL-model-select.csv')
                except:
                    print('No file exists: ', fname + '_ALL-model-select.csv')
                    continue
# 1. selected by AUC
dftmp = df.sort_values(by=['val_auc', 'i'], ascending=[False, True])
val_auc = dftmp.iloc[0, dftmp.columns.get_loc('val_auc')]
val_auc_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
val_auc_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 2. selected by val_max_smd_iptw
dftmp = df.sort_values(by=['val_max_smd_iptw', 'i'], ascending=[True, True])
val_maxsmd = dftmp.iloc[0, dftmp.columns.get_loc('val_max_smd_iptw')]
val_maxsmd_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
val_maxsmd_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 3. selected by val_n_unbalanced_feat_iptw
dftmp = df.sort_values(by=['val_n_unbalanced_feat_iptw', 'i'], ascending=[True, False]) # [True, True]
val_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('val_n_unbalanced_feat_iptw')]
val_nsmd_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
val_nsmd_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 4. selected by train_max_smd_iptw
dftmp = df.sort_values(by=['train_max_smd_iptw', 'i'], ascending=[True, True])
train_maxsmd = dftmp.iloc[0, dftmp.columns.get_loc('train_max_smd_iptw')]
train_maxsmd_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
train_maxsmd_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 5. selected by train_n_unbalanced_feat_iptw
dftmp = df.sort_values(by=['train_n_unbalanced_feat_iptw', 'i'],
ascending=[True, False]) # [True, True]
train_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('train_n_unbalanced_feat_iptw')]
train_nsmd_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
train_nsmd_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 6. selected by trainval_max_smd_iptw
dftmp = df.sort_values(by=['trainval_max_smd_iptw', 'i'], ascending=[True, True])
trainval_maxsmd = dftmp.iloc[0, dftmp.columns.get_loc('trainval_max_smd_iptw')]
trainval_maxsmd_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
trainval_maxsmd_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 7. selected by trainval_n_unbalanced_feat_iptw
dftmp = df.sort_values(by=['trainval_n_unbalanced_feat_iptw', 'i'],
ascending=[True, False]) # [True, True]
trainval_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('trainval_n_unbalanced_feat_iptw')]
trainval_nsmd_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
trainval_nsmd_testauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
# 8. FINAL: selected by trainval_n_unbalanced_feat_iptw + val AUC
dftmp = df.sort_values(by=['trainval_n_unbalanced_feat_iptw', 'val_auc'], ascending=[True, False])
trainval_final_nsmd = dftmp.iloc[0, dftmp.columns.get_loc('trainval_n_unbalanced_feat_iptw')]
trainval_final_valauc = dftmp.iloc[0, dftmp.columns.get_loc('val_auc')]
trainval_final_finalnsmd = dftmp.iloc[0, dftmp.columns.get_loc('all_n_unbalanced_feat_iptw')]
trainval_final_testnauc = dftmp.iloc[0, dftmp.columns.get_loc('test_auc')]
results.append(["{}_S{}D200C{}_{}".format(drug, seed, ctrl_type, model), ctrl_type,
val_auc, val_auc_nsmd, val_auc_testauc,
val_maxsmd, val_maxsmd_nsmd, val_maxsmd_testauc,
val_nsmd, val_nsmd_nsmd, val_nsmd_testauc,
train_maxsmd, train_maxsmd_nsmd, train_maxsmd_testauc,
train_nsmd, train_nsmd_nsmd, train_nsmd_testauc,
trainval_maxsmd, trainval_maxsmd_nsmd, trainval_maxsmd_testauc,
trainval_nsmd, trainval_nsmd_nsmd, trainval_nsmd_testauc,
trainval_final_nsmd, trainval_final_valauc, trainval_final_finalnsmd,
trainval_final_testnauc,
])
rdf = pd.DataFrame(results, columns=['fname', 'ctrl_type',
"val_auc", "val_auc_nsmd", "val_auc_testauc",
"val_maxsmd", "val_maxsmd_nsmd", "val_maxsmd_testauc",
"val_nsmd", "val_nsmd_nsmd", "val_nsmd_testauc",
"train_maxsmd", "train_maxsmd_nsmd", "train_maxsmd_testauc",
"train_nsmd", "train_nsmd_nsmd", "train_nsmd_testauc",
"trainval_maxsmd", "trainval_maxsmd_nsmd", "trainval_maxsmd_testauc",
"trainval_nsmd", "trainval_nsmd_nsmd", "trainval_nsmd_testauc",
"trainval_final_nsmd", "trainval_final_valauc", "trainval_final_finalnsmd",
"trainval_final_testnauc",
])
rdf.to_csv(dirname + 'results/' + drug + '_model_selection.csv')
for t in ['random', 'atc', 'all']:
# fig = plt.figure(figsize=(20, 15))
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 18))
if t != 'all':
idx = rdf['ctrl_type'] == t
else:
idx = rdf['ctrl_type'].notna()
boxplot = rdf[idx].boxplot(column=["val_auc_nsmd", "val_maxsmd_nsmd", "val_nsmd_nsmd", "train_maxsmd_nsmd",
"train_nsmd_nsmd", "trainval_maxsmd_nsmd", "trainval_nsmd_nsmd",
"trainval_final_finalnsmd"], rot=25, fontsize=15, ax=ax1)
ax1.axhline(y=5, color='r', linestyle='-')
boxplot.set_title("{}-{}_S{}D200C{}_{}".format(drug, drug_name.get(drug)[:30], '0-19', t, model), fontsize=25)
# plt.xlabel("Model selection methods", fontsize=15)
            ax1.set_ylabel("#unbalanced_feat_iptw of bootstrap experiments", fontsize=20)
# fig.savefig(dirname + 'results/' + drug + '_model_selection_boxplot-{}-allnsmd.png'.format(t))
# plt.show()
# fig = plt.figure(figsize=(20, 15))
boxplot = rdf[idx].boxplot(column=["val_auc_testauc", "val_maxsmd_testauc", "val_nsmd_testauc",
"train_maxsmd_testauc", "train_nsmd_testauc", "trainval_maxsmd_testauc",
"trainval_nsmd_testauc", 'trainval_final_testnauc'], rot=25, fontsize=15,
ax=ax2)
# plt.axhline(y=0.5, color='r', linestyle='-')
# boxplot.set_title("{}-{}_S{}D200C{}_{}".format(drug, drug_name.get(drug), '0-19', t, model), fontsize=25)
ax2.set_xlabel("Model selection methods", fontsize=20)
            ax2.set_ylabel("test_auc of bootstrap experiments", fontsize=20)
plt.tight_layout()
fig.savefig(dirname + 'results/' + drug + '_model_selection_boxplot-{}.png'.format(t))
# plt.clf()
plt.close()
print()
def results_model_selection_for_ml_step2(cohort_dir_name, model, drug_name):
cohort_size = pickle.load(open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
drug_list_all = [drug.split('.')[0] for drug, cnt in name_cnt]
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
drug_in_dir = set([x for x in os.listdir(dirname) if x.isdigit()])
drug_list = [x for x in drug_list_all if x in drug_in_dir] # in order
check_and_mkdir(dirname + 'results/')
writer = pd.ExcelWriter(dirname + 'results/summarized_model_selection_{}.xlsx'.format(model), engine='xlsxwriter')
for t in ['random', 'atc', 'all']:
results = []
for drug in drug_list:
rdf = pd.read_csv(dirname + 'results/' + drug + '_model_selection.csv')
if t != 'all':
idx = rdf['ctrl_type'] == t
else:
idx = rdf['ctrl_type'].notna()
r = [drug, drug_name.get(drug, '')]
col_name = ['drug', 'drug_name']
# zip(["val_auc_nsmd", "val_maxsmd_nsmd", "val_nsmd_nsmd", "train_maxsmd_nsmd",
# "train_nsmd_nsmd", "trainval_maxsmd_nsmd", "trainval_nsmd_nsmd",
# "trainval_final_finalnsmd"],
# ["val_auc_testauc", "val_maxsmd_testauc", "val_nsmd_testauc",
# "train_maxsmd_testauc", "train_nsmd_testauc", "trainval_maxsmd_testauc",
# "trainval_nsmd_testauc", 'trainval_final_testnauc'])
for c1, c2 in zip(["val_auc_nsmd", "val_maxsmd_nsmd", "trainval_final_finalnsmd"],
["val_auc_testauc", "val_maxsmd_testauc", 'trainval_final_testnauc']):
nsmd = rdf.loc[idx, c1]
auc = rdf.loc[idx, c2]
nsmd_med = IQR(nsmd)[0]
nsmd_iqr = IQR(nsmd)[1:]
nsmd_mean = nsmd.mean()
nsmd_mean_ci, nsmd_mean_std = bootstrap_mean_ci(nsmd, alpha=0.05)
success_rate = (nsmd <= MAX_NO_UNBALANCED_FEATURE).mean()
success_rate_ci, success_rate_std = bootstrap_mean_ci(nsmd <= MAX_NO_UNBALANCED_FEATURE, alpha=0.05)
auc_med = IQR(auc)[0]
auc_iqr = IQR(auc)[1:]
auc_mean = auc.mean()
auc_mean_ci, auc_mean_std = bootstrap_mean_ci(auc, alpha=0.05)
r.extend([nsmd_med, nsmd_iqr,
nsmd_mean, nsmd_mean_ci, nsmd_mean_std,
success_rate, success_rate_ci, success_rate_std,
auc_med, auc_iqr, auc_mean, auc_mean_ci, auc_mean_std])
col_name.extend(
["nsmd_med-" + c1, "nsmd_iqr-" + c1, "nsmd_mean-" + c1, "nsmd_mean_ci-" + c1, "nsmd_mean_std-" + c1,
"success_rate-" + c1, "success_rate_ci-" + c1, "success_rate_std-" + c1,
"auc_med-" + c2, "auc_iqr-" + c2, "auc_mean-" + c2, "auc_mean_ci-" + c2, "auc_mean_std-" + c2])
x = np.array(rdf.loc[idx, "trainval_final_finalnsmd"] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
y1 = np.array(rdf.loc[idx, "val_auc_nsmd"] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
y2 = np.array(rdf.loc[idx, "val_maxsmd_nsmd"] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
p1, test_orig1 = bootstrap_mean_pvalue_2samples(x, y1)
p2, test_orig2 = bootstrap_mean_pvalue_2samples(x, y2)
p3, test_orig3 = bootstrap_mean_pvalue_2samples(y1, y2)
r.extend([p1, test_orig1[1], p2, test_orig2[1], p3, test_orig3[1]])
col_name.extend(
['pboot-succes-final-vs-auc', 'p-succes-final-vs-auc',
'pboot-succes-final-vs-maxsmd', 'p-succes-final-vs-maxsmd',
'pboot-succes-auc-vs-maxsmd', 'p-succes-auc-vs-maxsmd'])
results.append(r)
df = pd.DataFrame(results, columns=col_name)
df.to_excel(writer, sheet_name=t)
writer.save()
print()
def results_model_selection_for_ml_step2More(cohort_dir_name, model, drug_name):
cohort_size = pickle.load(open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
drug_list_all = [drug.split('.')[0] for drug, cnt in name_cnt]
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
drug_in_dir = set([x for x in os.listdir(dirname) if x.isdigit()])
drug_list = [x for x in drug_list_all if x in drug_in_dir] # in order
check_and_mkdir(dirname + 'results/')
writer = pd.ExcelWriter(dirname + 'results/summarized_model_selection_{}-More.xlsx'.format(model),
engine='xlsxwriter')
for t in ['random', 'atc', 'all']:
results = []
for drug in drug_list:
rdf = pd.read_csv(dirname + 'results/' + drug + '_model_selection.csv')
if t != 'all':
idx = rdf['ctrl_type'] == t
else:
idx = rdf['ctrl_type'].notna()
r = [drug, drug_name.get(drug, '')]
col_name = ['drug', 'drug_name']
for c1, c2 in zip(
["val_auc_nsmd", "val_maxsmd_nsmd", "val_nsmd_nsmd", "train_maxsmd_nsmd",
"train_nsmd_nsmd", "trainval_maxsmd_nsmd", "trainval_nsmd_nsmd", "trainval_final_finalnsmd"],
["val_auc_testauc", "val_maxsmd_testauc", "val_nsmd_testauc", "train_maxsmd_testauc",
"train_nsmd_testauc", "trainval_maxsmd_testauc", "trainval_nsmd_testauc",
'trainval_final_testnauc']):
# for c1, c2 in zip(["val_auc_nsmd", "val_maxsmd_nsmd", "val_nsmd_nsmd", "train_maxsmd_nsmd", "train_nsmd_nsmd", "trainval_final_finalnsmd"],
# ["val_auc_testauc", "val_maxsmd_testauc", 'trainval_final_testnauc']):
nsmd = rdf.loc[idx, c1]
auc = rdf.loc[idx, c2]
nsmd_med = IQR(nsmd)[0]
nsmd_iqr = IQR(nsmd)[1:]
nsmd_mean = nsmd.mean()
nsmd_mean_ci, nsmd_mean_std = bootstrap_mean_ci(nsmd, alpha=0.05)
success_rate = (nsmd <= MAX_NO_UNBALANCED_FEATURE).mean()
success_rate_ci, success_rate_std = bootstrap_mean_ci(nsmd <= MAX_NO_UNBALANCED_FEATURE, alpha=0.05)
auc_med = IQR(auc)[0]
auc_iqr = IQR(auc)[1:]
auc_mean = auc.mean()
auc_mean_ci, auc_mean_std = bootstrap_mean_ci(auc, alpha=0.05)
r.extend([nsmd_med, nsmd_iqr,
nsmd_mean, nsmd_mean_ci, nsmd_mean_std,
success_rate, success_rate_ci, success_rate_std,
auc_med, auc_iqr, auc_mean, auc_mean_ci, auc_mean_std])
col_name.extend(
["nsmd_med-" + c1, "nsmd_iqr-" + c1, "nsmd_mean-" + c1, "nsmd_mean_ci-" + c1, "nsmd_mean_std-" + c1,
"success_rate-" + c1, "success_rate_ci-" + c1, "success_rate_std-" + c1,
"auc_med-" + c2, "auc_iqr-" + c2, "auc_mean-" + c2, "auc_mean_ci-" + c2, "auc_mean_std-" + c2])
x = np.array(rdf.loc[idx, "trainval_final_finalnsmd"] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
y1 = np.array(rdf.loc[idx, "val_auc_nsmd"] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
y2 = np.array(rdf.loc[idx, "val_maxsmd_nsmd"] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
p1, test_orig1 = bootstrap_mean_pvalue_2samples(x, y1)
p2, test_orig2 = bootstrap_mean_pvalue_2samples(x, y2)
p3, test_orig3 = bootstrap_mean_pvalue_2samples(y1, y2)
r.extend([p1, test_orig1[1], p2, test_orig2[1], p3, test_orig3[1]])
col_name.extend(
['pboot-succes-final-vs-auc', 'p-succes-final-vs-auc',
'pboot-succes-final-vs-maxsmd', 'p-succes-final-vs-maxsmd',
'pboot-succes-auc-vs-maxsmd', 'p-succes-auc-vs-maxsmd'])
col = ['val_auc_nsmd', 'val_maxsmd_nsmd', 'val_nsmd_nsmd',
'train_maxsmd_nsmd', 'train_nsmd_nsmd',
'trainval_maxsmd_nsmd', 'trainval_nsmd_nsmd'] # ,'success_rate-trainval_final_finalnsmd']
for c in col:
y = np.array(rdf.loc[idx, c] <= MAX_NO_UNBALANCED_FEATURE, dtype=np.float)
p, test_orig = bootstrap_mean_pvalue_2samples(x, y)
r.append(test_orig[1])
col_name.append('p-succes-fvs-' + c)
results.append(r)
df = pd.DataFrame(results, columns=col_name)
df.to_excel(writer, sheet_name=t)
writer.save()
print()
def results_ATE_for_ml(cohort_dir_name, model, niter=50):
cohort_size = pickle.load(open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
drug_list_all = [drug.split('.')[0] for drug, cnt in name_cnt]
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
drug_in_dir = set([x for x in os.listdir(dirname) if x.isdigit()])
drug_list = [x for x in drug_list_all if x in drug_in_dir] # in order
check_and_mkdir(dirname + 'results/')
for drug in drug_list:
results = []
for ctrl_type in ['random', 'atc']:
for seed in range(0, niter):
fname = dirname + drug + "/{}_S{}D200C{}_{}".format(drug, seed, ctrl_type, model)
try:
df = pd.read_csv(fname + '_results.csv')
                except:
                    print('No file exists: ', fname + '_results.csv')
                    continue
r = df.loc[3, :]
for c in ["KM_time_points", "KM1_original", "KM0_original", "KM1-0_original",
"KM1_IPTW", "KM0_IPTW", "KM1-0_IPTW"]:
r.loc[c] = stringlist_2_list(r.loc[c])[-1]
r = pd.Series(["{}_S{}D200C{}_{}".format(drug, seed, ctrl_type, model), ctrl_type],
index=['fname', 'ctrl_type']).append(r)
results.append(r)
rdf = pd.DataFrame(results)
rdf.to_excel(dirname + 'results/' + drug + '_results.xlsx')
print('Done')
def results_ATE_for_ml_step2(cohort_dir_name, model, drug_name):
cohort_size = pickle.load(open(r'../ipreprocess/output_marketscan/{}/cohorts_size.pkl'.format(cohort_dir_name), 'rb'))
name_cnt = sorted(cohort_size.items(), key=lambda x: x[1], reverse=True)
drug_list_all = [drug.split('.')[0] for drug, cnt in name_cnt]
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
drug_in_dir = set([x for x in os.listdir(dirname) if x.isdigit()])
drug_list = [x for x in drug_list_all if x in drug_in_dir] # in order
check_and_mkdir(dirname + 'results/')
writer = pd.ExcelWriter(dirname + 'results/summarized_IPTW_ATE_{}.xlsx'.format(model), engine='xlsxwriter')
for t in ['random', 'atc', 'all']:
results = []
for drug in drug_list:
rdf = pd.read_excel(dirname + 'results/' + drug + '_results.xlsx')
if t != 'all':
idx_all = (rdf['ctrl_type'] == t)
else:
idx_all = (rdf['ctrl_type'].notna())
# Only select balanced trial
idx = idx_all & (rdf['n_unbalanced_feature_IPTW'] <= MAX_NO_UNBALANCED_FEATURE)
print('drug: ', drug, drug_name.get(drug, ''), t, 'support:', idx.sum())
r = [drug, drug_name.get(drug, ''), idx_all.sum(), idx.sum()]
col_name = ['drug', 'drug_name', 'niters', 'support']
for c in ["n_treat", "n_ctrl", "n_feature"]: # , 'HR_IPTW', 'HR_IPTW_CI'
nv = rdf.loc[idx, c]
nv_mean = nv.mean()
r.append(nv_mean)
col_name.append(c)
nv = rdf.loc[idx_all, c]
nv_mean = nv.mean()
r.append(nv_mean)
col_name.append(c + '-uab')
for c in ["n_unbalanced_feature", "n_unbalanced_feature_IPTW"]: # , 'HR_IPTW', 'HR_IPTW_CI'
nv = rdf.loc[idx, c]
if len(nv) > 0:
med = IQR(nv)[0]
iqr = IQR(nv)[1:]
mean = nv.mean()
mean_ci, _ = bootstrap_mean_ci(nv, alpha=0.05)
r.extend([med, iqr, mean, mean_ci])
else:
r.extend([np.nan, np.nan, np.nan, np.nan])
col_name.extend(["med-" + c, "iqr-" + c, "mean-" + c, "mean_ci-" + c])
nv = rdf.loc[idx_all, c]
if len(nv) > 0:
med = IQR(nv)[0]
iqr = IQR(nv)[1:]
mean = nv.mean()
mean_ci, _ = bootstrap_mean_ci(nv, alpha=0.05)
r.extend([med, iqr, mean, mean_ci])
else:
r.extend([np.nan, np.nan, np.nan, np.nan])
col_name.extend(
["med-" + c + '-uab', "iqr-" + c + '-uab', "mean-" + c + '-uab', "mean_ci-" + c + '-uab'])
for c in ["ATE_original", "ATE_IPTW", "KM1-0_original", "KM1-0_IPTW", 'HR_ori', 'HR_IPTW']:
if c not in rdf.columns:
continue
nv = rdf.loc[idx, c]
if len(nv) > 0:
med = IQR(nv)[0]
iqr = IQR(nv)[1:]
mean = nv.mean()
mean_ci, _ = bootstrap_mean_ci(nv, alpha=0.05)
if 'HR' in c:
p, _ = bootstrap_mean_pvalue(nv, expected_mean=1)
else:
p, _ = bootstrap_mean_pvalue(nv, expected_mean=0)
r.extend([med, iqr, mean, mean_ci, p])
else:
r.extend([np.nan, np.nan, np.nan, np.nan, np.nan])
col_name.extend(["med-" + c, "iqr-" + c, "mean-" + c, "mean_ci-" + c, 'pvalue-' + c])
if 'HR_ori_CI' in rdf.columns:
r.append(';'.join(rdf.loc[idx, 'HR_ori_CI']))
col_name.append('HR_ori_CI')
r.append(';'.join(rdf.loc[idx, 'HR_IPTW_CI']))
col_name.append('HR_IPTW_CI')
results.append(r)
df = pd.DataFrame(results, columns=col_name)
df.to_excel(writer, sheet_name=t)
writer.save()
print()
def results_ATE_for_ml_step3_finalInfo(cohort_dir_name, model):
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
df_all = pd.read_excel(dirname + 'results/summarized_IPTW_ATE_{}.xlsx'.format(model), sheet_name=None, dtype={'drug':str})
writer = pd.ExcelWriter(dirname + 'results/summarized_IPTW_ATE_{}_finalInfo.xlsx'.format(model),
engine='xlsxwriter')
for sheet in ['random', 'atc', 'all']:
df = df_all[sheet]
# Only select drugs with selection criteria trial
# 1. minimum support set 10, may choose 20 later
# 2. p value < 0.05
idx = (df['support'] >= 10) & (df['pvalue-KM1-0_IPTW'] <= 0.05)
df_sort = df.loc[idx, :].sort_values(by=['mean-KM1-0_IPTW'], ascending=[False])
df_final = df_sort[
['drug', 'drug_name', 'niters', 'support', 'n_treat', 'n_ctrl', 'n_feature',
'mean-n_unbalanced_feature', 'mean_ci-n_unbalanced_feature',
'mean-n_unbalanced_feature_IPTW', 'mean_ci-n_unbalanced_feature_IPTW',
# 'mean-ATE_original', 'mean_ci-ATE_original', 'pvalue-ATE_original',
# 'mean-ATE_IPTW', 'mean_ci-ATE_IPTW', 'pvalue-ATE_IPTW',
# 'mean-KM1-0_original', 'mean_ci-KM1-0_original', 'pvalue-KM1-0_original',
'mean-KM1-0_IPTW', 'mean_ci-KM1-0_IPTW', 'pvalue-KM1-0_IPTW',
'mean-HR_IPTW', 'mean_ci-HR_IPTW', 'pvalue-HR_IPTW']]
df_final['n_ctrl'] = df_final['n_ctrl'].apply(
lambda x: '{:.1f}'.format(x))
df_final['mean-n_unbalanced_feature'] = df_final['mean-n_unbalanced_feature'].apply(
lambda x: '{:.1f}'.format(x))
df_final['mean_ci-n_unbalanced_feature'] = df_final['mean_ci-n_unbalanced_feature'].apply(
lambda x: stringlist_2_str(x, False, 1))
df_final['mean-n_unbalanced_feature_IPTW'] = df_final['mean-n_unbalanced_feature_IPTW'].apply(
lambda x: '{:.1f}'.format(x))
df_final['mean_ci-n_unbalanced_feature_IPTW'] = df_final['mean_ci-n_unbalanced_feature_IPTW'].apply(
lambda x: stringlist_2_str(x, False, 1))
df_final['mean-KM1-0_IPTW'] = df_final['mean-KM1-0_IPTW'].apply(
lambda x: '{:.1f}'.format(x * 100))
df_final['mean_ci-KM1-0_IPTW'] = df_final['mean_ci-KM1-0_IPTW'].apply(
lambda x: stringlist_2_str(x, True, 1))
df_final['mean-HR_IPTW'] = df_final['mean-HR_IPTW'].apply(
lambda x: '{:.2f}'.format(x))
df_final['mean_ci-HR_IPTW'] = df_final['mean_ci-HR_IPTW'].apply(
lambda x: stringlist_2_str(x, False, 2))
df_final.to_excel(writer, sheet_name=sheet)
writer.save()
print('Done results_ATE_for_ml_step3_finalInfo')
def combine_ate_final_LR_with(cohort_dir_name, model):
dirname = r'output_marketscan/{}/LR/'.format(cohort_dir_name)
df_lr = pd.read_excel(dirname + 'results/summarized_IPTW_ATE_{}_finalInfo.xlsx'.format('LR'), sheet_name=None,
dtype=str)
df_other = pd.read_excel(r'output_marketscan/{}/{}/'.format(cohort_dir_name, model) +
'results/summarized_IPTW_ATE_{}_finalInfo.xlsx'.format(model), sheet_name=None, dtype=str)
writer = pd.ExcelWriter(dirname + 'results/summarized_IPTW_ATE_LR_finalInfo_cat_{}.xlsx'.format(model),
engine='xlsxwriter')
writer2 = pd.ExcelWriter(dirname + 'results/summarized_IPTW_ATE_LR_finalInfo_outerjoin_{}.xlsx'.format(model),
engine='xlsxwriter')
col_name = ['drug', 'Drug', 'Model', 'niters', 'Support', 'Treat', 'Ctrl',
'n_feature', ' Unbalanced', 'Unbalanced IPTW', 'KM', 'HR']
def return_select_content(key, row, null_model=''):
data = [key, ]
col1 = ['drug_name', 'Model', 'niters', 'support', 'n_treat', 'n_ctrl',
'n_feature', 'mean-n_unbalanced_feature', 'mean-n_unbalanced_feature_IPTW']
if null_model:
for c in col1:
data.append(row[c])
data[2] = null_model.upper()
data[4] = 0
data[5] = data[6] = data[7] = data[8] = data[9] = np.nan
data.append(np.nan)
data.append(np.nan)
else:
for c in col1:
data.append(row[c])
data.append(row['mean-KM1-0_IPTW'] + ' (' + row['mean_ci-KM1-0_IPTW'] + ')')
data.append(row['mean-HR_IPTW'] + ' (' + row['mean_ci-HR_IPTW'] + ')')
return data
for sheet in ['random', 'atc', 'all']:
df1 = df_lr[sheet]
df1['Model'] = 'lr'
df2 = df_other[sheet]
        df2['Model'] = model.lower()  # use the passed-in model name instead of hard-coding 'lstm'
df_outer = df1.join(df2.set_index('drug'), lsuffix='', rsuffix='_{}'.format(model), on='drug', how='outer')
df_outer.to_excel(writer2, sheet_name=sheet)
df1 = df1.set_index('drug')
df2 = df2.set_index('drug')
data = []
for key, row in df1.iterrows():
data.append(return_select_content(key, row))
if key in df2.index:
data.append(return_select_content(key, df2.loc[key, :]))
else:
data.append(return_select_content(key, row, null_model=model))
df_final = pd.DataFrame(data=data, columns=col_name)
df_final.to_excel(writer, sheet_name=sheet)
writer.save()
writer2.save()
print('Done results_ATE_for_ml_step3_finalInfo')
def check_drug_name_code():
df = pd.read_excel(r'../data/repurposed_AD_under_trials_20200227.xlsx', dtype=str)
rx_df = pd.read_csv(r'../ipreprocess/output/save_cohort_all_loose/cohort_all_name_size_positive_loose.csv',
index_col='cohort_name', dtype=str)
gpi_df = pd.read_csv(r'../ipreprocess/output_marketscan/save_cohort_all_loose/cohort_all_name_size_positive.csv',
index_col='cohort_name', dtype=str)
df['rx_drug_name'] = ''
df['rx_n_patients'] = ''
df['rx_n_pos'] = ''
df['rx_pos_ratio'] = ''
df['gpi_drug_name'] = ''
df['gpi_n_patients'] = ''
df['gpi_n_pos'] = ''
df['gpi_pos_ratio'] = ''
for index, row in df.iterrows():
rx = row['rxcui']
gpi = row['gpi']
# print(index, row)
if pd.notna(rx):
rx = [x + '.pkl' for x in re.split('[,;+]', rx)]
else:
rx = []
if pd.notna(gpi):
gpi = [x + '.pkl' for x in re.split('[,;+]', gpi)]
else:
gpi = []
for r in rx:
if r in rx_df.index:
df.loc[index, 'rx_drug_name'] += ('+' + rx_df.loc[r, 'drug_name'])
df.loc[index, 'rx_n_patients'] += ('+' + rx_df.loc[r, 'n_patients'])
df.loc[index, 'rx_n_pos'] += ('+' + rx_df.loc[r, 'n_pos'])
df.loc[index, 'rx_pos_ratio'] += ('+' + rx_df.loc[r, 'pos_ratio'])
for r in gpi:
if r in gpi_df.index:
df.loc[index, 'gpi_drug_name'] += ('+' + gpi_df.loc[r, 'drug_name'])
df.loc[index, 'gpi_n_patients'] += ('+' + gpi_df.loc[r, 'n_patients'])
df.loc[index, 'gpi_n_pos'] += ('+' + gpi_df.loc[r, 'n_pos'])
df.loc[index, 'gpi_pos_ratio'] += ('+' + gpi_df.loc[r, 'pos_ratio'])
df.to_excel(r'../data/repurposed_AD_under_trials_20200227-CHECK.xlsx')
return df
def bar_plot_model_selection(cohort_dir_name, model, contrl_type='random', dump=True, colorful=True):
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
dfall = pd.read_excel(dirname + 'results/summarized_model_selection_{}.xlsx'.format(model), sheet_name=contrl_type)
idx = dfall['success_rate-trainval_final_finalnsmd'] >= MIN_SUCCESS_RATE # 0.1
idx_auc = dfall['success_rate-val_auc_nsmd'] >= MIN_SUCCESS_RATE # 0.1
idx_smd = dfall['success_rate-val_maxsmd_nsmd'] >= MIN_SUCCESS_RATE # 0.1
print('Total drug trials: ', len(idx))
print(r"#df['success_rate-trainval_final_finalnsmd'] > 0: ", idx.sum(), '({:.2f}%)'.format(idx.mean() * 100))
print(r"#df['success_rate-val_auc_nsmd'] > 0: ", idx_auc.sum(), '({:.2f}%)'.format(idx_auc.mean() * 100))
print(r"#df['success_rate-val_maxsmd_nsmd'] > 0: ", idx_smd.sum(), '({:.2f}%)'.format(idx_smd.mean() * 100))
df = dfall.loc[idx, :].sort_values(by=['success_rate-trainval_final_finalnsmd'], ascending=[False])
# df['nsmd_mean_ci-val_auc_nsmd']
N = len(df)
top_1 = df.loc[:, 'success_rate-val_auc_nsmd'] # * 100
top_1_ci = np.array(
df.loc[:, 'success_rate_ci-val_auc_nsmd'].apply(lambda x: stringlist_2_list(x)).to_list()) # *100
# top_1_ci = df.loc[:, 'success_rate_std-val_auc_nsmd']
top_2 = df.loc[:, 'success_rate-val_maxsmd_nsmd'] # * 100
top_2_ci = np.array(
df.loc[:, 'success_rate_ci-val_maxsmd_nsmd'].apply(lambda x: stringlist_2_list(x)).to_list()) # *100
# top_2_ci = df.loc[:, 'success_rate_std-val_maxsmd_nsmd']
top_3 = df.loc[:, 'success_rate-trainval_final_finalnsmd'] # * 100
top_3_ci = np.array(
df.loc[:, 'success_rate_ci-trainval_final_finalnsmd'].apply(lambda x: stringlist_2_list(x)).to_list()) # *100
# top_3_ci = df.loc[:, 'success_rate_std-trainval_final_finalnsmd']
pauc = np.array(df.loc[:, "p-succes-final-vs-auc"])
psmd = np.array(df.loc[:, "p-succes-final-vs-maxsmd"])
paucsmd = np.array(df.loc[:, "p-succes-auc-vs-maxsmd"])
xlabels = df.loc[:, 'drug_name']
# xlabels = df.loc[:, 'drug_name'].apply(lambda x : min(x.split('/'), key=str))
xlabels = [s[:18] for s in xlabels]
width = 0.3 # 0.45 # the width of the bars
ind = np.arange(N) * width * 4 # the x locations for the groups
colors = ['#FAC200', '#82A2D3', '#F65453']
fig, ax = plt.subplots(figsize=(40, 8))
error_kw = {'capsize': 3, 'capthick': 1, 'ecolor': 'black'}
# plt.ylim([0, 1.05])
rects1 = ax.bar(ind, top_1, width, yerr=[top_1 - top_1_ci[:, 0], top_1_ci[:, 1] - top_1], error_kw=error_kw,
color=colors[0], edgecolor=None) # , edgecolor='b' "black"
rects2 = ax.bar(ind + width, top_2, width, yerr=[top_2 - top_2_ci[:, 0], top_2_ci[:, 1] - top_2], error_kw=error_kw,
color=colors[1], edgecolor=None)
rects3 = ax.bar(ind + 2 * width, top_3, width, yerr=[top_3 - top_3_ci[:, 0], top_3_ci[:, 1] - top_3],
error_kw=error_kw, color=colors[2], edgecolor=None) # , hatch='.')
# rects1 = ax.bar(ind, top_1, width, yerr=[top_1_ci, top_1_ci], error_kw=error_kw,
# color='#FAC200', edgecolor="black") # , edgecolor='b'
# rects2 = ax.bar(ind + width, top_2, width, yerr=[top_2_ci, top_2_ci], error_kw=error_kw,
# color='#82A2D3', edgecolor="black")
# rects3 = ax.bar(ind + 2 * width, top_3, width, yerr=[top_3_ci, top_3_ci],
# error_kw=error_kw, color='#F65453', edgecolor="black") # , hatch='.')
ax.set_xticks(ind + width)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
# ax.spines['bottom'].set_color('#DDDDDD')
ax.set_axisbelow(True)
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.yaxis.grid(True, color='#EEEEEE', which='both')
ax.xaxis.grid(False)
ax.set_xticklabels(xlabels, fontsize=20, rotation=45, ha='right')
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel("Drug Trials", fontsize=25)
ax.set_ylabel("Prop. of success balancing", fontsize=25) # Success Rate of Balancing
# ax.set_title(model, fontsize=25) #fontweight="bold")
# plt.axhline(y=0.5, color='#888888', linestyle='-')
def significance(val):
if val < 0.001:
return '***'
elif val < 0.01:
return '**'
elif val < 0.05:
return '*'
else:
return 'ns'
# def labelvalue(rects, val, height=None):
# for i, rect in enumerate(rects):
# if height is None:
# h = rect.get_height() * 1.03
# else:
# h = height[i] * 1.03
# ax.text(rect.get_x() + rect.get_width() / 2., h,
# significance(val[i]),
# ha='center', va='bottom', fontsize=11)
#
# labelvalue(rects1, pauc, top_1_ci[:,1])
# labelvalue(rects2, psmd, top_2_ci[:,1])
for i, rect in enumerate(rects3):
d = 0.02
y = top_3_ci[i, 1] * 1.03 # rect.get_height()
w = rect.get_width()
x = rect.get_x()
x1 = x - 2 * w
x2 = x - 1 * w
y1 = top_1_ci[i, 1] * 1.03
y2 = top_2_ci[i, 1] * 1.03
# auc v.s. final
l, r = x1, x + w
ax.plot([l, l, (l + r) / 2], [y + 2 * d, y + 3 * d, y + 3 * d], lw=1.2, c=colors[0] if colorful else 'black')
ax.plot([(l + r) / 2, r, r], [y + 3 * d, y + 3 * d, y + 2 * d], lw=1.2, c=colors[2] if colorful else 'black')
# ax.plot([x1, x1, x, x], [y+2*d, y+3*d, y+3*d, y+2*d], c='#FAC200') #c="black")
ax.text((l + r) / 2, y + 2.6 * d, significance(pauc[i]), ha='center', va='bottom', fontsize=13)
# smd v.s. final
l, r = x2 + 0.6 * w, x + w
ax.plot([l, l, (l + r) / 2], [y, y + d, y + d], lw=1.2, c=colors[1] if colorful else 'black')
ax.plot([(l + r) / 2, r, r], [y + d, y + d, y], lw=1.2, c=colors[2] if colorful else 'black')
# ax.plot([x2, x2, x, x], [y, y + d, y + d, y], c='#82A2D3') #c="black")
ax.text((l + r) / 2, y + 0.6 * d, significance(psmd[i]), ha='center', va='bottom', fontsize=13)
# auc v.s. smd
l, r = x1, x2 + 0.4 * w
ax.plot([l, l, (l + r) / 2], [y, y + d, y + d], lw=1.2, c=colors[0] if colorful else 'black')
ax.plot([(l + r) / 2, r, r], [y + d, y + d, y], lw=1.2, c=colors[1] if colorful else 'black')
# ax.plot([x1, x1, x, x], [y+2*d, y+3*d, y+3*d, y+2*d], c='#FAC200') #c="black")
ax.text((l + r) / 2, y + .6 * d, significance(paucsmd[i]), ha='center', va='bottom', fontsize=13)
# ax.set_title('Success Rate of Balancing by Different PS Model Selection Methods')
ax.legend((rects1[0], rects2[0], rects3[0]), ('Val-AUC Select', 'Val-SMD Select', 'Our Strategy'),
fontsize=25) # , bbox_to_anchor=(1.13, 1.01))
# ax.autoscale(enable=True, axis='x', tight=True)
ax.set_xmargin(0.01)
plt.tight_layout()
if dump:
check_and_mkdir(dirname + 'results/fig/')
fig.savefig(dirname + 'results/fig/balance_rate_barplot-{}-{}.png'.format(model, contrl_type))
fig.savefig(dirname + 'results/fig/balance_rate_barplot-{}-{}.pdf'.format(model, contrl_type))
plt.show()
# plt.clf()
plt.close()
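# Illustrative call (the cohort directory and model names are assumptions based on paths used
# elsewhere in this file, not a pinned configuration):
#   bar_plot_model_selection('save_cohort_all_loose', 'LR', contrl_type='random', dump=True)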
def bar_plot_model_selectionV2(cohort_dir_name, model, contrl_type='random', dump=True, colorful=True):
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
dfall = pd.read_excel(dirname + 'results/summarized_model_selection_{}-More.xlsx'.format(model),
sheet_name=contrl_type)
idx = dfall['success_rate-trainval_final_finalnsmd'] >= MIN_SUCCESS_RATE
idx_auc = dfall['success_rate-val_auc_nsmd'] >= MIN_SUCCESS_RATE
idx_smd = dfall['success_rate-val_maxsmd_nsmd'] >= MIN_SUCCESS_RATE
print('Total drug trials: ', len(idx))
print(r"#df['success_rate-trainval_final_finalnsmd'] > 0: ", idx.sum(), '({:.2f}%)'.format(idx.mean() * 100))
print(r"#df['success_rate-val_auc_nsmd'] > 0: ", idx_auc.sum(), '({:.2f}%)'.format(idx_auc.mean() * 100))
print(r"#df['success_rate-val_maxsmd_nsmd'] > 0: ", idx_smd.sum(), '({:.2f}%)'.format(idx_smd.mean() * 100))
df = dfall.loc[idx, :].sort_values(by=['success_rate-trainval_final_finalnsmd'], ascending=[False])
# df['nsmd_mean_ci-val_auc_nsmd']
N = len(df)
col = ['success_rate-val_auc_nsmd', 'success_rate-val_maxsmd_nsmd', 'success_rate-val_nsmd_nsmd',
'success_rate-train_maxsmd_nsmd', 'success_rate-train_nsmd_nsmd',
'success_rate-trainval_maxsmd_nsmd', 'success_rate-trainval_nsmd_nsmd',
'success_rate-trainval_final_finalnsmd']
legs = ['_'.join(x.split('-')[1].split('_')[:-1]) for x in col]
# col_ci = [x.replace('rate', 'rate_ci') for x in col]
top = []
top_ci = []
for c in col:
top.append(df.loc[:, c])
top_ci.append(np.array(df.loc[:, c.replace('rate', 'rate_ci')].apply(lambda x: stringlist_2_list(x)).to_list()))
pauc = np.array(df.loc[:, "p-succes-final-vs-auc"])
psmd = np.array(df.loc[:, "p-succes-final-vs-maxsmd"])
paucsmd = np.array(df.loc[:, "p-succes-auc-vs-maxsmd"])
xlabels = df.loc[:, 'drug_name']
width = 0.45 # the width of the bars
ind = np.arange(N) * width * (len(col) + 1) # the x locations for the groups
colors = ['#FAC200', '#82A2D3', '#F65453']
fig, ax = plt.subplots(figsize=(24, 8))
error_kw = {'capsize': 3, 'capthick': 1, 'ecolor': 'black'}
# plt.ylim([0, 1.05])
rects = []
for i in range(len(top)):
top_1 = top[i]
top_1_ci = top_ci[i]
if i <= 1 or i == len(top) - 1:
rect = ax.bar(ind + width * i, top_1, width, yerr=[top_1 - top_1_ci[:, 0], top_1_ci[:, 1] - top_1],
error_kw=error_kw,
color=colors[min(i, len(colors) - 1)], edgecolor=None) # "black")
else:
rect = ax.bar(ind + width * i, top_1, width, yerr=[top_1 - top_1_ci[:, 0], top_1_ci[:, 1] - top_1],
error_kw=error_kw,
edgecolor="black")
rects.append(rect)
ax.set_xticks(ind + int(len(top) / 2) * width)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
# ax.spines['bottom'].set_color('#DDDDDD')
ax.set_axisbelow(True)
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.yaxis.grid(True, color='#EEEEEE', which='both')
ax.xaxis.grid(False)
ax.set_xticklabels(xlabels, fontsize=20, rotation=45, ha='right')
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel("Drug Trials", fontsize=25)
ax.set_ylabel("Prop. of success balancing", fontsize=25) # Success Rate of Balancing
def significance(val):
if val < 0.001:
return '***'
elif val < 0.01:
return '**'
elif val < 0.05:
return '*'
else:
return 'ns'
def labelvalue(rects, val, height=None):
for i, rect in enumerate(rects):
if height is None:
h = rect.get_height() * 1.02
else:
h = height[i] * 1.02
ax.text(rect.get_x() + rect.get_width() / 2., h,
significance(val[i]),
ha='center', va='bottom', fontsize=11)
for i in range(len(rects) - 1):
pv = np.array(df.loc[:, "p-succes-fvs-" + col[i].split('-')[-1]])
labelvalue((rects[i]), pv, top_ci[i][:, 1])
# labelvalue(rects1, pauc, top_1_ci[:,1])
# labelvalue(rects2, psmd, top_2_ci[:,1])
# for i, rect in enumerate(rects3):
# d = 0.02
# y = top_3_ci[i, 1] * 1.03 # rect.get_height()
# w = rect.get_width()
# x = rect.get_x()
# x1 = x - 2 * w
# x2 = x - 1 * w
#
# y1 = top_1_ci[i, 1] * 1.03
# y2 = top_2_ci[i, 1] * 1.03
#
# # auc v.s. final
# l, r = x1, x + w
# ax.plot([l, l, (l+r) / 2], [y + 2 * d, y + 3 * d, y + 3 * d], lw=1.2, c=colors[0] if colorful else 'black')
# ax.plot([(l+r) / 2, r, r], [y + 3 * d, y + 3 * d, y + 2 * d], lw=1.2, c=colors[2] if colorful else 'black')
# # ax.plot([x1, x1, x, x], [y+2*d, y+3*d, y+3*d, y+2*d], c='#FAC200') #c="black")
# ax.text((l+r) / 2, y + 2.6 * d, significance(pauc[i]), ha='center', va='bottom', fontsize=13)
#
# # smd v.s. final
# l, r = x2 + 0.6*w, x + w
# ax.plot([l, l, (l + r) / 2], [y, y + d, y + d], lw=1.2, c=colors[1] if colorful else 'black')
# ax.plot([(l + r) / 2, r, r], [y + d, y + d, y], lw=1.2, c=colors[2] if colorful else 'black')
# # ax.plot([x2, x2, x, x], [y, y + d, y + d, y], c='#82A2D3') #c="black")
# ax.text((l + r) / 2, y + 0.6 * d, significance(psmd[i]), ha='center', va='bottom', fontsize=13)
#
# # auc v.s. smd
# l, r = x1, x2 + 0.4*w
# ax.plot([l, l, (l + r) / 2], [y, y + d, y + d], lw=1.2, c=colors[0] if colorful else 'black')
# ax.plot([(l + r) / 2, r, r], [y + d, y + d, y], lw=1.2, c=colors[1] if colorful else 'black')
# # ax.plot([x1, x1, x, x], [y+2*d, y+3*d, y+3*d, y+2*d], c='#FAC200') #c="black")
# ax.text((l + r) / 2, y + .6 * d, significance(paucsmd[i]), ha='center', va='bottom', fontsize=13)
# ax.set_title('Success Rate of Balancing by Different PS Model Selection Methods')
    ax.legend([rect[0] for rect in rects], list(legs),
              fontsize=18)  # , bbox_to_anchor=(1.13, 1.01))
# ax.autoscale(enable=True, axis='x', tight=True)
ax.set_xmargin(0.01)
plt.tight_layout()
if dump:
check_and_mkdir(dirname + 'results/fig/')
fig.savefig(dirname + 'results/fig/balance_rate_barplot-{}-{}-all.png'.format(model, contrl_type))
fig.savefig(dirname + 'results/fig/balance_rate_barplot-{}-{}-all.pdf'.format(model, contrl_type))
plt.show()
plt.clf()
def box_plot_model_selection(cohort_dir_name, model, contrl_type='random', dump=True, colorful=True):
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
dfall = pd.read_excel(dirname + 'results/summarized_model_selection_{}.xlsx'.format(model),
sheet_name=contrl_type, converters={'drug': str})
idx = dfall['success_rate-trainval_final_finalnsmd'] >= MIN_SUCCESS_RATE # 0.1
idx_auc = dfall['success_rate-val_auc_nsmd'] >= MIN_SUCCESS_RATE # 0.1
idx_smd = dfall['success_rate-val_maxsmd_nsmd'] >= MIN_SUCCESS_RATE # 0.1
print('Total drug trials: ', len(idx))
print(r"#df['success_rate-trainval_final_finalnsmd'] > 0: ", idx.sum(), '({:.2f}%)'.format(idx.mean() * 100))
print(r"#df['success_rate-val_auc_nsmd'] > 0: ", idx_auc.sum(), '({:.2f}%)'.format(idx_auc.mean() * 100))
print(r"#df['success_rate-val_maxsmd_nsmd'] > 0: ", idx_smd.sum(), '({:.2f}%)'.format(idx_smd.mean() * 100))
df = dfall.loc[idx, :].sort_values(by=['success_rate-trainval_final_finalnsmd'], ascending=[False])
# df['nsmd_mean_ci-val_auc_nsmd']
N = len(df)
drug_list = df.loc[idx, 'drug']
drug_name_list = df.loc[idx, 'drug_name']
drug_name_list = [s[:18] for s in drug_name_list]
data_1 = []
data_2 = []
data_3 = []
data_pvalue = []
for drug in drug_list:
rdf = pd.read_csv(dirname + 'results/' + drug + '_model_selection.csv')
if contrl_type != 'all':
idx = rdf['ctrl_type'] == contrl_type
else:
idx = rdf['ctrl_type'].notna()
data_1.append(np.array(rdf.loc[idx, "val_auc_testauc"]))
data_2.append(np.array(rdf.loc[idx, "val_maxsmd_testauc"]))
data_3.append(np.array(rdf.loc[idx, "trainval_final_testnauc"]))
p1, test_orig1 = bootstrap_mean_pvalue_2samples(data_3[-1], data_1[-1])
p2, test_orig2 = bootstrap_mean_pvalue_2samples(data_3[-1], data_2[-1])
p3, test_orig3 = bootstrap_mean_pvalue_2samples(data_1[-1], data_2[-1])
# test_orig1_man = stats.mannwhitneyu(data_3[-1], data_1[-1])
# test_orig2_man = stats.mannwhitneyu(data_3[-1], data_2[-1])
data_pvalue.append([test_orig1[1], test_orig2[1], test_orig3[1]])
colors = ['#FAC200', '#82A2D3', '#F65453']
fig, ax = plt.subplots(figsize=(45, 8)) #(18, 8)
width = 0.3 # 0.5 # the width of the bars
ind = np.arange(N) * width * 4 # the x locations for the groups
sym = 'o'
# 'meanline':True,
box_kw = {"sym": sym, "widths": width, "patch_artist": True, "notch": True,
'showmeans': True, # 'meanline':True,
"meanprops": dict(linestyle='--', linewidth=1, markeredgecolor='purple', marker='^',
markerfacecolor="None")}
rects1 = plt.boxplot(data_1, positions=ind - 0.08, **box_kw)
rects2 = plt.boxplot(data_2, positions=ind + width, **box_kw)
rects3 = plt.boxplot(data_3, positions=ind + 2 * width + 0.08, **box_kw)
def plot_strip(ind, data, color):
w = width - 0.15
swarm1 = pd.DataFrame([(ind[i], data[i][j]) for i in range(len(ind)) for j in range(len(data[i]))],
columns=['x', 'y'])
strip_rx = stats.uniform(-w / 2., w).rvs(len(swarm1))
# sns.stripplot(x='x', y='y', data=swarm1, color=".25", alpha=0.2, ax=ax)
plt.scatter(swarm1['x'] + strip_rx, swarm1['y'], alpha=0.2, c=color)
# ticks = list(drug_name_list)
for i, bplot in enumerate([rects1, rects2, rects3]):
for patch in bplot['boxes']:
patch.set_facecolor(colors[i])
r, g, b, a = patch.get_facecolor()
patch.set_facecolor((r, g, b, .7))
# plt.setp(bplot['boxes'], color=color)
# plt.setp(bplot['whiskers'], color=color)
# plt.setp(bplot['caps'], color=color)
plt.setp(bplot['medians'], color='black')
plot_strip(ind - 0.08, data_1, colors[0])
plot_strip(ind + width, data_2, colors[1])
plot_strip(ind + 2 * width + 0.08, data_3, colors[2])
# plt.ylim([0.5, 0.85])
ax.set_xticks(ind + width)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
# ax.spines['bottom'].set_color('#DDDDDD')
ax.set_axisbelow(True)
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.yaxis.grid(True, color='#EEEEEE', which='both')
ax.xaxis.grid(False)
ax.set_xticklabels(drug_name_list, fontsize=20, rotation=45, ha='right')
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel("Drug Trials", fontsize=25)
ax.set_ylabel("Test AUC", fontsize=25)
def significance(val):
if val < 0.001:
return '***'
elif val < 0.01:
return '**'
elif val < 0.05:
return '*'
else:
return 'ns'
# def labelvalue(rects, x, y, p):
# for i, rect in enumerate(rects):
# ax.text(x[i], y[i],
# significance(p[i]),
# ha='center', va='bottom', fontsize=11)
#
# labelvalue(rects1["boxes"], ind - 0.08, np.max(data_1, axis=1)*1.01, np.array(data_pvalue)[:,0])
# labelvalue(rects2["boxes"], ind + width, np.max(data_2, axis=1)*1.01, np.array(data_pvalue)[:,1])
p_v = np.array(data_pvalue)
for i in range(N):
d = 0.008
y = np.max([data_1[i].max(), data_2[i].max(), data_3[i].max()]) * 1.01 # rect.get_height()
x = ind[i] + 2 * width + 0.08 # + width/2
x1 = ind[i] - 0.08 # - width/2
x2 = ind[i] + width # - width/2
# auc v.s. smd
l, r = x - 0.5 * width, x2 - 0.08
ax.plot([x1, x1, (x2 + x1) / 2], [y, y + d, y + d], lw=1.2, c=colors[0] if colorful else 'black')
ax.plot([(x2 + x1) / 2, x2 - 0.08, x2 - 0.08], [y + d, y + d, y], lw=1.2, c=colors[1] if colorful else 'black')
ax.text((x2 + x1) / 2, y + d, significance(p_v[i, 2]), ha='center', va='bottom', fontsize=12)
# auc v.s. final
ax.plot([x1, x1, (x + x1) / 2], [y + 2 * d, y + 3 * d, y + 3 * d], lw=1.2, c=colors[0] if colorful else 'black')
ax.plot([(x + x1) / 2, x, x], [y + 3 * d, y + 3 * d, y + 2 * d], lw=1.2, c=colors[2] if colorful else 'black')
# ax.plot([x1, x1, x, x], [y+2*d, y+3*d, y+3*d, y+2*d], c="black")
ax.text((x + x1) / 2, y + 3 * d, significance(p_v[i, 0]), ha='center', va='bottom', fontsize=12)
# smd v.s. final
ax.plot([x2 + 0.08, x2 + 0.08, (x + x2) / 2], [y, y + d, y + d], lw=1.2, c=colors[1] if colorful else 'black')
ax.plot([(x + x2) / 2, x, x], [y + d, y + d, y], lw=1.2, c=colors[2] if colorful else 'black')
# ax.plot([x2, x2, x, x], [y, y + d, y + d, y], c="black")
ax.text((x + x2) / 2, y + 1 * d, significance(p_v[i, 1]), ha='center', va='bottom', fontsize=12)
ax.legend((rects1["boxes"][0], rects2["boxes"][0], rects3["boxes"][0]),
('Val-AUC Select', 'Val-SMD Select', 'Our Strategy'),
fontsize=20)
ax.set_xmargin(0.01)
plt.tight_layout()
if dump:
check_and_mkdir(dirname + 'results/fig/')
fig.savefig(dirname + 'results/fig/test_auc_boxplot-{}-{}.png'.format(model, contrl_type))
fig.savefig(dirname + 'results/fig/test_auc_boxplot-{}-{}.pdf'.format(model, contrl_type))
plt.show()
plt.clf()
def box_plot_model_selectionV2(cohort_dir_name, model, contrl_type='random', dump=True, colorful=True):
dirname = r'output_marketscan/{}/{}/'.format(cohort_dir_name, model)
dfall = pd.read_excel(dirname + 'results/summarized_model_selection_{}-More.xlsx'.format(model),
sheet_name=contrl_type, converters={'drug': str})
idx = dfall['success_rate-trainval_final_finalnsmd'] >= MIN_SUCCESS_RATE
idx_auc = dfall['success_rate-val_auc_nsmd'] >= MIN_SUCCESS_RATE
idx_smd = dfall['success_rate-val_maxsmd_nsmd'] >= MIN_SUCCESS_RATE
print('Total drug trials: ', len(idx))
print(r"#df['success_rate-trainval_final_finalnsmd'] > 0: ", idx.sum(), '({:.2f}%)'.format(idx.mean() * 100))
print(r"#df['success_rate-val_auc_nsmd'] > 0: ", idx_auc.sum(), '({:.2f}%)'.format(idx_auc.mean() * 100))
print(r"#df['success_rate-val_maxsmd_nsmd'] > 0: ", idx_smd.sum(), '({:.2f}%)'.format(idx_smd.mean() * 100))
df = dfall.loc[idx, :].sort_values(by=['success_rate-trainval_final_finalnsmd'], ascending=[False])
# df['nsmd_mean_ci-val_auc_nsmd']
N = len(df)
drug_list = df.loc[idx, 'drug']
drug_name_list = df.loc[idx, 'drug_name']
col = ['val_auc_testauc', 'val_maxsmd_testauc', 'val_nsmd_testauc',
'train_maxsmd_testauc', 'train_nsmd_testauc',
'trainval_maxsmd_testauc', 'trainval_nsmd_testauc',
'trainval_final_testnauc']
legs = ['_'.join(x.split('_')[:-1]) for x in col]
data_list = [[] for i in range(len(col))]
data_1 = []
data_2 = []
data_3 = []
data_pvalue = []
for drug in drug_list:
rdf = | pd.read_csv(dirname + 'results/' + drug + '_model_selection.csv') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script defines some useful functions to use in data analysis and visualization
@ <NAME> (<EMAIL>)
"""
def dl_ia_utils_change_directory(path):
    """ Change the working directory to `path` and make it importable.
        Example: path = 'path/to/app/'
    """
    import os
    import sys
    os.chdir(path)  # os.chdir returns None, so there is no point in storing its result
    sys.path.insert(1, path)
def dl_ia_utils_set_up_logger(path):
""" Set up logger
:arg path: path where to store logs example: 'logs\\dl-ia-cla-predictive'
"""
    import logging
    logger = logging.getLogger(path)
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler('{}.log'.format(path))
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)  # add the file handler only once; adding it twice writes every record twice
    logging.getLogger().addHandler(logging.StreamHandler())  # also echo messages to the console
    # logger.debug('debug message')
    # logger.info('info message')
    # logger.warning('warning message')
    # logger.error('error message')
    # logger.critical('critical message')
def dl_ia_utils_systems_info():
""" Function that shows the system properties
"""
import sys
from platform import python_version
print('Python version:{}'.format(python_version()))
print('Python system version:{}'.format(sys.version))
print('Path:{}'.format(sys.executable))
print('Python version info:{}'.format(sys.version_info))
def dl_ia_utils_config_plotly():
""" this function configures the plotly visualization
:return:
"""
import plotly.io as pio
import plotly.graph_objects as go
import plotly.express as px
pio.renderers.default = "browser"
def dl_ia_utils_config_matplotlib():
""" this function configures the matplotlib style
:return:
"""
from matplotlib import rc
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Times']})
rc('text', usetex=True)
def dl_ia_utils_config_pandas():
"""
Allows to show all the columns of a dataframe in the console
Limit pandas warnings
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
desired_width = 350
np.set_printoptions(linewidth=desired_width) # show dataframes in console
| pd.set_option('display.max_columns', 10) | pandas.set_option |
#coding:utf-8
from scipy import stats
import numpy as np
from pandas import Series,DataFrame
from openpyxl import load_workbook
import math
import uuid
class AnovaTestFile:
def __init__(self, data_file):
self.data_file = data_file
self.wb = load_workbook(data_file)
self.sheetnames = [s for s in self.wb.get_sheet_names()\
if 'result' not in s]
self.sheets = [self.wb.get_sheet_by_name(s) for s in self.sheetnames]
self.resultsheets = []
def create_result_sheet(self):
self.resultsheets = [self.wb.create_sheet(title='result_'+name)\
for name in self.sheetnames]
def save(self,filepath):
self.wb.save(filepath)
class AnovaTestSheet:
def __init__(self,sheet):
self.sheet = sheet
self.sheetname = self.sheet.title
self.max_row = self.sheet.max_row
self.df = self._get_whole_dataframe()
self.ptable = {}
self.pheader = self._get_pheader()
self.tags = {}
@staticmethod
def _product_random_value(mean,stdrange):
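        # Draw a target stdev b, then place two values symmetrically around the mean;
        # the sample stdev (ddof=1) of {mean - a, mean + a} is a*sqrt(2) = b.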
b = np.random.uniform(*stdrange)
a = b/math.sqrt(2)
x1,x2 = mean-a, mean+a
return x1,x2,b
def _get_mean_value(self,row):
if row % 2 == 1:
return self.sheet['G'+str(row)].value
else:
return self.sheet['G'+str(row-1)].value
def _set_od_value(self,row,x1,x2):
if row % 2 == 1:
self.sheet['F'+str(row)]=x1
self.sheet['F'+str(row+1)]=x2
def _get_stdev_value(self,row):
if row % 2 == 1:
return self.sheet['H'+str(row)].value
else:
return self.sheet['H'+str(row-1)].value
def _set_stdev_value(self,row,stdev):
if row % 2 == 1:
self.sheet['H'+str(row)] = stdev
def _get_one_row(self,row):
time = self.sheet['A'+str(row)].value
organ = self.sheet['B'+str(row)].value
sp = self.sheet['C'+str(row)].value
c = self.sheet['D'+str(row)].value
rep = self.sheet['E'+str(row)].value
od = self.sheet['F'+str(row)].value
mean = self._get_mean_value(row)
stdev = self._get_stdev_value(row)
return Series([time,organ,sp,c,rep,od,mean,stdev],\
index=['time','organ','sp','c','rep','od','mean','stdev'])
def _get_whole_dataframe(self):
data={}
for i in range(3,self.max_row+1):
data[i]=self._get_one_row(i)
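        # each sheet row becomes one column of the resulting DataFrame (keyed by its row number)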
return | DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This script performs reverse geocoding for post coordinates, fetching the name
of the administrative region to which the post is geotagged.
Usage:
Execute the script from the command line using the following command:
python3 reverse_geocode.py -i input.pkl -o output.pkl
Arguments:
-i/--input: Path to the pandas DataFrame containing user location history.
-o/--output: Path to the output pandas DataFrame containing reverse geocoded
location histories.
Output:
A pandas DataFrame containing the reverse geocoded location histories.
"""
import argparse
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
# Set up the argument parser
ap = argparse.ArgumentParser()
# Define arguments
ap.add_argument("-i", "--input", required=True,
help="Path to the DataFrame containing user location history.")
ap.add_argument("-o", "--output", required=True,
help="Path to the output dataframe containing reverse geocoded "
"location histories for the users.")
# Parse arguments
args = vars(ap.parse_args())
# Assign arguments to variables
input_df = | pd.read_pickle(args['input']) | pandas.read_pickle |
from itertools import product
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.validation import quality_mapping
def test_ok_user_flagged():
assert quality_mapping.DESCRIPTION_MASK_MAPPING['OK'] == 0
assert quality_mapping.DESCRIPTION_MASK_MAPPING['USER FLAGGED'] == 1
def test_description_dict_version_compatibility():
for dict_ in quality_mapping.BITMASK_DESCRIPTION_DICT.values():
assert dict_['VERSION IDENTIFIER 0'] == 1 << 1
assert dict_['VERSION IDENTIFIER 1'] == 1 << 2
assert dict_['VERSION IDENTIFIER 2'] == 1 << 3
def test_latest_version_flag():
# test valid while only identifiers 0 - 2 present
last_identifier = max(
int(vi.split(' ')[-1]) for vi in
quality_mapping.DESCRIPTION_MASK_MAPPING.keys() if
vi.startswith('VERSION IDENTIFIER'))
assert last_identifier == 2
assert (quality_mapping.LATEST_VERSION_FLAG ==
quality_mapping.LATEST_VERSION << 1)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask(flag_val):
flag, mask = flag_val
mask |= quality_mapping.LATEST_VERSION_FLAG
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([
mask, mask, quality_mapping.LATEST_VERSION_FLAG, mask,
quality_mapping.LATEST_VERSION_FLAG]))
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_none(flag_invert):
assert quality_mapping.convert_bool_flags_to_flag_mask(
None, *flag_invert) is None
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_adds_latest_version(flag_invert):
ser = pd.Series([0, 0, 0, 1, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(
ser, *flag_invert)
assert (flags & quality_mapping.LATEST_VERSION_FLAG).all()
@pytest.fixture()
def ignore_latest_version(mocker):
mocker.patch(
'solarforecastarbiter.validation.quality_mapping.LATEST_VERSION_FLAG',
0)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([mask, mask, 0, mask, 0]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_no_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, False)
assert_series_equal(flags, pd.Series([0, 0, mask, 0, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return pd.Series([True, True, False, False])
out = f(_return_mask=True)
assert_series_equal(out, pd.Series([latest, latest, mask, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags_tuple(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return pd.Series([True, True, False, False]), None
out = f(_return_mask=True)
assert_series_equal(out[0], pd.Series([latest, latest, mask, mask]))
assert out[1] is None
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags_noop(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
inp = pd.Series([True, True, False, False])
@quality_mapping.mask_flags(flag)
def f():
return inp
out = f()
assert_series_equal(out, inp)
@pytest.mark.parametrize('flag,expected', [
(0b10, 1),
(0b11, 1),
(0b10010, 1),
(0b10010010, 1),
(0b100, 2),
(0b110, 3),
(0b1110001011111, 7)
])
def test_get_version(flag, expected):
assert quality_mapping.get_version(flag) == expected
def test_has_data_been_validated():
flags = pd.Series([0, 1, 2, 7])
out = quality_mapping.has_data_been_validated(flags)
assert_series_equal(out, pd.Series([False, False, True, True]))
@pytest.mark.parametrize('flag,desc,result', [
(0, 'OK', True),
(1, 'OK', False),
(2, 'OK', True),
(3, 'OK', False),
(0, 'USER FLAGGED', False),
(3, 'USER FLAGGED', True),
(0, 'CLEARSKY', False),
(16, 'OK', False),
(1, 'USER FLAGGED', True),
(16, 'NIGHTTIME', True),
(33, 'CLEARSKY', True),
(33, 'NIGHTTIME', False),
(33, ['OK', 'NIGHTTIME'], False),
(33, ('OK', 'CLEARSKY', 'USER FLAGGED'), True),
(2, ('OK', 'NIGHTTIME'), True),
(9297, 'USER FLAGGED', True)
])
def test_check_if_single_value_flagged(flag, desc, result):
flag |= quality_mapping.LATEST_VERSION_FLAG
out = quality_mapping.check_if_single_value_flagged(flag, desc)
assert out == result
@pytest.mark.parametrize('flag', [0, 1])
def test_check_if_single_value_flagged_validation_error(flag):
with pytest.raises(ValueError):
quality_mapping.check_if_single_value_flagged(flag, 'OK')
@pytest.mark.parametrize('desc', [33, b'OK', [1, 2], []])
def test_check_if_single_value_flagged_type_error(desc):
with pytest.raises(TypeError):
quality_mapping.check_if_single_value_flagged(2, desc)
@pytest.mark.parametrize('desc', ['NOPE', 'MAYBE', ['YES', 'NO']])
def test_check_if_single_value_flagged_key_error(desc):
with pytest.raises(KeyError):
quality_mapping.check_if_single_value_flagged(2, desc)
@pytest.mark.parametrize('flags,expected', [
(pd.Series([0, 1, 0]), pd.Series([False, False, False])),
(pd.Series([2, 2, 2]), pd.Series([True, True, True])),
(pd.Series([3, 2, 2]), pd.Series([False, True, True])),
(pd.Series([3, 34, 130]), pd.Series([False, False, False]))
])
def test_which_data_is_ok(flags, expected):
out = quality_mapping.which_data_is_ok(flags)
assert_series_equal(out, expected)
DESCRIPTIONS = ['USER FLAGGED', 'NIGHTTIME', 'CLEARSKY',
'SHADED', 'UNEVEN FREQUENCY', 'LIMITS EXCEEDED',
'CLEARSKY EXCEEDED', 'STALE VALUES', 'INTERPOLATED VALUES',
'CLIPPED VALUES', 'INCONSISTENT IRRADIANCE COMPONENTS',
'DAILY VALIDATION APPLIED']
DERIVED_DESCRIPTIONS = ['DAYTIME', 'DAYTIME STALE VALUES',
'DAYTIME INTERPOLATED VALUES']
@pytest.mark.parametrize('flag,expected', [
(2, pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
index=DESCRIPTIONS, dtype=bool)),
(3, pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
index=DESCRIPTIONS, dtype=bool)),
(35, pd.Series([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
index=DESCRIPTIONS, dtype=bool)),
(2 | 1 << 13 | 1 << 12 | 1 << 10,
pd.Series([0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0],
index=DESCRIPTIONS, dtype=bool))
])
def test_check_for_all_descriptions(flag, expected):
out = quality_mapping.check_for_all_descriptions(flag)
assert_series_equal(out, expected)
@pytest.mark.parametrize('flag', [0, 1])
def test_check_for_all_validation_fail(flag):
with pytest.raises(ValueError):
quality_mapping.check_for_all_descriptions(flag)
def test_convert_mask_into_dataframe():
flags = (pd.Series([0, 0, 1, 1 << 12, 1 << 9 | 1 << 7 | 1 << 5]) |
quality_mapping.LATEST_VERSION_FLAG)
columns = DESCRIPTIONS + ['NOT VALIDATED'] + DERIVED_DESCRIPTIONS
expected = pd.DataFrame([[0] * 13 + [1, 0, 0],
[0] * 13 + [1, 0, 0],
[1] + [0] * 12 + [1, 0, 0],
[0] * 9 + [1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0]],
columns=columns,
dtype=bool)
out = quality_mapping.convert_mask_into_dataframe(flags)
assert_frame_equal(out, expected)
def test_convert_mask_into_dataframe_w_unvalidated():
flags = (pd.Series([0, 0, 1, 1 << 12, 1 << 9 | 1 << 7 | 1 << 5]) |
quality_mapping.LATEST_VERSION_FLAG)
flags.iloc[0] = 0
columns = DESCRIPTIONS + ['NOT VALIDATED'] + DERIVED_DESCRIPTIONS
expected = pd.DataFrame([[0] * 12 + [1, 0, 0, 0],
[0] * 13 + [1, 0, 0],
[1] + [0] * 12 + [1, 0, 0],
[0] * 9 + [1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0]],
columns=columns,
dtype=bool)
out = quality_mapping.convert_mask_into_dataframe(flags)
assert_frame_equal(out, expected, check_like=True)
def test_convert_mask_into_dataframe_all_unvalidated():
flags = pd.Series([0, 0, 1, 1, 0])
columns = ['NOT VALIDATED']
expected = pd.DataFrame([[1]] * 5,
columns=columns,
dtype=bool)
out = quality_mapping.convert_mask_into_dataframe(flags)
assert_frame_equal(out, expected, check_like=True)
def test_convert_flag_frame_to_strings():
frame = pd.DataFrame({'FIRST': [True, False, False],
'SECOND': [False, False, True],
'THIRD': [True, False, True]})
expected = pd.Series(['FIRST, THIRD', 'OK', 'SECOND, THIRD'])
out = quality_mapping.convert_flag_frame_to_strings(frame)
assert_series_equal(expected, out)
@pytest.mark.parametrize('expected,desc', [
(pd.Series([1, 0, 0, 0], dtype=bool), 'OK'),
(pd.Series([0, 1, 0, 1], dtype=bool), 'USER FLAGGED'),
(pd.Series([0, 0, 1, 0], dtype=bool), 'CLEARSKY EXCEEDED'),
(pd.Series([0, 0, 0, 1], dtype=bool), 'CLEARSKY'),
(pd.Series([0, 0, 0, 1], dtype=bool), 'CLIPPED VALUES'),
(pd.Series([0, 0, 0, 0], dtype=bool), 'STALE VALUES'),
])
def test_check_if_series_flagged(expected, desc):
flags = pd.Series([2, 3, 2 | 1 << 9, 2 | 1 << 5 | 1 << 12 | 1])
out = quality_mapping.check_if_series_flagged(flags, desc)
assert_series_equal(out, expected)
def test_check_if_series_flagged_validated_fail():
with pytest.raises(ValueError):
quality_mapping.check_if_series_flagged(pd.Series([0, 1, 0]), 'OK')
def test_check_if_series_flagged_type_fail():
with pytest.raises(TypeError):
quality_mapping.check_if_series_flagged( | pd.Series([2, 3, 35]) | pandas.Series |
from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see gh-4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
# see gh-7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see gh-7271
s = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = s.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = s.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
s.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
s.astype(dt4)
# GH16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
s.astype(dt5)
def test_astype_categories_raises(self):
# deprecated 17636, removed in GH-27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
def test_astype_from_categorical(self):
items = ["a", "b", "c", "a"]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
items = [1, 2, 3, 1]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
df = DataFrame({"cats": [1, 2, 3, 4, 5, 6], "vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = DataFrame(
{"cats": ["a", "b", "b", "a", "a", "d"], "vals": [1, 2, 3, 4, 5, 6]}
)
cats = Categorical(["a", "b", "b", "a", "a", "d"])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
lst = ["a", "b", "c", "a"]
s = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = s.astype(CategoricalDtype(None, ordered=True))
tm.assert_series_equal(res, exp)
exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
res = s.astype(CategoricalDtype(list("abcdef"), ordered=True))
tm.assert_series_equal(res, exp)
def test_astype_categorical_to_other(self):
value = np.random.RandomState(0).randint(0, 10000, 100)
df = DataFrame({"value": value})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
s = df["value_group"]
expected = s
tm.assert_series_equal(s.astype("category"), expected)
tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
msg = r"could not convert string to float|invalid literal for float\(\)"
with pytest.raises(ValueError, match=msg):
s.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
tm.assert_series_equal(cat.astype("str"), exp)
s2 = Series(Categorical(["1", "2", "3", "4"]))
exp2 = Series([1, 2, 3, 4]).astype(int)
tm.assert_series_equal(s2.astype("int"), exp2)
# object don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(s.values), name="value_group")
cmp(s.astype("object"), expected)
cmp(s.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(s), np.array(s.values))
tm.assert_series_equal(s.astype("category"), s)
tm.assert_series_equal(s.astype(CategoricalDtype()), s)
roundtrip_expected = s.cat.set_categories(
s.cat.categories.sort_values()
).cat.remove_unused_categories()
tm.assert_series_equal(
s.astype("object").astype("category"), roundtrip_expected
)
tm.assert_series_equal(
s.astype("object").astype(CategoricalDtype()), roundtrip_expected
)
# invalid conversion (these are NOT a dtype)
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
for invalid in [
lambda x: x.astype(Categorical),
lambda x: x.astype("object").astype(Categorical),
]:
with pytest.raises(TypeError, match=msg):
invalid(s)
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH 10696/18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
s = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = s.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = | Series(s_data, name=name, dtype=exp_dtype) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 14 22:43:13 2016
@author: zhouyu
for kaggle challenge - allstate
"""
import pandas as pd
import numpy as np
import seaborn as sns
dataset = | pd.read_csv('/Users/zhouyu/Documents/Zhou_Yu/DS/kaggle_challenge/train.csv') | pandas.read_csv |
# variant using the csv package
import csv
import pandas as pd
with open('test.csv', 'r') as f:
r = csv.reader(f, delimiter=',')
    for row in r:  # loop over the rows
        for i in range(0, len(row)):
            if len(row) == 19:  # I want all rows that span all 19 columns
                print(row[i] + ",")
# variant using the pandas package
data = pd.read_csv('test.csv', delimiter=',', header=None, nrows=120)
print(data.head())  # head() - only the first 5 rows
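# Note: csv.reader yields every field as a raw string, while pandas infers column types
# and returns a labeled DataFrame - which is why the pandas variant above is much shorter.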
# tuples
cuts = ("slim", "regular", "large")
cuts = cuts + ('extraslim', 'extralarge', 'traditional')  # concatenating the tuple with more elements
print(cuts)
print(cuts.count('slim'))
print(cuts.index('regular'))
#lists
listaMea = list(cuts)
print(listaMea)
element = listaMea.pop()
print(element)
print(listaMea)
lista2 = listaMea.copy()
print(lista2)
lista2.reverse()
print(lista2)
lista2.append('firisor de aur')
print(lista2)
lista3 = (2, 8, 9, 3, 3, 2)
lista3=list(lista3)
print(lista3)
lista3.sort()
print(lista3)
lista_sortata =sorted(lista3)
print(lista_sortata)
c1=5.63
c2=6.88
c3=4.8
c4=1.09
c5=108
c6=31.5
c7=43.41
c8=23.95
c9=36.33
c10=150
from math import *
# print(round(c1))
# print(round(c2))
# print(round(c3))
# print(round(c4))
# print(round(c5))
print(sqrt(c1))
print(sqrt(c2))
print(sqrt(c3))
def func1():  # definition
for i in range(0,5):
print(lista2[i])
func1()
import pandas as pd
# pd.set_option('display.width', 120) # set char width
#
# df = pd.read_csv('test.csv')
# print(df.iloc[12], '\n', type(df.iloc[12])) # print row #13 ==> object of type Series
# print('-' * 50) # separator - beautifier
# print(df.iloc[[1, 3, 5]], '\n', type(df.iloc[[1, 3, 5]])) # list of integers # prints rows (as a list),
# # ==> object of type DataFrame
# Example 10.a
import pandas as pd
df = pd.read_csv('test.csv')
# print(df.loc[(df['type']==3),['name']]) # materials of auxiliary type (3)
# def func2():
# for i in range(0,5):
# while len(lista3)!=0:
# lista3.pop()
# print(lista3)
# func2()
lista2.clear()
print(lista2)
lista2.insert(2,'bro<NAME>')
print(lista2)
import math
df = pd.read_csv('suppliers.csv', index_col="name")
# print('The average value of the months in which we sold is', df['month'].mean())
# print('The maximum value of the recorded days is', df['day'].max())
# print('The first recorded year of the outputs is', df['year'].min())
print(df)
df.dropna(inplace=True)
print(df)
print(df.loc[0, 'id'])
df.loc[0,'id'] = 1255
print(df.loc[0, 'id'])
#Dicts
dict = {"laptop":"Dell", "software":"Windows", "periferice":"kit mouse-tastatura RGB"}
print(dict)
dict["laptop"] = "Lenovo"
print(dict)
dict.popitem()
# removes the last inserted key-value pair
print(dict)
x=dict.items()
# returns a view of (key, value) tuples
import matplotlib.pyplot as plt
print(x)
df1 = pd.DataFrame(
{
"1": "InterSport",
"2": "Taco",
"3": "0CCC",
"4": "PPP",
},
index=[0, 1, 2, 3],
)
df2 = pd.DataFrame(
{
"5": "eMAG",
"6": "AboutYou",
"7": "InterSport",
"8": "Taco",
},
index=[4, 5, 6, 7],
)
result = pd.concat([df1, df2], axis=1, join="inner")
print(result)
print(df['Price'])
df['Price'].plot(kind='hist')
plt.ylabel('Price')
plt.xlabel('Price min-qty')
plt.show()
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
pd.options.display.max_columns = 11
test = | pd.read_csv('test.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 13:15:21 2020
@author: jm
"""
#%% required libraries
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%% read data
#df_original = pd.read_csv('https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv?cachebust=5805f0ab2859cf87', encoding = 'utf-8')
df_original = pd.read_csv('data/google_mobility_report_2020-07-25.csv', encoding = 'utf-8')
df = df_original.copy()
# check columns
df.columns
# see head of data frame
df.head()
#%% filter data for Argentina only
df = df[df['country_region'] == 'Argentina']
# check resulting data
df.info()
# check NA
df.isna().any()
df.isna().sum().plot(kind = 'bar')
# drop columns with many NA
df = df.drop(columns = ['country_region_code', 'sub_region_2', 'iso_3166_2_code', 'census_fips_code'])
# rename columns
df.rename(columns = {'country_region': 'pais',
'sub_region_1': 'provincia',
'date': 'fecha',
'retail_and_recreation_percent_change_from_baseline': 'retail_and_recreation',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery_and_pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit_stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential'},
inplace = True)
# drop row where 'provincia' is NA
df = df.dropna(subset = ['provincia'])
# check NA
df.isna().sum().plot(kind = 'bar')
#%% set index to plot the data
df['fecha'] = pd.to_datetime(df['fecha'])
df.set_index('fecha', inplace = True)
# check index
print(df.index)
#%% subsets
bsas = df[df['provincia'] == 'Buenos Aires Province']
caba = df[df['provincia'] == 'Buenos Aires']
#%% plot for CABA
plt.rcParams["figure.dpi"] = 1200
fig, ax = plt.subplots(figsize = (10, 10))
# plot data
ax.plot(caba.index, caba['workplaces'], color = 'darkred', label = 'Workplaces')
ax.plot(caba.index, caba['retail_and_recreation'], color = 'darkblue', label = 'Retail and recreation')
# color the area of lockdown phase 1
p1 = caba['2020-07-01':'2020-07-17'].index
ax.fill_between(p1, -90, -30, facecolor = 'lightsteelblue', alpha = 0.3, label = 'Fase 1')
# annotate carnaval
ax.annotate('Carnaval', xy = [pd.Timestamp('2020-02-24'), -71],
xytext = [pd.Timestamp('2020-03-25'), 10],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia del trabajador
ax.annotate('Día del \ntrabajador', xy = [pd.Timestamp('2020-05-01'), -87],
xytext = [pd.Timestamp('2020-03-28'), -50],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia de la Revolucion de Mayo
ax.annotate('Día de la \nRevolución de Mayo', xy = [pd.Timestamp('2020-05-25'), -84],
xytext = [pd.Timestamp('2020-04-01'), -30],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate paso a la inmortalidad <NAME>
ax.annotate('Paso a la inmortalidad \nGral. Güemes', xy = [pd.Timestamp('2020-06-15'), -80],
xytext = [pd.Timestamp('2020-04-15'), -15],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate paso a la inmortalidad Gral. Belgrano
ax.annotate('Paso a la \ninmortalidad \nGral. Belgrano', xy = [pd.Timestamp('2020-06-20'), -55],
xytext = [ | pd.Timestamp('2020-05-23') | pandas.Timestamp |
"""
Functions for loading models and generating predictions:
- `load_model` downloads and returns a Simple Transformers model from HuggingFace.
- `predict_domains` generates a multi-label which indicates which of the 9 ICF domains are discussed in a given sentence; the order is ['ADM', 'ATT', 'BER', 'ENR', 'ETN', 'FAC', 'INS', 'MBW', 'STM'], i.e. if the sentence is labeled as [1, 0, 0, 0, 0, 1, 0, 0, 0], it means it contains the ADM and FAC domains
- `predict_levels` generates a float that indicates the level of functioning (for a specific domain) discussed in the sentence
"""
import numpy as np
import pandas as pd
import torch
import warnings
from simpletransformers.classification import MultiLabelClassificationModel, ClassificationModel
from src import timer
@timer
def load_model(
model_type,
model_name,
task,
):
"""
Download and return a Simple Transformers model from HuggingFace.
Parameters
----------
model_type: str
type of the pre-trained model, e.g. bert, roberta, electra
model_name: {str, Path}
path to a local directory with model file or model name on Hugging Face
task: str
simpletransformers class: 'multi' loads MultiLabelClassificationModel, 'clf' loads ClassificationModel
Returns
-------
model: MultiLabelClassificationModel or ClassificationModel
"""
# check task
msg = f'task should be either "multi" or "clf"; "{task}" is not valid.'
assert task in ['multi', 'clf'], msg
# check CUDA
cuda_available = torch.cuda.is_available()
if not cuda_available:
def custom_formatwarning(msg, *args, **kwargs):
return str(msg) + '\n'
warnings.formatwarning = custom_formatwarning
warnings.warn('CUDA device not available; running on a CPU!')
# load model
print(f'Downloading the model from https://huggingface.co/{model_name}')
if task == 'multi':
Model = MultiLabelClassificationModel
else:
Model = ClassificationModel
return Model(
model_type,
model_name,
use_cuda=cuda_available,
)
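# Illustrative usage (the model names below are placeholders, not verified HuggingFace checkpoints):
#   domains_model = load_model('roberta', 'some-org/icf-domains', task='multi')
#   levels_model = load_model('roberta', 'some-org/icf-levels-adm', task='clf')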
@timer
def predict_domains(
text,
model,
):
"""
Apply a fine-tuned multi-label classification model to generate predictions.
Parameters
----------
text: pd Series
a series of strings
model: MultiLabelClassificationModel
fine-tuned multi-label classification model (simpletransformers)
Returns
-------
df: pd Series
a series of lists; each list is a multi-label prediction
"""
print('Generating domains predictions. This might take a while.', flush=True)
predictions, _ = model.predict(text.to_list())
return pd.Series(predictions, index=text.index)
@timer
def predict_levels(
text,
model,
):
"""
Apply a fine-tuned regression model to generate predictions.
Parameters
----------
text: pd Series
a series of strings
model: ClassificationModel
fine-tuned regression model (simpletransformers)
Returns
-------
predictions: pd Series
a series of floats or an empty series (if text is empty)
"""
to_predict = text.to_list()
if not len(to_predict):
return | pd.Series() | pandas.Series |
import json
import warnings
from collections import Counter, defaultdict
from glob import glob
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.optimize import minimize
from scipy.stats import norm
from tqdm import tqdm
plt.style.use('fivethirtyeight')
# %matplotlib qt
# %%
class TransferEntropy:
"""
Class to compute asset graphs using transfer entropy.
Parameters
----------
assets: list[str], default=None
List of assets to use from loaded data.
data: pd.DataFrame, default=None
        DataFrame of asset prices with datetime index and no missing data;
        log returns are computed from it internally.
If None, price datafiles are loaded from data directory.
Attributes
----------
data: pd.DataFrame
DataFrame of asset log returns with datetime index.
assets: list(str)
List of asset names.
corr: pd.DataFrame
Spearman correlation between assets.
Methods
-------
set_timeperiod(start, end): Set starting and ending dates for analysis.
subset_assets(assets): Subset assets used.
build_asset_graph(solver): Find graph coordinates.
    compute_transfer_entropy(bins): Return transfer entropy.
compute_effective_transfer_entropy: Return effective transfer entropy.
plot_asset_graph(threshold): Plot asset graph edges that meet threshold.
plot_corr(method): Plot correlations between assets.
plot_te(te): Plot transfer entropy heatmap.
"""
def __init__(self, assets=None, data=None):
self.assets = assets
self._prices = data if data is not None else self._load_data()
self.set_timeperiod('1/3/2011', '12/31/2018')
def _read_file(self, fid):
"""Read data file as DataFrame."""
df = pd.read_csv(
fid,
index_col=0,
parse_dates=True,
infer_datetime_format=True,
)
return df
def _load_data(self):
"""Load data from data directory into single into DataFrame."""
fids = glob('../data/*.csv')
df = pd.DataFrame().join(
[self._read_file(fid) for fid in fids], how='outer')
return df
def set_timeperiod(self, start=None, end=None):
"""
        Update self.data with start and end dates for analysis.
Parameters
----------
start: str or datetime object, default=None
Starting date for analysis.
end: str or datetime object, default=None
Ending date for analysis.
"""
data = self._prices.copy()
# Ignore warnings for missing data.
warnings.filterwarnings('ignore')
# Subset data by time period.
if start is not None:
data = data[data.index >= pd.to_datetime(start)].copy()
if end is not None:
data = data[data.index <= pd.to_datetime(end)].copy()
# Drop Weekends and forward fill Holidays.
keep_ix = [ix.weekday() < 5 for ix in list(data.index)]
data = data[keep_ix].copy()
data.fillna(method='ffill', inplace=True)
self.prices = data.copy()
# Calculate Log Returns.
self.data = np.log(data[1:] / data[:-1].values) # log returns
self.data.dropna(axis=1, inplace=True) # Drop assets with missing data.
# Map asset names to DataFrame.
# with open('../data/asset_mapping.json', 'r') as fid:
# asset_map = json.load(fid)
# self.assets = [asset_map.get(a, a) for a in list(self.data)]
# self._n = len(self.assets)
# Subset data to specified assets.
if self.assets is not None:
self.subset_assets(self.assets)
else:
self.assets = list(self.data)
self._n = len(self.assets)
# Rename DataFrame with asset names and init data matrix.
# self.data.columns = self.assets
self._data_mat = self.data.values
def subset_assets(self, assets):
"""
Subset data to specified assets.
Parameters
----------
assets: list[str]
List of assets to use.
"""
self.prices = self.prices[assets].copy()
self.data = self.data[assets].copy()
self.assets = assets
self._n = len(self.assets)
def _euclidean_distance(self, x, i, j):
"""Euclidean distance between points in x-coordinates."""
m = x.shape[1]
return sum((x[i, a] - x[j, a])**2 for a in range(m))**0.5
def _stress_function(self, x):
"""Stress function for Classical Multidimensional Scaling."""
# Map 1-D input coordinates to 2-D
x = np.reshape(x, (-1, 2))
n = x.shape[0]
num, denom = 0, 0
for i in range(n):
for j in range(i, n):
                delta = self._distance[i, j]  # target correlation-based distance
euc_d = self._euclidean_distance(x, i, j)
# Build numerator and denominator sums.
num += (delta - euc_d)**2
denom += self._distance[i, j]**2
return (num / denom)**0.5
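    # Note on _stress_function: with delta taken as the target distance, the
    # quantity minimized is Kruskal's stress-1,
    #   stress = sqrt( sum_{i<=j} (d_ij - ||x_i - x_j||)^2 / sum_{i<=j} d_ij^2 ),
    # where d_ij is the correlation-based distance in self._distance and
    # ||x_i - x_j|| is the Euclidean distance in the 2-D embedding.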
def build_asset_graph(self, solver, distance=None, verbose=False):
"""
        Find 2-D graph coordinates for the assets by minimizing the stress
        function over the pairwise distance matrix.
        Parameters
        ----------
        solver: str
            Scipy minimization method (e.g. 'SLSQP'), passed to
            ``scipy.optimize.minimize``.
        distance: np.ndarray, optional
            Precomputed distance matrix. If None, the correlation-based
            distance sqrt(2 * (1 - corr)) is computed from the data.
        verbose: bool, default=False
            If True, print the coordinate vector at each minimization step.
        """
# Find correlations and distance metric.
self.corr = self.data.corr('spearman')
self._distance = distance if distance is not None \
else np.sqrt(2 * (1-self.corr.values))
# Solve 2-D coordinate positions.
def exit_opt(Xi):
if np.sum(np.isnan(Xi)) > 1:
raise RuntimeError('Minimize convergence failed.')
def printx(Xi):
print(Xi)
exit_opt(Xi)
opt = minimize(
self._stress_function,
x0=np.random.rand(2*self._n),
method=solver,
tol=1e-3,
options={'disp': False, 'maxiter': 10000},
callback=printx if verbose else exit_opt,
)
if opt.status != 0:
raise RuntimeError(opt.message)
self._coordinates = np.reshape(opt.x, (-1, 2))
def plot_asset_graph(self, threshold, all_thresholds=None,
ax=None, figsize=(6, 6), fontsize=6):
"""
Plot asset graph network.
Parameters
----------
threshold: float
Maximum threshold distance for edges in network.
all_thresholds: list[float], default=None
If provided, the colorbar maximum value will be set as the
maximum threshold, otherwise the given threshold is used.
This is convenient for keeping a standard scale when plotting
multiple thresholds.
ax: matplotlib axis, default=None
Matplotlib axis to plot figure, if None one is created.
figsize: list or tuple, default=(6, 6)
Figure size.
fontsize: int, default=6
Fontsize for asset labels.
"""
# Find edges and nodes.
edges = {}
nodes = []
for i in range(self._n - 1):
d_i = self._distance[i, :]
edges_i = np.argwhere(d_i < threshold).reshape(-1)
edges_i = list(edges_i[edges_i > i])
edges[i] = edges_i
if len(edges_i) > 0:
nodes.append(i)
nodes.extend(edges_i)
nodes = list(set(nodes))
edges = {key: val for key, val in edges.items() if len(val) > 0}
# Store values of edges.
edge_vals = {}
for node0, node0_edges in edges.items():
for node1 in node0_edges:
edge_vals[(node0, node1)] = self._distance[node0, node1]
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
x = self._coordinates[:, 0]
y = self._coordinates[:, 1]
# Get sequential colors for edges based on distance.
cmap = sns.color_palette('magma', 101).as_hex()
emin = min(self._distance[self._distance > 0])
emax = threshold if all_thresholds is None else max(all_thresholds)
emax += 1e-3
edge_color = {key: cmap[int(100 * (val-emin) / (emax-emin))]
for key, val in edge_vals.items()}
# Plot edges.
for node0, node0_edges in edges.items():
for node1 in node0_edges:
ix = [node0, node1]
ax.plot(x[ix], y[ix], c=edge_color[tuple(ix)], lw=2, alpha=0.7)
# Plot asset names over edges.
box = {'fill': 'white', 'facecolor': 'white', 'edgecolor': 'k'}
for node in nodes:
ax.text(x[node], y[node], self.assets[node], fontsize=fontsize,
horizontalalignment='center', verticalalignment='center',
bbox=box)
def plot_corr(self, method='pearson', ax=None, figsize=(6, 6),
fontsize=8, cbar=True, labels=True):
"""
Plot correlation of assets.
Parameters
----------
method: {'spearman', 'pearson', 'kendall'}, default='pearson'
Correlation method.
- 'spearman': Spearman rank correlation
- 'pearson': standard correlation coefficient
- 'kendall': Kendall Tau correlation coefficient
ax: matplotlib axis, default=None
Matplotlib axis to plot figure, if None one is created.
figsize: list or tuple, default=(6, 6)
Figure size.
fontsize: int, default=8
Fontsize for asset labels.
cbar: bool, default=True
If True include color bar.
labels: bool, default=False
If True include tick labels for x & y axis.
            If False do not include labels.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
self._corr = self.data.corr(method)
sns.heatmap(self._corr, ax=ax, vmin=-1, vmax=1, cbar=cbar,
xticklabels=labels, yticklabels=labels, cmap='coolwarm')
if labels:
plt.setp(ax.get_xticklabels(), fontsize=fontsize)
plt.setp(ax.get_yticklabels(), fontsize=fontsize)
if cbar:
cbar_ax = ax.collections[0].colorbar
ticks = np.linspace(-1, 1, 5)
cbar_ax.set_ticks(ticks)
cbar_ax.set_ticklabels(ticks)
def _transfer_entropy_function(self, data, X, Y, bins, shuffle):
"""
Compute transfer entropy for asset x on lagged x and lagged y
log returns.
Parameters
----------
X: int
Index location of asset x, asset to be predicted.
Y: int
Index location of asset y, asset which influences asset x.
bins: int
Number of bins to place log returns in.
shuffle: bool
If True, shuffle all time series for randomized transfer entropy.
Returns
-------
te: float
Transfer entropy of y --> x.
"""
x = data[1:, X]
x_lag = data[:-1, X]
y_lag = data[:-1, Y]
n = len(x)
# Find respective historgram bin for each time series value.
data_matrix = np.concatenate([x, x_lag, y_lag])
bin_edges = np.concatenate([
np.linspace(np.min(data_matrix), 0, int(bins/2)+1),
np.linspace(0, np.max(data_matrix), int(bins/2)+1)[1:],
])
bin_vals = np.reshape(pd.cut(
data_matrix, bins=bin_edges, labels=False), (3, -1)).T
if shuffle:
# Shuffle y_lag for randomized transfer entropy.
np.random.shuffle(bin_vals[:, 2])
        # Find frequency of occurrence for each set of joint vectors.
p_ilag = Counter(bin_vals[:, 1])
p_i_ilag = Counter(list(zip(bin_vals[:, 0], bin_vals[:, 1])))
p_ilag_jlag = Counter(list(zip(bin_vals[:, 1], bin_vals[:, 2])))
p_i_ilag_jlag = Counter(
list(zip(bin_vals[:, 0], bin_vals[:, 1], bin_vals[:, 2])))
# Catch warnings as errors for np.log2(0).
warnings.filterwarnings('error')
# Compute transfer entropy.
te = 0
for i in range(bins):
for ilag in range(bins):
for jlag in range(bins):
try:
te_i = (p_i_ilag_jlag.get((i, ilag, jlag), 0) / n
* np.log2(p_i_ilag_jlag.get((i, ilag, jlag), 0)
* p_ilag.get(ilag, 0)
/ p_i_ilag.get((i, ilag), 0)
/ p_ilag_jlag.get((ilag, jlag), 0)
))
except (ZeroDivisionError, RuntimeWarning):
te_i = 0
te += te_i
        # Stop treating warnings as errors; go back to ignoring them.
warnings.filterwarnings('ignore')
return te
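    # Note on _transfer_entropy_function: the sum above is the binned
    # transfer entropy
    #   TE(Y -> X) = sum p(x_t, x_lag, y_lag)
    #                * log2[ p(x_t | x_lag, y_lag) / p(x_t | x_lag) ],
    # estimated from joint histogram counts (the factors of n cancel inside
    # the log).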
def compute_transfer_entropy(self, bins=6, shuffle=False, save=True):
"""
Compute transfer entropy matrix. Returned matrix is directional
        such that asset on X-axis influences next day transfer entropy of
asset on the Y-axis.
Parameters
----------
bins: int, default=6
Number of bins to place log returns in.
shuffle: bool, default=False
If True, shuffle all time series for randomized transfer entropy.
save: bool, default=True
If True save result
Returns
-------
te: [n x n] nd.array
Transfer entropy matrix.
"""
n = self._n
te = np.zeros([n, n])
data = self._data_mat.copy()
for i in range(n):
for j in range(n):
te[i, j] = self._transfer_entropy_function(
data, i, j, bins, shuffle=shuffle)
if save:
self._te = te.copy() # store te matrix.
self._te_min = np.min(te)
self._te_max = np.max(te)
self.te = | pd.DataFrame(te, columns=self.assets, index=self.assets) | pandas.DataFrame |
import argparse
import datetime
import glob
import os
import re
from tqdm import tqdm
import pandas as pd
from textwrap import dedent
def combineDelayFiles(outName, loc=os.getcwd(), ext='.csv'):
files = glob.glob(os.path.join(loc, '*' + ext))
print('Ensuring that "Datetime" column exists in files')
addDateTimeToFiles(files)
print('Combining weather model delay files')
concatDelayFiles(
files,
sort_list=['ID', 'Datetime'],
outName=outName
)
def addDateTimeToFiles(fileList, force=False):
''' Run through a list of files and add the datetime of each file as a column '''
print('Adding Datetime to delay files')
for f in tqdm(fileList):
data = pd.read_csv(f)
if 'Datetime' in data.columns and not force:
print(
                'File {} already has a "Datetime" column, pass '
'"force = True" if you want to override and '
're-process'.format(f)
)
else:
try:
dt = getDateTime(f)
data['Datetime'] = dt
data.to_csv(f, index=False)
except (AttributeError, ValueError):
print(
'File {} does not contain datetime info, skipping'
.format(f)
)
del data
def getDateTime(filename):
''' Parse a datetime from a RAiDER delay filename '''
filename = os.path.basename(filename)
dtr = re.compile(r'\d{8}T\d{6}')
dt = dtr.search(filename)
return datetime.datetime.strptime(
dt.group(),
'%Y%m%dT%H%M%S'
)
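# Example (sketch): any filename containing a yyyymmddTHHMMSS stamp works,
# e.g. getDateTime('ERA5_Delay_20200130T120000_ztd.csv')
#      -> datetime.datetime(2020, 1, 30, 12, 0)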
def concatDelayFiles(
fileList,
sort_list=['ID', 'Datetime'],
return_df=False,
outName=None
):
'''
Read a list of .csv files containing the same columns and append them
together, sorting by specified columns
'''
dfList = []
print('Concatenating delay files')
for f in tqdm(fileList):
dfList.append(pd.read_csv(f))
df_c = pd.concat(
dfList,
ignore_index=True
).drop_duplicates().reset_index(drop=True)
df_c.sort_values(by=sort_list, inplace=True)
print('Total number of rows in the concatenated file: {}'.format(df_c.shape[0]))
print('Total number of rows containing NaNs: {}'.format(
df_c[df_c.isna().any(axis=1)].shape[0]
)
)
if return_df or outName is None:
return df_c
else:
df_c.to_csv(outName, index=False)
def mergeDelayFiles(
raiderFile,
ztdFile,
col_name='ZTD',
raider_delay='totalDelay',
outName=None
):
'''
Merge a combined RAiDER delays file with a GPS ZTD delay file
'''
print('Merging delay files {} and {}'.format(raiderFile, ztdFile))
dfr = pd.read_csv(raiderFile, parse_dates=['Datetime'])
dfz = readZTDFile(ztdFile, col_name=col_name)
print('Beginning merge')
dfc = dfr.merge(
dfz[['ID', 'Datetime', 'ZTD']],
how='left',
left_on=['Datetime', 'ID'],
right_on=['Datetime', 'ID'],
sort=True
)
dfc['ZTD_minus_RAiDER'] = dfc['ZTD'] - dfc[raider_delay]
print('Total number of rows in the concatenated file: {}'.format(dfc.shape[0]))
print('Total number of rows containing NaNs: {}'.format(
dfc[dfc.isna().any(axis=1)].shape[0]
)
)
print('Merge finished')
if outName is None:
return dfc
else:
dfc.to_csv(outName, index=False)
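# Example usage (sketch; the file names below are placeholders):
#   combineDelayFiles('combined_raider_delays.csv', loc='raider_output')
#   mergeDelayFiles('combined_raider_delays.csv', 'gps_ztd.csv',
#                   outName='raider_vs_gps.csv')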
def readZTDFile(filename, col_name='ZTD'):
'''
Read and parse a GPS zenith delay file
'''
try:
data = pd.read_csv(filename, parse_dates=['Date'])
times = data['times'].apply(lambda x: datetime.timedelta(seconds=x))
data['Datetime'] = data['Date'] + times
except KeyError:
data = | pd.read_csv(filename, parse_dates=['Datetime']) | pandas.read_csv |
#!/usr/bin/env python
from settings import settings
import numpy as np
import pandas as pd
import os
import rospy # ros library for publishing and subscribing
from std_msgs.msg import Int16MultiArray # ros message type for arrays of int16
def detection(K, fps, v, Xi, lcr, W, ww):
time_factor = 1
dt = 1./fps
time_waiting = dt/time_factor
'''
    The reference for each tested value is tk, the absolute time elapsed.
    1. Create a matrix to store the values:
        first column: time
        second column: right-most hill
        third column: with weeds or without weeds
        fourth column: if with weeds, the frame coordinate
    2. A value of -1 means none.
'''
my_data = np.full((int(K),5),"",object)
my_data = | pd.DataFrame(data=my_data) | pandas.DataFrame |
#!/usr/bin/python3
import os
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, ShuffleSplit
from pprint import pprint
import numpy as np
import pandas as pd
from scipy import sparse
def main():
"""
Using the Logistic Regression model here to predict licenses using Random and Grid Search for cross-validation and
hyperparameter tuning
"""
os.chdir('../../../all_files_generated')
current_dir = os.getcwd()
data_pickles_dir = os.path.join(current_dir, 'data_pickles')
model_pickles_dir = os.path.join(current_dir, 'model_pickles')
model_confusion_matrix_dir = os.path.join(current_dir, 'model_confusion_matrix_files')
x_train_path = os.path.join(data_pickles_dir, 'x_train.pickle')
x_validation_path = os.path.join(data_pickles_dir, 'x_validation.pickle')
x_test_path = os.path.join(data_pickles_dir, 'x_test.pickle')
y_train_path = os.path.join(data_pickles_dir, 'y_train.pickle')
y_validation_path = os.path.join(data_pickles_dir, 'y_validation.pickle')
y_test_path = os.path.join(data_pickles_dir, 'y_test.pickle')
model_path = os.path.join(model_pickles_dir, 'logistic_regression.pickle')
confusion_matrix_path = os.path.join(model_confusion_matrix_dir, 'logistic_regression_confusion_matrix.png')
# read in all pickle files that may be required
with open(x_train_path, 'rb') as data:
x_train = pickle.load(data)
with open(x_validation_path, 'rb') as data:
x_validation = pickle.load(data)
with open(x_test_path, 'rb') as data:
x_test = pickle.load(data)
with open(y_train_path, 'rb') as data:
y_train = pickle.load(data)
with open(y_validation_path, 'rb') as data:
y_validation = pickle.load(data)
with open(y_test_path, 'rb') as data:
y_test = pickle.load(data)
# combine training and validation datasets
x_train = sparse.vstack((x_train, x_validation)) # scipy.sparse.csr matrix
y_train = y_train.append( | pd.Series(y_validation) | pandas.Series |
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright (c) 2018--, Qurro development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
import logging
import skbio
import pandas as pd
from qurro._df_utils import escape_columns
from qurro._metadata_utils import get_q2_comment_lines
def read_rank_file(file_loc):
"""Converts an input file of ranks to a DataFrame.
Also returns a human-readable "rank type" -- either "Differential" or
"Feature Loading".
"""
if file_loc.endswith(".tsv"):
rank_df = differentials_to_df(file_loc)
rank_type = "Differential"
else:
# ordination_to_df() will raise an appropriate error if it can't
# process this file.
rank_df = ordination_to_df(file_loc)
rank_type = "Feature Loading"
return escape_columns(rank_df, "feature ranks"), rank_type
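# Example (sketch; 'ranks.tsv' is a placeholder): read_rank_file('ranks.tsv')
# returns a per-feature differentials DataFrame plus the string
# "Differential"; any other readable file is treated as an ordination and
# yields feature loadings plus "Feature Loading".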
def rename_loadings(loadings_df):
"""Renames a DataFrame of loadings to say "Axis 1", "Axis 2", etc.
This should match what Emperor does in its visualizations.
"""
loadings_df_copy = loadings_df.copy()
new_column_names = []
for n in range(1, len(loadings_df_copy.columns) + 1):
new_column_names.append("Axis {}".format(n))
loadings_df_copy.columns = new_column_names
return loadings_df_copy
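# Example: a loadings DataFrame with three columns (however they were named)
# comes back with columns ['Axis 1', 'Axis 2', 'Axis 3'], matching Emperor.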
def ordination_to_df(ordination_file_loc):
"""Returns a DataFrame of feature loadings from a skbio ordination file."""
# If this fails, it raises an skbio.io.UnrecognizedFormatError.
ordination = skbio.OrdinationResults.read(ordination_file_loc)
return rename_loadings(ordination.features)
def differentials_to_df(differentials_loc):
"""Converts a differential rank TSV file to a DataFrame."""
# As of QIIME 2 2019.7, differentials exported from QIIME 2 can have q2
# comments! So we need to detect these.
q2_lines = get_q2_comment_lines(differentials_loc)
differentials = pd.read_csv(
differentials_loc,
sep="\t",
na_filter=False,
dtype=object,
skiprows=q2_lines,
)
# Delay setting index column so we can first load it as an object (this
# saves us from situations where the index col would otherwise be read as a
# number or something that would mess things up -- read_metadata_file()
# does the same sorta thing)
differentials.set_index(differentials.columns[0], inplace=True)
# Also, we don't bother naming the differentials index (yet). This is
# actually needed to make some of the tests pass (which is dumb, I know,
# but if I pass check_names=False to assert_frame_equal then the test
# doesn't check column names, and I want it to do that...)
differentials.index.rename(None, inplace=True)
# This is slow but it should at least *work as intended.*
# If there are any non-numeric differentials, or any NaN differentials, or
# any infinity/-infinity differentials (???), then we should raise an
# error. This code should do that.
for feature_row in differentials.itertuples():
for differential in feature_row[1:]:
try:
fd = float(differential)
if | pd.isna(fd) | pandas.isna |
import pandas as pd
import statsmodels.api as sm
import numpy as np
from pathlib import Path
outdir = Path('data')
def download_rivm_r():
df_rivm = | pd.read_json('https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json') | pandas.read_json |
import ipywidgets as widgets
# import bql
# import bqviz as bqv
from bqplot import Figure, Pie, pyplot as plt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from components.efficient_frontier import EfficientFrontier
# bq = bql.Service()
class ETFViewer:
def __init__(self, etf_fields, drivers):
"""Creates an empty component with the label requiring to select an ETF.
"""
label = widgets.Label("Selecione um ETF.")
self.component = widgets.VBox([label])
self.component.layout.width = "40%"
self.etf_fields = etf_fields
self.drivers = drivers
def set_etf(self, ids):
"""Set an ETF to the component and creates all tabs of visualizations.
:param ids: List of BQL ids of the selected ETFs.
:type ids: list
"""
self.ids = ids
self.component.children = [widgets.Label("Carregando...")]
self.hist_returns = self.get_etf_historical_returns()
out_error = widgets.Output()
with out_error:
titles = ['Performance', 'Volatilidade', 'Volume', 'Tracking', 'Holdings', 'Macro Factors', 'Infos']
tab = widgets.Tab()
tab.children = [
self.create_performance_view(),
self.create_volatility_view(),
self.create_volume_view(),
self.create_delta_benchmark_view(),
self.create_holdings_view(),
self.create_drivers_view(),
self.create_infos_view(),
]
for i, title in enumerate(titles):
tab.set_title(i, title)
tab.add_class('custom-tabs')
self.component.children = [self.get_tabs_styles(), tab]
return
self.component.children = [out_error]
def ids_as_string(self):
return ','.join(["'%s'"%id for id in self.ids])
def get_etf_historical_returns(self):
"""Gets a pandas DataFrame with the historical monthly returns of \
the ETFs from a period of 3 years.
:return: The monthly historical returns.
:rtype: pd.DataFrame
"""
# bql_response = bq.execute("""
# get(PCT_DIFF(PX_LAST(M, dates=range(-3y, 0d)))) for([%s])
# """%self.ids_as_string())
# df = bql_response[0].df()
# df.to_csv('./data/etfs/%s_returns.csv'%(self.ids[0]))
df = pd.DataFrame()
for id in self.ids:
_df = pd.read_csv('./data/etfs/%s_returns.csv'%(id), index_col=0, parse_dates=['DATE'])
_df.columns = ['DATE', 'RETURN']
_df = _df.reset_index().pivot(index='DATE', columns='ID', values='RETURN')
df = | pd.concat([df, _df]) | pandas.concat |
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df), expected)
# raises as assertion error if no warning occurs, same thing for below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df), expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
                          (3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
    assert np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
    assert np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
['Bob', 1, 200]], columns=['name', 'count', 'sum'])
    tm.assert_frame_equal(result, expected)
    expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = | DataFrame([[3, 350]], columns=['count', 'sum']) | pandas.DataFrame |
"""
Classes for analyzing RSMTool predictions, metrics, etc.
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:organization: ETS
"""
import warnings
from functools import partial
import numpy as np
import pandas as pd
from scipy.stats import kurtosis, pearsonr
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, mean_squared_error, r2_score
from skll.metrics import kappa
from .container import DataContainer
from .utils.metrics import (agreement,
difference_of_standardized_means,
partial_correlations,
quadratic_weighted_kappa,
standardized_mean_difference)
from .utils.prmse import get_true_score_evaluations
class Analyzer:
"""Class to perform analysis on all metrics, predictions, etc."""
@staticmethod
def check_frame_names(data_container, dataframe_names):
"""
Check that all specified dataframes are available.
This method checks to make sure all specified DataFrames
are in the given data container object.
Parameters
----------
data_container : container.DataContainer
A DataContainer object
dataframe_names : list of str
The names of the DataFrames expected in the
DataContainer object.
Raises
------
KeyError
If a given dataframe_name is not in the DataContainer object.
"""
for dataframe_name in dataframe_names:
if dataframe_name not in data_container:
raise KeyError('The DataFrame `{}` does not exist in the '
'DataContainer object.'.format(dataframe_name))
@staticmethod
def check_param_names(configuration_obj, parameter_names):
"""
Check that all specified parameters are available.
This method checks to make sure all specified parameters
are in the given configuration object.
Parameters
----------
configuration_obj : configuration_parser.Configuration
A configuration object
parameter_names : list of str
The names of the parameters (keys) expected in the
Configuration object.
Raises
------
KeyError
If a given parameter_name is not in the Configuration object.
"""
for parameter_name in parameter_names:
if parameter_name not in configuration_obj:
raise KeyError('The parameter `{}` does not exist in the '
'Configuration object.'.format(parameter_name))
@staticmethod
def analyze_excluded_responses(df,
features,
header,
exclude_zero_scores=True,
exclude_listwise=False):
"""
Compute statistics for responses excluded from analyses.
This method computes various statistics for the responses that
were excluded from analyses, either in the training set or in
the test set.
Parameters
----------
df : pandas DataFrame
Data frame containing the excluded responses
features : list of str
List of column names containing the features
to which we want to restrict the analyses.
header : str
String to be used as the table header for the
output data frame.
exclude_zero_scores : bool, optional
Whether or not the zero-score responses
should be counted in the exclusion statistics.
Defaults to ``True``.
exclude_listwise : bool, optional
Whether or not the candidates were excluded
based on minimal number of responses.
Defaults to ``False``.
Returns
-------
df_full_crosstab : pandas DataFrame
Two-dimensional data frame containing the
exclusion statistics.
"""
# create an empty output data frame
df_full_crosstab = pd.DataFrame({'all features numeric': [0, 0, 0],
'non-numeric feature values': [0, 0, 0]},
index=['numeric non-zero human score',
'zero human score',
'non-numeric human score'])
if not df.empty:
# re-code human scores into numeric, missing or zero
df['score_category'] = 'numeric non-zero human score'
df.loc[df['sc1'].isnull(), 'score_category'] = 'non-numeric human score'
df.loc[df['sc1'].astype(float) == 0, 'score_category'] = 'zero human score'
# recode feature values: a response with at least one
# missing feature is assigned 'non-numeric feature values'
df_features_only = df[features + ['spkitemid']]
null_feature_rows = df_features_only.isnull().any(axis=1)
df_null_features = df_features_only[null_feature_rows]
df['feat_category'] = 'all features numeric'
df.loc[df['spkitemid'].isin(df_null_features['spkitemid']),
'feat_category'] = 'non-numeric feature values'
# crosstabulate
df_crosstab = pd.crosstab(df['score_category'],
df['feat_category'])
df_full_crosstab.update(df_crosstab)
# convert back to integers as these are all counts
df_full_crosstab = df_full_crosstab.astype(int)
df_full_crosstab.insert(0, header, df_full_crosstab.index)
if not exclude_listwise:
# if we are not excluding listwise, rename the first cell so
# that it is not set to zero
assert(df_full_crosstab.loc['numeric non-zero human score',
'all features numeric'] == 0)
df_full_crosstab.loc['numeric non-zero human score',
'all features numeric'] = '-'
# if we are not excluding the zeros, rename the corresponding cells
# so that they are not set to zero. We do not do this for listwise exclusion
if not exclude_zero_scores:
assert(df_full_crosstab.loc['zero human score',
'all features numeric'] == 0)
df_full_crosstab.loc['zero human score',
'all features numeric'] = '-'
return df_full_crosstab
@staticmethod
def analyze_used_responses(df_train, df_test, subgroups, candidate_column):
"""
Compute statistics for responses used in analyses.
This method computes various statistics on the responses that
were used in analyses, either in the training set or in the
test set.
Parameters
----------
df_train : pandas DataFrame
Data frame containing the response information
for the training set.
df_test : pandas DataFrame
Data frame containing the response information
for the test set.
subgroups : list of str
List of column names that contain grouping
information.
candidate_column : str
Column name that contains candidate
identification information.
Returns
-------
df_analysis : pandas DataFrame
Data frame containing information about the used
responses.
"""
# create a basic data frame for responses only
train_responses = set(df_train['spkitemid'])
test_responses = set(df_test['spkitemid'])
rows = [{'partition': 'Training', 'responses': len(train_responses)},
{'partition': 'Evaluation', 'responses': len(test_responses)},
{'partition': 'Overlapping', 'responses': len(train_responses & test_responses)},
{'partition': 'Total', 'responses': len(train_responses | test_responses)}]
df_analysis = pd.DataFrame.from_dict(rows)
columns = ['partition', 'responses'] + subgroups
if candidate_column:
train_candidates = set(df_train['candidate'])
test_candidates = set(df_test['candidate'])
df_analysis['candidates'] = [len(train_candidates), len(test_candidates),
len(train_candidates & test_candidates),
len(train_candidates | test_candidates)]
columns = ['partition', 'responses', 'candidates'] + subgroups
for group in subgroups:
train_group = set(df_train[group])
test_group = set(df_test[group])
df_analysis[group] = [len(train_group), len(test_group),
len(train_group & test_group),
len(train_group | test_group)]
df_analysis = df_analysis[columns]
return df_analysis
@staticmethod
def analyze_used_predictions(df_test, subgroups, candidate_column):
"""
Compute various statistics for predictions used in analyses.
Parameters
----------
df_test : pandas DataFrame
Data frame containing the test set predictions.
subgroups : list of str
List of column names that contain grouping
information.
candidate_column : str
Column name that contains candidate
identification information.
Returns
-------
df_analysis : pandas DataFrame
Data frame containing information about the used
predictions.
"""
rows = [{'partition': 'Evaluation', 'responses': df_test['spkitemid'].size}]
df_analysis = pd.DataFrame.from_dict(rows)
df_columns = ['partition', 'responses'] + subgroups
if candidate_column:
df_analysis['candidates'] = [df_test['candidate'].unique().size]
df_columns = ['partition', 'responses', 'candidates'] + subgroups
for group in subgroups:
df_analysis[group] = [df_test[group].unique().size]
df_analysis = df_analysis[df_columns]
return df_analysis
@staticmethod
def compute_basic_descriptives(df, selected_features):
"""
Compute basic descriptive statistics for columns in the given data frame.
Parameters
----------
df : pandas DataFrame
Input data frame containing the feature values.
selected_features : list of str
List of feature names for which to compute
the descriptives.
Returns
-------
df_desc : pandas DataFrame
DataFrame containing the descriptives for
each of the features.
"""
# select only feature columns
df_desc = df[selected_features]
# get the H1 scores
scores = df['sc1']
        # compute correlations and p-values in a single pass, then split them
cor_series = df_desc.apply(lambda s: pearsonr(s, scores))
cors = cor_series.apply(lambda t: t[0])
pvalues = cor_series.apply(lambda t: t[1])
# create a data frame with all the descriptives
df_output = pd.DataFrame({'mean': df_desc.mean(),
'min': df_desc.min(),
'max': df_desc.max(),
'std. dev.': df_desc.std(),
'skewness': df_desc.skew(),
'kurtosis': df_desc.apply(lambda s: kurtosis(s, fisher=False)),
'Correlation': cors,
'p': pvalues,
'N': len(df_desc)})
# reorder the columns to make it look better
df_output = df_output[['mean', 'std. dev.', 'min', 'max', 'skewness',
'kurtosis', 'Correlation', 'p', 'N']]
return df_output
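    # Example (sketch): for a frame with numeric feature columns 'f1' and 'f2'
    # plus a human-score column 'sc1',
    #   Analyzer.compute_basic_descriptives(df, ['f1', 'f2'])
    # returns one row per feature with its mean/std/min/max/skewness/kurtosis,
    # its correlation with 'sc1', the p-value, and N.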
@staticmethod
def compute_percentiles(df,
selected_features,
percentiles=None):
"""
Compute percentiles and outliers for columns in the given data frame.
Parameters
----------
df : pandas DataFrame
Input data frame containing the feature values.
selected_features : list of str
List of feature names for which to compute the
percentile descriptives.
percentiles : list of ints, optional
The percentiles to calculate. If ``None``, use the percentiles
{1, 5, 25, 50, 75, 95, 99}.
Defaults to ``None``.
Returns
-------
df_output : pandas DataFrame
Data frame containing the percentile information
for each of the features.
"""
# select only feature columns
df_desc = df[selected_features]
# compute the various percentile levels
if percentiles is None:
percentiles = [1, 5, 25, 50, 75, 95, 99]
df_output = df_desc.apply(lambda series: pd.Series(np.percentile(series,
percentiles,
interpolation='lower')))
df_output = df_output.transpose()
# change the column names to be more readable
df_output.columns = ['{}%'.format(p) for p in percentiles]
# add the inter-quartile range column
df_output['IQR'] = df_output['75%'] - df_output['25%']
# compute the various outlier statistics
mild_upper = df_output['75%'] + 1.5 * df_output['IQR']
mild_bottom = df_output['25%'] - 1.5 * df_output['IQR']
extreme_upper = df_output['75%'] + 3 * df_output['IQR']
extreme_bottom = df_output['25%'] - 3 * df_output['IQR']
# compute the mild and extreme outliers
num_mild_outliers = {}
num_extreme_outliers = {}
for c in df_desc.columns:
is_extreme = (df_desc[c] <= extreme_bottom[c]) | (df_desc[c] >= extreme_upper[c])
is_mild = ((df_desc[c] > extreme_bottom[c]) & (df_desc[c] <= mild_bottom[c]))
is_mild = is_mild | ((df_desc[c] >= mild_upper[c]) & (df_desc[c] < extreme_upper[c]))
num_mild_outliers[c] = len(df_desc[is_mild])
num_extreme_outliers[c] = len(df_desc[is_extreme])
# add those to the output data frame
df_output['Mild outliers'] = pd.Series(num_mild_outliers)
df_output['Extreme outliers'] = pd.Series(num_extreme_outliers)
return df_output
@staticmethod
def compute_outliers(df, selected_features):
"""
Compute number and percentage of outliers for given columns.
This method computes the number and percentage of outliers
that lie outside the range mean +/- 4 SD for each of the
given columns in the given data frame.
Parameters
----------
df : pandas DataFrame
Input data frame containing the feature values.
selected_features : list of str
List of feature names for which to compute
outlier information.
Returns
-------
df_output : pandas DataFrame
Data frame containing outlier information
for each of the features.
"""
# select only feature columns
df_desc = df[selected_features]
# compute the means and standard deviations
means = df_desc.mean()
stds = df_desc.std()
# compute the number of upper and lower outliers
lower_outliers = {}
upper_outliers = {}
for c in df_desc.columns:
lower_outliers[c] = len(df_desc[df_desc[c] < means[c] - 4 * stds[c]])
upper_outliers[c] = len(df_desc[df_desc[c] > means[c] + 4 * stds[c]])
# generate the output data frame
lower_s = pd.Series(lower_outliers)
upper_s = pd.Series(upper_outliers)
both_s = lower_s + upper_s
df_output = pd.DataFrame({'lower': lower_s,
'upper': upper_s,
'both': both_s,
'lowerperc': round(lower_s / len(df_desc) * 100, 2),
'upperperc': round(upper_s / len(df_desc) * 100, 2),
'bothperc': round(both_s / len(df_desc) * 100, 2)})
return df_output
@staticmethod
def compute_pca(df, selected_features):
"""
Compute PCA decomposition of the given features.
This method computes the PCA decomposition of features in the
data frame, restricted to the given columns. The number of components
is set to be min(n_features, n_samples).
Parameters
----------
df : pandas DataFrame
Input data frame containing feature values.
selected_features : list of str
List of feature names to be used in the
PCA decomposition.
Returns
-------
df_components : pandas DataFrame
Data frame containing the PCA components.
df_variance : pandas DataFrame
Data frame containing the variance information.
"""
# restrict to the given features
df_pca = df[selected_features]
# fit the PCA
n_components = min(len(selected_features), len(df_pca))
pca = PCA(n_components=n_components)
pca.fit(df_pca)
df_components = pd.DataFrame(pca.components_)
n_components = len(df_components)
df_components.columns = selected_features
df_components.index = ['PC{}'.format(i) for i in range(1, n_components + 1)]
df_components = df_components.transpose()
# compute the variance data frame
df_variance = {'Eigenvalues': pca.explained_variance_,
'Percentage of variance': pca.explained_variance_ratio_,
'Cumulative percentage of '
'variance': np.cumsum(pca.explained_variance_ratio_)
}
df_variance = pd.DataFrame(df_variance)
# reorder the columns
df_variance = df_variance[['Eigenvalues', 'Percentage of variance',
'Cumulative percentage of variance']]
# set the row names and take the transpose
df_variance.index = ['PC{}'.format(i) for i in range(1, n_components + 1)]
df_variance = df_variance.transpose()
return df_components, df_variance
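    # Example (sketch): for a frame with feature columns 'f1', 'f2', 'f3' and
    # at least three rows,
    #   df_components, df_variance = Analyzer.compute_pca(df, ['f1', 'f2', 'f3'])
    # yields a (3 features x 3 PCs) component matrix and a variance summary
    # with rows ['Eigenvalues', 'Percentage of variance',
    # 'Cumulative percentage of variance'] and one column per PC.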
@staticmethod
def correlation_helper(df,
target_variable,
grouping_variable,
include_length=False):
"""
Compute marginal and partial correlations for all columns.
This helper method computes marginal and partial correlations of
all the columns in the given data frame against the target variable
separately for each level in the the grouping variable.
If ``include_length`` is ``True``, it additionally computes partial
correlations of each column in the data frame against the target
variable after controlling for the "length" column.
Parameters
----------
df : pandas DataFrame
Input data frame containing numeric feature values, the numeric
`target variable` and the `grouping variable`.
target_variable: str
The name of the column used as a reference for computing correlations.
grouping_variable: str
            The name of the column defining groups in the data.
        include_length: bool, optional
            If ``True``, compute additional partial correlations of each column
            in the data frame against ``target_variable``, partialling out only
            the "length" column.
            Defaults to ``False``.
Returns
-------
df_target_cors : pandas DataFrame
Data frame containing Pearson's correlation coefficients for
marginal correlations between features and `target_variable`.
df_target_partcors : pandas DataFrame
Data frame containing Pearson's correlation coefficients for
partial correlations between each feature and `target_variable`
after controlling for all other features. If ``include_length`` is
set to ``True``, the "length" column will not be included in the
partial correlation computation.
df_target_partcors_no_length: pandas DataFrame
If ``include_length`` is set to ``True``: Data frame containing
Pearson's correlation coefficients for partial correlations
between each feature and ``target_variable`` after controlling
for "length". Otherwise, it will be an empty data frame.
"""
# group by the group columns
grouped = df.groupby(grouping_variable)
df_target_cors = pd.DataFrame()
df_target_pcorr = pd.DataFrame()
df_target_pcorr_no_length = pd.DataFrame()
for group, df_group in grouped:
df_group = df_group.drop(grouping_variable, axis=1)
# first check if we have at least 2 cases and return np.nan otherwise
if len(df_group) == 1:
df_target_cors[group] = pd.Series(data=np.nan,
index=df_group.columns)
df_target_pcorr[group] = pd.Series(data=np.nan,
index=df_group.columns)
df_target_pcorr_no_length[group] = pd.Series(data=np.nan,
index=df_group.columns)
else:
# if we are asked to include length, that means 'length' is
# in the data frame which means that we want to exclude that
# before computing the regular marginal and partial correlations
if not include_length:
df_target_cors[group] = df_group.apply(lambda s:
pearsonr(s,
df_group[target_variable])[0])
df_target_pcorr[group] = partial_correlations(df_group)[target_variable]
else:
df_group_no_length = df_group.drop('length', axis=1)
partial_pearsonr = partial(pearsonr, y=df_group_no_length[target_variable])
df_target_cors[group] = df_group_no_length.apply(lambda s:
partial_pearsonr(s)[0])
df_target_pcorr[group] = partial_correlations(df_group_no_length)[target_variable]
pcor_dict = {}
columns = [c for c in df_group.columns if c not in ['sc1', 'length']]
for c in columns:
pcor_dict[c] = partial_correlations(df_group[[c,
'sc1',
'length']])['sc1'][c]
df_target_pcorr_no_length[group] = pd.Series(pcor_dict)
# remove the row containing the correlation of the target variable
# with itself and take the transpose
df_target_cors = df_target_cors.drop(target_variable).transpose()
df_target_pcorr = df_target_pcorr.drop(target_variable).transpose()
df_target_pcorr_no_length = df_target_pcorr_no_length.transpose()
return (df_target_cors,
df_target_pcorr,
df_target_pcorr_no_length)
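    # Example (sketch): with a frame holding feature columns, the human score
    # 'sc1', and a subgroup column 'L1',
    #   cors, pcors, pcors_no_len = Analyzer.correlation_helper(df, 'sc1', 'L1')
    # returns frames with one row per 'L1' level and one column per feature;
    # the third frame is only populated when include_length=True.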
@staticmethod
def metrics_helper(human_scores,
system_scores,
population_human_score_sd=None,
population_system_score_sd=None,
population_human_score_mn=None,
population_system_score_mn=None,
smd_method='unpooled',
use_diff_std_means=False):
"""
Compute basic association metrics between system and human scores.
Parameters
----------
human_scores : pandas Series
Series containing numeric human (reference) scores.
system_scores: pandas Series
Series containing numeric scores predicted by the model.
population_human_score_sd : float, optional
Reference standard deviation for human scores.
This must be specified when the function is used to compute
association metrics for a subset of responses, for example,
responses from a particular demographic subgroup. If ``smd_method``
is set to "williamson" or "johnson", this should be the standard
deviation for the whole population (in most cases, the standard
deviation for the whole test set). If ``use_diff_std_means`` is
``True``, this must be the standard deviation for the whole
population and ``population_human_score_mn`` must also be specified.
Otherwise, it is ignored.
Defaults to ``None``.
population_system_score_sd : float, optional
Reference standard deviation for system scores.
This must be specified when the function is used to compute
association metrics for a subset of responses, for example,
responses from a particular demographic subgroup. If ``smd_method``
is set to "williamson", this should be the standard deviation for
the whole population (in most cases, the standard deviation for the
whole test set). If ``use_diff_std_means`` is ``True``, this must
be the standard deviation for the whole population and
``population_system_score_mn`` must also be specified. Otherwise,
it is ignored.
Defaults to ``None``.
population_human_score_mn : float, optional
Reference mean for human scores. This must be specified when the
function is used to compute association metrics for a subset of
responses, for example, responses from a particular demographic
subgroup. If ``use_diff_std_means`` is ``True``, this must be the
mean for the whole population (in most cases, the full test set)
and ``population_human_score_sd`` must also be specified.
Otherwise, it is ignored.
Defaults to ``None``.
population_system_score_mn : float, optional
Reference mean for system scores. This must be specified when the
function is used to compute association metrics for a subset of
responses, for example, responses from a particular demographic
subgroup. If ``use_diff_std_means`` is ``True``, this must be the
mean for the whole population (in most cases, the full test set)
and ``population_system_score_sd`` must also be specified. Otherwise,
it is ignored.
Defaults to ``None``.
smd_method : {"williamson", "johnson", "pooled", "unpooled"}, optional
The SMD method to use, only used if ``use_diff_std_means`` is
``False``. All methods have the same numerator
mean(`y_pred`) - mean(`y_true_observed`) and the following
            denominators:
- "williamson": pooled population standard deviation of
`y_true_observed` and `y_pred` computed using
``population_human_score_sd`` and ``population_system_score_sd``.
- "johnson": ``population_human_score_sd``.
- "pooled": pooled standard deviation of `y_true_observed` and
`y_pred` for this group.
- "unpooled": standard deviation of `y_true_observed` for this
group.
Defaults to "unpooled".
use_diff_std_means : bool, optional
Whether to use the difference of standardized means, rather than
the standardized mean difference. This is most useful with subgroup
analysis.
Defaults to ``False``.
Returns
-------
metrics: pandas Series
Series containing different evaluation metrics comparing human
and system scores. The following metrics are included:
- `kappa`: unweighted Cohen's kappa
- `wtkappa`: quadratic weighted kappa
- `exact_agr`: exact agreement
- `adj_agr`: adjacent agreement with tolerance set to 1
- One of the following :
* `SMD`: standardized mean difference, if ``use_diff_std_means``
is ``False``.
* `DSM`: difference of standardized means, if ``use_diff_std_means``
is ``True``.
- `corr`: Pearson's r
- `R2`: r squared
- `RMSE`: root mean square error
- `sys_min`: min system score
- `sys_max`: max system score
            - `sys_mean`: mean system score
- `sys_sd`: standard deviation of system scores (ddof=1)
- `h_min`: min human score
- `h_max`: max human score
            - `h_mean`: mean human score
- `h_sd`: standard deviation of human scores (ddof=1)
- `N`: total number of responses
"""
# compute the kappas
unweighted_kappa = kappa(human_scores, system_scores)
weighted_kappa = quadratic_weighted_kappa(human_scores,
system_scores)
# compute the agreement statistics
human_system_agreement = agreement(human_scores, system_scores)
human_system_adjacent_agreement = agreement(human_scores,
system_scores,
tolerance=1)
# compute the Pearson correlation after removing
# any cases where either of the scores are NaNs.
df = pd.DataFrame({'human': human_scores,
'system': system_scores}).dropna(how='any')
if (len(df) == 1 or
len(df['human'].unique()) == 1 or
len(df['system'].unique()) == 1):
            # set correlations to NaN if we have a single instance or zero variance
correlations = np.nan
else:
correlations = pearsonr(df['human'], df['system'])[0]
# compute the min/max/mean/std. dev. for the system and human scores
min_system_score = np.min(system_scores)
min_human_score = np.min(human_scores)
max_system_score = np.max(system_scores)
max_human_score = np.max(human_scores)
mean_system_score = np.mean(system_scores)
mean_human_score = np.mean(human_scores)
system_score_sd = np.std(system_scores, ddof=1)
human_score_sd = np.std(human_scores, ddof=1)
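        # Illustrative sketch of the SMD variants described in the docstring
        # (comment only, not used by the computation below): with this group's
        # means m_h, m_s and standard deviations s_h, s_s for human and system
        # scores,
        #   "unpooled"   -> (m_s - m_h) / s_h
        #   "johnson"    -> (m_s - m_h) / population_human_score_sd
        #   "pooled"     -> (m_s - m_h) / a pooled SD of s_h and s_s
        #   "williamson" -> the same numerator over a pooled SD built from the
        #                   supplied population SDs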
if use_diff_std_means:
# calculate the difference of standardized means
smd_name = 'DSM'
smd = difference_of_standardized_means(human_scores,
system_scores,
population_human_score_mn,
population_system_score_mn,
population_human_score_sd,
population_system_score_sd)
else:
# calculate the standardized mean difference
smd_name = 'SMD'
smd = standardized_mean_difference(human_scores,
system_scores,
population_human_score_sd,
population_system_score_sd,
method=smd_method)
# compute r2
if len(df) == 1:
r2 = np.nan
else:
r2 = r2_score(human_scores, system_scores)
# compute MSE
mse = mean_squared_error(human_scores, system_scores)
rmse = np.sqrt(mse)
# return everything as a series
metrics = pd.Series({'kappa': unweighted_kappa,
'wtkappa': weighted_kappa,
'exact_agr': human_system_agreement,
'adj_agr': human_system_adjacent_agreement,
smd_name: smd,
'corr': correlations,
'R2': r2,
'RMSE': rmse,
'sys_min': min_system_score,
'sys_max': max_system_score,
'sys_mean': mean_system_score,
'sys_sd': system_score_sd,
'h_min': min_human_score,
'h_max': max_human_score,
'h_mean': mean_human_score,
'h_sd': human_score_sd,
'N': len(system_scores)})
return metrics
@staticmethod
def compute_disattenuated_correlations(human_system_corr,
human_human_corr):
"""
Compute disattenuated correlations between human and system scores.
These are computed as the Pearson's correlation between the human score
and the system score divided by the square root of correlation between
two human raters.
Parameters
----------
        human_system_corr : pandas Series
            Series containing Pearson's correlation coefficients for
            human-system correlations.
        human_human_corr : pandas Series
            Series containing Pearson's correlation coefficients for
            human-human correlations. This can contain a single value or have
            the index matching that of human-system correlations.
Returns
-------
df_correlations: pandas DataFrame
Data frame containing the human-system correlations, human-human
correlations, and disattenuated correlations.
"""
# if we only have a single value for human correlation and the index
# is not in human-system values, we use the same HH value in all cases
if (len(human_human_corr) == 1 and
not human_human_corr.index[0] in human_system_corr.index):
human_human_corr = pd.Series(human_human_corr.values.repeat(len(human_system_corr)),
index=human_system_corr.index)
# we now concatenate the two series on index
df_correlations = pd.concat([human_system_corr, human_human_corr],
axis=1,
sort=True,
keys=['corr_HM', 'corr_HH'])
        # if any of the HH correlations are negative, we will ignore these
        # and treat them as NaNs
with np.errstate(invalid='ignore'):
df_correlations['sqrt_HH'] = np.sqrt(df_correlations['corr_HH'])
df_correlations['corr_disattenuated'] = (df_correlations['corr_HM'] /
df_correlations['sqrt_HH'])
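        # Illustrative example with made-up numbers: corr_HM = 0.72 and
        # corr_HH = 0.81 give a disattenuated correlation of
        # 0.72 / sqrt(0.81) = 0.80.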
return df_correlations
def compute_correlations_by_group(self,
df,
selected_features,
target_variable,
grouping_variable,
include_length=False):
"""
Compute marginal and partial correlations against target variable.
This method computes various marginal and partial correlations of the
given columns in the given data frame against the target variable for
all data and for each level of the grouping variable.
Parameters
----------
df : pandas DataFrame
Input data frame.
selected_features : list of str
List of feature names for which to compute
the correlations.
        target_variable : str
            Feature name indicating the target variable, i.e., the
            dependent variable.
        grouping_variable : str
            Feature name of the column that contains the grouping information.
include_length : bool, optional
Whether or not to include the length when
computing the partial correlations.
Defaults to ``False``.
Returns
-------
df_output : pandas DataFrame
Data frame containing the correlations.
"""
df_desc = df.copy()
columns = selected_features + [target_variable, grouping_variable]
if include_length:
columns.append('length')
df_desc = df_desc[columns]
# create a duplicate data frame to compute correlations
# over the whole data, i.e., across all grouping variables
df_desc_all = df_desc.copy()
df_desc_all[grouping_variable] = 'All data'
# combine the two data frames
df_desc_combined = pd.concat([df_desc, df_desc_all], sort=True)
df_desc_combined.reset_index(drop=True, inplace=True)
# compute the various (marginal and partial) correlations with score
ret = self.correlation_helper(df_desc_combined,
target_variable,
grouping_variable,
include_length=include_length)
return ret
def filter_metrics(self,
df_metrics,
use_scaled_predictions=False,
chosen_metric_dict=None):
"""
Filter data frame to retain only the given metrics.
This method filters the data frame ``df_metrics`` -- containing
all of the metric values by all score types (raw, raw_trim etc.)
-- to retain only the metrics as defined in the given dictionary
``chosen_metric_dict``. This dictionary maps score types ("raw",
"scale", "raw_trim" etc.) to metric names. The available metric
names are:
- "corr"
- "kappa"
- "wtkappa"
- "exact_agr"
- "adj_agr"
- "SMD" or "DSM", depending on what is in ``df_metrics``.
- "RMSE"
- "R2"
- "sys_min"
- "sys_max"
- "sys_mean"
- "sys_sd"
- "h_min"
- "h_max"
- "h_mean"
- "h_sd"
- "N"
Parameters
----------
df_metrics : pd.DataFrame
The DataFrame to filter.
use_scaled_predictions : bool, optional
Whether to use scaled predictions.
Defaults to ``False``.
chosen_metric_dict : dict, optional
            The dictionary mapping score types to the metrics that should be
            computed for them.
Defaults to ``None``.
Note
----
The last five metrics will be the `same` for all score types.
If ``chosen_metric_dict`` is not specified then, the following default
dictionary, containing the recommended metrics, is used::
{"X_trim": ["N", "h_mean", "h_sd", "sys_mean", "sys_sd", "wtkappa",
"corr", "RMSE", "R2", "SMD"],
"X_trim_round": ["sys_mean", "sys_sd", "kappa",
"exact_agr", "adj_agr", "SMD"]}
where X = "raw" or "scale" depending on whether
``use_scaled_predictions`` is ``False`` or ``True``, respectively.
"""
# do we want the raw or the scaled metrics
score_prefix = 'scale' if use_scaled_predictions else 'raw'
# what metrics are we choosing to include?
if chosen_metric_dict:
chosen_metrics = chosen_metric_dict
else:
smd_name = 'DSM' if 'DSM' in df_metrics else 'SMD'
chosen_metrics = {'{}_trim'.format(score_prefix): ['N',
'h_mean',
'h_sd',
'sys_mean',
'sys_sd',
'wtkappa',
'corr',
smd_name,
'RMSE',
'R2'],
'{}_trim_round'.format(score_prefix): ['sys_mean',
'sys_sd',
'kappa',
'exact_agr',
'adj_agr',
smd_name]}
# extract the metrics we need from the given metrics frame
metricdict = {}
for score_type in chosen_metrics:
for metric in chosen_metrics[score_type]:
colname = (metric if metric in ['h_mean', 'h_sd', 'N']
else '{}.{}'.format(metric, score_type))
values = df_metrics[metric][score_type]
metricdict[colname] = values
df_filtered_metrics = pd.DataFrame([metricdict])
return df_filtered_metrics
def compute_metrics(self,
df,
compute_shortened=False,
use_scaled_predictions=False,
include_second_score=False,
population_sd_dict=None,
population_mn_dict=None,
smd_method='unpooled',
use_diff_std_means=False):
"""
Compute association metrics for scores in the given data frame.
This function compute association metrics for all score types.
If ``include_second_score`` is ``True``, then it is assumed that
a column called `sc2` containing a second human score is available
and it should be used to compute the human-human evaluation stats
and the performance degradation statistics.
If ``compute_shortened`` is ``True``, then this function also
computes a shortened version of the full human-system metrics data
frame. See ``filter_metrics()`` for the description of the default
columns included in the shortened data frame.
Parameters
----------
df : pandas DataFrame
Input data frame
compute_shortened : bool, optional
Also compute a shortened version of the full
metrics data frame.
Defaults to ``False``.
use_scaled_predictions : bool, optional
Use evaluations based on scaled predictions in
the shortened version of the metrics data frame.
Defaults to ``False``.
include_second_score : bool, optional
Second human score available.
Defaults to ``False``.
population_sd_dict : dict, optional
Dictionary containing population standard deviation for each column containing
human or system scores. This is used to compute SMD for subgroups.
Defaults to ``None``.
population_mn_dict : dict, optional
Dictionary containing population mean for each column containing
human or system scores. This is used to compute SMD for subgroups.
Defaults to ``None``.
        smd_method : {"williamson", "johnson", "pooled", "unpooled"}, optional
The SMD method to use, only used if ``use_diff_std_means`` is
``False``. All methods have the same numerator
mean(`y_pred`) - mean(`y_true_observed`) and the following
denominators:
- "williamson": pooled population standard deviation of human and
system scores computed based on values in ``population_sd_dict``.
- "johnson": population standard deviation of human scores computed
based on values in ``population_sd_dict``.
- "pooled": pooled standard deviation of `y_true_observed` and
`y_pred` for this group.
- "unpooled": standard deviation of `y_true_observed` for this
group.
Defaults to "unpooled".
use_diff_std_means : bool, optional
Whether to use the difference of standardized means, rather than the standardized mean
difference. This is most useful with subgroup analysis.
Defaults to ``False``.
Returns
-------
        df_human_system_eval : pandas DataFrame
            Data frame containing the full set of evaluation
            metrics.
        df_human_system_eval_filtered : pandas DataFrame
            A shortened version of the first data frame, which is
            empty if ``compute_shortened`` is ``False``.
        df_human_human_eval : pandas DataFrame
            Data frame containing the human-human statistics,
            which is empty if ``include_second_score`` is ``False``.
"""
# shorter variable name is easier to work with
use_scaled = use_scaled_predictions
# are we using DSM or SMD?
smd_name = 'DSM' if use_diff_std_means else 'SMD'
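        # Note: when supplied, population_sd_dict / population_mn_dict are
        # expected to map each score column to a float, e.g.
        # {'sc1': 1.02, 'raw': 0.97, ...} (values made up for illustration).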
# get the population standard deviations for SMD if none were supplied
if not population_sd_dict:
population_sd_dict = {col: None for col in df.columns}
        # get the population means for SMD if none were supplied
if not population_mn_dict:
population_mn_dict = {col: None for col in df.columns}
# if the second human score column is available, the values are
# probably not available for all of the responses in the test
# set and so we want to exclude 'sc2' from human-system metrics
# computation. In addition, we also want to compute the human-human
# metrics only on the data that is double scored.
df_human_human = pd.DataFrame()
if include_second_score:
df_single = df.drop('sc2', axis=1)
df_human_system = df_single.apply(lambda s:
self.metrics_helper(df_single['sc1'],
s,
population_sd_dict['sc1'],
population_sd_dict[s.name],
population_mn_dict['sc1'],
population_mn_dict[s.name],
smd_method,
use_diff_std_means))
df_double = df[df['sc2'].notnull()][['sc1', 'sc2']]
df_human_human = df_double.apply(lambda s:
self.metrics_helper(df_double['sc1'],
s,
population_sd_dict['sc1'],
population_sd_dict[s.name],
population_mn_dict['sc1'],
population_mn_dict[s.name],
'pooled',
use_diff_std_means))
# drop the sc1 column from the human-human agreement frame
df_human_human = df_human_human.drop('sc1', axis=1)
# sort the rows in the correct order
df_human_human = df_human_human.reindex(['N', 'h_mean', 'h_sd',
'h_min', 'h_max',
'sys_mean', 'sys_sd',
'sys_min', 'sys_max',
'corr', 'wtkappa', 'R2',
'kappa', 'exact_agr',
'adj_agr', smd_name, 'RMSE'])
# rename `h_*` -> `h1_*` and `sys_*` -> `h2_*`
df_human_human.rename(lambda c: c.replace('h_', 'h1_').replace('sys_', 'h2_'),
inplace=True)
# drop RMSE and R2 because they are not meaningful for human raters
df_human_human.drop(['R2', 'RMSE'], inplace=True)
df_human_human = df_human_human.transpose()
# convert N to integer if it's not empty else set to 0
try:
df_human_human['N'] = df_human_human['N'].astype(int)
except ValueError:
df_human_human['N'] = 0
df_human_human.index = ['']
else:
df_human_system = df.apply(lambda s: self.metrics_helper(df['sc1'],
s,
population_sd_dict['sc1'],
population_sd_dict[s.name],
population_mn_dict['sc1'],
population_mn_dict[s.name],
smd_method,
use_diff_std_means))
# drop 'sc1' column from the human-system frame and transpose
df_human_system = df_human_system.drop('sc1', axis=1)
df_human_system = df_human_system.transpose()
# sort the columns and rows in the correct order
df_human_system = df_human_system[['N',
'h_mean', 'h_sd',
'h_min', 'h_max',
'sys_mean', 'sys_sd',
'sys_min', 'sys_max',
'corr',
'wtkappa', 'R2', 'kappa',
'exact_agr', 'adj_agr',
smd_name, 'RMSE']]
# make N column an integer if it's not NaN else set it to 0
df_human_system['N'] = df_human_system['N'].astype(int)
all_rows_order = ['raw', 'raw_trim', 'raw_trim_round',
'scale', 'scale_trim', 'scale_trim_round']
existing_rows_index = [row for row in all_rows_order if row in df_human_system.index]
df_human_system = df_human_system.reindex(existing_rows_index)
# extract some default metrics for a shorter version of this data frame
# if we were asked to do so
if compute_shortened:
df_human_system_filtered = self.filter_metrics(df_human_system,
use_scaled_predictions=use_scaled)
else:
df_human_system_filtered = pd.DataFrame()
# return all data frames
return (df_human_system,
df_human_system_filtered,
df_human_human)
def compute_metrics_by_group(self,
df_test,
grouping_variable,
use_scaled_predictions=False,
include_second_score=False):
"""
Compute a subset of evaluation metrics by subgroups.
        This method computes a subset of evaluation metrics for the scores
in the given data frame by group specified in ``grouping_variable``.
See ``filter_metrics()`` above for a description of the subset
that is selected.
Parameters
----------
df_test : pandas DataFrame
Input data frame.
grouping_variable : str
Feature name indicating the column that
contains grouping information.
use_scaled_predictions : bool, optional
Include scaled predictions when computing
the evaluation metrics.
Defaults to ``False``.
include_second_score : bool, optional
Include human-human association statistics.
Defaults to ``False``.
Returns
-------
df_human_system_by_group : pandas DataFrame
            Data frame containing the human-system
            association statistics.
df_human_human_by_group : pandas DataFrame
Data frame that either contains the human-human
statistics or is an empty data frame, depending
            on whether ``include_second_score`` is ``True``.
"""
# get the population standard deviation that we will need to compute SMD for all columns
# other than id and subgroup
population_sd_dict = {col: df_test[col].std(ddof=1)
for col in df_test.columns if col not in ['spkitemid',
grouping_variable]}
population_mn_dict = {col: df_test[col].mean()
for col in df_test.columns if col not in ['spkitemid',
grouping_variable]}
# check if any of the standard deviations is zero and
# tell user to expect to see many warnings.
zero_sd_scores = [score for (score, sd) in population_sd_dict.items() if
np.isclose(sd, 0, atol=1e-07)]
if len(zero_sd_scores) > 0:
warnings.warn("The standard deviation for {} scores "
"is zero (all values are the same). You "
"will see multiple warnings about DSM computation "
"since this metric is computed separately for "
"each subgroup.".format(', '.join(zero_sd_scores)))
# create a duplicate data frame to compute evaluations
# over the whole data, i.e., across groups
df_preds_all = df_test.copy()
df_preds_all[grouping_variable] = 'All data'
# combine the two data frames
df_preds_combined = pd.concat([df_test, df_preds_all], sort=True)
df_preds_combined.reset_index(drop=True, inplace=True)
# group by the grouping_variable columns
grouped = df_preds_combined.groupby(grouping_variable)
df_human_system_by_group = pd.DataFrame()
df_human_human_by_group = pd.DataFrame()
for group, df_group in grouped:
df_group = df_group.drop(grouping_variable, axis=1)
(df_human_system_metrics,
df_human_system_metrics_short,
df_human_human_metrics
) = self.compute_metrics(df_group,
compute_shortened=True,
use_scaled_predictions=use_scaled_predictions,
include_second_score=include_second_score,
population_sd_dict=population_sd_dict,
population_mn_dict=population_mn_dict,
use_diff_std_means=True)
# we need to convert the shortened data frame to a series here
df_human_system_by_group[group] = df_human_system_metrics_short.iloc[0]
# update the by group human-human metrics frame if
# we have the second score column available
if include_second_score:
df_human_human_metrics.index = [group]
df_human_human_by_group = df_human_human_by_group.append(df_human_human_metrics)
# transpose the by group human-system metrics frame
df_human_system_by_group = df_human_system_by_group.transpose()
return (df_human_system_by_group, df_human_human_by_group)
def compute_degradation_and_disattenuated_correlations(self,
df,
use_all_responses=True):
"""
Compute the degradation in performance when using system score.
This method computes the degradation in performance when using the
system to predict the score instead of a second human and also the
disattenuated correlations between human and system scores.
These are computed as the Pearson's correlation between the human score
and the system score divided by the square root of correlation between
two human raters.
For this, we can compute the system performance either only on the
double scored data or on the full dataset. Both options have their
pros and cons. The default is to use the full dataset. This function
also assumes that the `sc2` column exists in the given data frame,
in addition to `sc1` and the various types of predictions.
Parameters
----------
df : pandas DataFrame
Input data frame.
use_all_responses : bool, optional
Use the full data set instead of only using the double-scored subset.
Defaults to ``True``.
Returns
-------
df_degradation : pandas DataFrame
Data frame containing the degradation statistics.
df_correlations : pandas DataFrame
Data frame containing the human-system correlations, human-human
correlations and disattenuated correlation.
"""
if use_all_responses:
df_responses = df
else:
# use only double scored data
df_responses = df[df['sc2'].notnull()]
# compute the human-system and human-human metrics
(df_human_system_eval,
_,
df_human_human_eval) = self.compute_metrics(df_responses,
include_second_score=True)
# compute disattenuated correlations
df_correlations = self.compute_disattenuated_correlations(df_human_system_eval['corr'],
df_human_human_eval['corr'])
# Compute degradation. we only care about the degradation in these metrics
degradation_metrics = ['corr', 'kappa', 'wtkappa',
'exact_agr', 'adj_agr', 'SMD']
df_human_system_eval = df_human_system_eval[degradation_metrics]
df_human_human_eval = df_human_human_eval[degradation_metrics]
df_degradation = df_human_system_eval.apply(lambda row:
row - df_human_human_eval.loc[''], axis=1)
return (df_degradation, df_correlations)
def run_training_analyses(self,
data_container,
configuration):
"""
Run all analyses on the training data.
Parameters
----------
data_container : container.DataContainer
The DataContainer object. This container must include the following
            DataFrames: {"train_features", "train_metadata",
            "train_preprocessed_features", "train_length"}.
configuration : configuration_parser.Configuration
The Configuration object. This configuration object must include the
following parameters (keys): {"length_column", "subgroups",
"selected_features"}.
Returns
-------
data_container : container.DataContainer
A new DataContainer object with the following DataFrames:
- feature_descriptives
- feature_descriptivesExtra
- feature_outliers
- cors_orig
- cors_processed
- margcor_score_all_data
- pcor_score_all_data
- pcor_score_no_length_all_data
- margcor_length_all_data
- pcor_length_all_data
- pca
- pcavar
- margcor_length_by_*
- pcor_length_by_*
- margcor_score_by_*
- pcor_score_by_*
- pcor_score_no_length_by_*
configuration : configuration_parser.Configuration
A new Configuration object.
"""
frame_names = ['train_features', 'train_metadata',
'train_preprocessed_features', 'train_length',
'train_features']
param_names = ['length_column', 'subgroups', 'selected_features']
self.check_frame_names(data_container, frame_names)
self.check_param_names(configuration, param_names)
# only use the features selected by the model but keep their order the same
# as in the original file as ordering may affect the sign in pca
df_train = data_container.train_features.copy()
df_train_length = data_container.train_length.copy()
df_train_metadata = data_container.train_metadata.copy()
df_train_preprocessed_features = data_container.train_preprocessed_features.copy()
subgroups = configuration['subgroups']
selected_features = configuration['selected_features']
df_train_preprocessed = pd.merge(df_train_preprocessed_features,
df_train_metadata, on='spkitemid')
assert (len(df_train_preprocessed.index) ==
len(df_train_preprocessed_features.index) ==
len(df_train_metadata.index))
# get descriptives, percentiles and outliers for the original feature values
df_descriptives = self.compute_basic_descriptives(df_train, selected_features)
df_percentiles = self.compute_percentiles(df_train, selected_features)
df_outliers = self.compute_outliers(df_train, selected_features)
# set a general boolean flag indicating if we should include length
include_length = not df_train_length.empty
# include length if available
if include_length:
columns = selected_features + ['sc1', 'length']
df_train_with_length = df_train.merge(df_train_length, on='spkitemid')
df_train_preprocess_length = df_train_preprocessed.merge(df_train_length,
on='spkitemid')
else:
columns = selected_features + ['sc1']
df_train_with_length = df_train
df_train_preprocess_length = df_train_preprocessed
# get pairwise correlations against the original training features
# as well as the pre-processed training features
df_pairwise_cors_orig = df_train_with_length[columns].corr(method='pearson')
df_pairwise_cors_preprocess = df_train_preprocess_length[columns].corr(method='pearson')
# get marginal and partial correlations against sc1 for all data
# for partial correlations, we partial out all other features
df_train_with_group_for_all = df_train_preprocess_length.copy()
df_train_with_group_for_all = df_train_with_group_for_all[columns]
df_train_with_group_for_all['all_data'] = 'All data'
(df_margcor_sc1,
df_pcor_sc1,
df_pcor_sc1_no_length) = self.correlation_helper(df_train_with_group_for_all,
'sc1',
'all_data',
include_length=include_length)
# get marginal and partial correlations against length for all data
# if the length column is available
df_margcor_length = pd.DataFrame()
df_pcor_length = pd.DataFrame()
if include_length:
df_train_with_group_for_all = df_train_preprocess_length.copy()
columns = selected_features + ['length']
df_train_with_group_for_all = df_train_with_group_for_all[columns]
df_train_with_group_for_all['all_data'] = 'All data'
(df_margcor_length,
df_pcor_length,
_) = self.correlation_helper(df_train_with_group_for_all,
'length',
'all_data')
# get marginal and partial correlations against sc1 by group (preprocessed features)
# also include partial correlations with length if length is available
score_corr_by_group_dict = {}
include_length = 'length' in df_train_preprocess_length
for grouping_variable in subgroups:
corr_by_group = self.compute_correlations_by_group(df_train_preprocess_length,
selected_features,
'sc1',
grouping_variable,
include_length=include_length)
score_corr_by_group_dict[grouping_variable] = corr_by_group
        # get marginal and partial correlations against length by group (preprocessed features)
length_corr_by_group_dict = {}
if include_length:
for grouping_variable in subgroups:
corr_by_group = self.compute_correlations_by_group(df_train_preprocess_length,
selected_features,
'length',
grouping_variable)
length_corr_by_group_dict[grouping_variable] = corr_by_group
# get PCA information
df_pca_components, df_pca_variance = self.compute_pca(df_train_preprocessed,
selected_features)
# Datasets to add
datasets = [{'name': 'feature_descriptives', 'frame': df_descriptives},
{'name': 'feature_descriptivesExtra', 'frame': df_percentiles},
{'name': 'feature_outliers', 'frame': df_outliers},
{'name': 'cors_orig', 'frame': df_pairwise_cors_orig},
{'name': 'cors_processed', 'frame': df_pairwise_cors_preprocess},
{'name': 'margcor_score_all_data', 'frame': df_margcor_sc1},
{'name': 'pcor_score_all_data', 'frame': df_pcor_sc1},
{'name': 'pcor_score_no_length_all_data', 'frame': df_pcor_sc1_no_length},
{'name': 'margcor_length_all_data', 'frame': df_margcor_length},
{'name': 'pcor_length_all_data', 'frame': df_pcor_length},
{'name': 'pca', 'frame': df_pca_components},
{'name': 'pcavar', 'frame': df_pca_variance}]
# Add length correlation by group datasets
for group in length_corr_by_group_dict:
(length_marg_cors,
length_part_cors,
_) = length_corr_by_group_dict.get(group,
(pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame()))
datasets.extend([{'name': 'margcor_length_by_{}'.format(group),
'frame': length_marg_cors},
{'name': 'pcor_length_by_{}'.format(group),
'frame': length_part_cors}])
# Add score correlations by group datasets
for group in score_corr_by_group_dict:
(sc1_marg_cors,
sc1_part_cors,
sc1_part_cors_no_length) = score_corr_by_group_dict[group]
datasets.extend([{'name': 'margcor_score_by_{}'.format(group),
'frame': sc1_marg_cors},
{'name': 'pcor_score_by_{}'.format(group),
'frame': sc1_part_cors},
{'name': 'pcor_score_no_length_by_{}'.format(group),
'frame': sc1_part_cors_no_length}])
return configuration, DataContainer(datasets=datasets)
def run_prediction_analyses(self,
data_container,
configuration):
"""
Run all analyses on the system scores (predictions).
Parameters
----------
data_container : container.DataContainer
            The DataContainer object. This container must include the following
            DataFrames: {"pred_test", "test_metadata", "test_human_scores"}.
configuration : configuration_parser.Configuration
The Configuration object. This configuration object must include the
following parameters (keys): {"subgroups", "second_human_score_column",
"use_scaled_predictions"}.
Returns
-------
data_container : container.DataContainer
A new DataContainer object with the following DataFrames:
- eval
- eval_short
- consistency
- degradation
            - disattenuated_correlations
- confMatrix
- score_dist
- eval_by_*
- consistency_by_*
            - disattenuated_correlations_by_*
- true_score_eval
configuration : configuration_parser.Configuration
A new Configuration object.
"""
frame_names = ['pred_test', 'test_metadata', 'test_human_scores']
param_names = ['subgroups',
'second_human_score_column',
'use_scaled_predictions']
self.check_frame_names(data_container, frame_names)
self.check_param_names(configuration, param_names)
df_test = data_container.pred_test.copy()
df_test_metadata = data_container.test_metadata.copy()
df_test_human_scores = data_container.test_human_scores.copy()
subgroups = configuration['subgroups']
use_scaled_predictions = configuration['use_scaled_predictions']
df_preds = | pd.merge(df_test, df_test_metadata, on='spkitemid') | pandas.merge |
# -*- coding: utf-8 -*-
import re
import os
import shutil
import pandas as pd
import zipfile
from Classifylib import Extraction
from chardet.universaldetector import UniversalDetector
from io import StringIO
import configparser
import tarfile
import subprocess
###############################################################################
class Initialize:
def __init__(self, basedir):
self.basedir = basedir
config = configparser.ConfigParser()
config.read('conf/config.ini')
self.edit = int(config['logbeats']['edit'])
self.dataPath = {}
if self.edit:
self.conf = config['logbeats']['conf']
self.input = config['logbeats']['input']
self.output = config['logbeats']['output']
self.project = config['logbeats']['project']
else:
self.conf = os.path.join(self.basedir, 'conf')
self.workspace = os.path.join(self.basedir, 'workspace')
self.input = os.path.join(self.basedir, 'workspace', 'input')
self.output = os.path.join(self.basedir, 'workspace', 'output')
self.project = os.path.join(self.basedir, 'workspace', 'project')
def run(self):
self.dataPath['conf'] = self.conf
self.dataPath['input'] = self.input
self.dataPath['output'] = self.output
self.dataPath['project'] = self.project
return self.dataPath
def dirCheck(self):
if not os.path.isdir(self.workspace):
try:
os.mkdir(self.workspace)
except FileExistsError:
pass
if not os.path.isdir(self.input):
try:
os.mkdir(self.input)
except FileExistsError:
pass
if not os.path.isdir(self.output):
try:
os.mkdir(self.output)
except FileExistsError:
pass
if not os.path.isdir(self.project):
try:
os.mkdir(self.project)
except FileExistsError:
pass
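# A minimal usage sketch (assuming the default, non-edit configuration):
#   init = Initialize(os.path.abspath(os.path.dirname(__file__)))
#   init.dirCheck()        # create workspace/input/output/project if missing
#   location = init.run()  # {'conf': ..., 'input': ..., 'output': ..., 'project': ...}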
###############################################################################
def FindDataFiles(Path, ext):
filefind = DirFiles(Path, ext)
zipName = filefind.FileNames()
return zipName
###############################################################################
def get_encoding_type(pathname):  # detect the file's character encoding
detector = UniversalDetector()
detector.reset()
try:
with open(pathname, 'rb') as fp:
for line in fp:
detector.feed(line)
if detector.done:
break
except FileNotFoundError:
return str('FileNotFoundError')
detector.close()
return detector.result['encoding']
###############################################################################
def convert_encoding_type(pathname):  # rewrite the file as UTF-8
encode_type = get_encoding_type(pathname)
with open(pathname, 'r', encoding=encode_type) as fp:
content = fp.read()
with open(pathname, 'w', encoding='utf-8', newline='\n') as fp:
fp.write(content)
return content
###############################################################################
class DataToZipfile:  # read the contents of a (password-protected) zip archive
def __init__(self, zName, password=None):
self._pItem = False
self._zName = zName
self._password = password
self.NameDict = {}
if password is not None:
self._pItem = True
self._archive = zipfile.ZipFile(zName, 'r')
###############################################################################
    def Get_InfoList(self):  # not essential yet
return self._archive.infolist()
###############################################################################
    def Get_PrintDir(self):  # not essential yet
        return self._archive.printdir()  # customize here if needed
###############################################################################
def Get_InfoFile(self, fName):
return self._archive.getinfo(fName)
###############################################################################
    def Get_FileList(self, reverse=True):  # True: key by name without extension, False: key by full filename
files = self._archive.namelist()
for file in files:
fileName = file.split('.')
if reverse:
self.NameDict[fileName[0]] = file
else:
self.NameDict[file] = fileName[0]
return self.NameDict
###############################################################################
    def FileToPd(self, fName):  # read the file as UTF-8 into a pandas DataFrame
if self._pItem:
self._archive.setpassword(self._password.encode())
try:
data = self._archive.read(fName)
data = str(data, 'utf-8')
data = StringIO(data)
except IOError:
            return str('wrong password or other read error')
return pd.read_csv(data)
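# A minimal usage sketch (hypothetical archive name and password):
#   archive = DataToZipfile('workspace/project/host01.zip', password='secret')
#   names = archive.Get_FileList()   # e.g. {'RegistryData': 'RegistryData.csv', ...}
#   df = archive.FileToPd(names['RegistryData'])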
###############################################################################
class strToInt:
def __init__(self):
self.lDiv = {'d': 'Dir', '-': 'File', 's': 'Socket', 'b': 'Block',
'l': 'Link', 'c': 'IO'}
self.lChmod = {'r': 4, 'w': 2, 'x': 1, '-': 0, 's': 1, 't': 1}
self.lClass = {'r': 'Read', 'w': 'Write', 'x': 'Execute', '-': 'None',
's': 'Set', 't': 'Bit', 'S': 'NoBit', 'T': 'NoBit'}
def div(self, data):
if data is None:
raise Exception("Need to data")
return self.lDiv[data[0:1]]
def special(self, data):
if data is None:
raise Exception("Need to data")
temp = []
temp.append(self.lClass[data[3]])
temp.append(self.lClass[data[6]])
temp.append(self.lClass[data[9]])
return temp
def permission(self, data):
if data is None:
raise Exception("Need to data")
temp = []
temp.append(self.chmodToInt(data[1:4]))
temp.append(self.chmodToInt(data[4:7]))
temp.append(self.chmodToInt(data[7:10]))
return int(''.join(temp))
def chmodToInt(self, data):
if data is None:
raise Exception("Need to data")
result = 0
for key in data:
            result += self.lChmod[key.lower()]  # case-insensitive
return str(result)
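# Example of the conversion above, using standard `ls -l` notation:
#   strToInt().div('-rwxr-xr--')        -> 'File'
#   strToInt().permission('-rwxr-xr--') -> 754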
###############################################################################
class DirFiles:  # collect file and directory names under a path
def __init__(self, path, ext=None):
if ext is None:
ext = 'zip'
self._ext = ext.lower()
self.path = path
self.NameDict = {}
###############################################################################
    def FileNames(self, reverse=True):  # True: key by name without extension, False: key by full filename
for root, dirs, files in os.walk(self.path):
for file in files:
if file[-len(self._ext):].lower() == self._ext:
fileName = file[:file.rfind('.')]
if reverse:
self.NameDict[fileName] = file
else:
self.NameDict[file] = fileName
return self.NameDict
###############################################################################
    def DirNames(self, reverse=True):  # collect subdirectory names under the path
for root, dirs, files in os.walk(self.path):
for Dir in dirs:
DirPath = os.path.join(self.path, Dir)
if reverse:
self.NameDict[Dir] = DirPath
else:
self.NameDict[DirPath] = Dir
return self.NameDict
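# A minimal usage sketch (hypothetical path):
#   csvfind = DirFiles('/tmp/output', '.csv')
#   csvfind.FileNames()   # e.g. {'ProcessState': 'ProcessState.csv', ...}
#   csvfind.DirNames()    # e.g. {'host01': '/tmp/output/host01', ...}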
###############################################################################
class DataToProcess:
def __init__(self, location):
self.location = location
self.dPath = location['input']
self.dirFind = DirFiles(self.dPath)
###############################################################################
def DataMining(self, name):
if os.path.splitext(name)[1] == '.tar':
self.tarToZip(self.location['input']+'/'+name)
if os.path.isfile(self.location['input']+'/'+name):
os.remove(self.location['input']+'/'+name)
if os.path.splitext(name)[1] == '.zip':
self.read_input(self.location['input']+'/'+name,
self.location['input']+'/'+os.path.splitext(name)[0],
None)
name = os.path.splitext(name)[0]
csvfind = DirFiles(self.location['input']+'/'+name, '.csv')
csvName = csvfind.FileNames()
for cName in csvName:
self.dataArrange(self.location['input']+'/'+name, csvName[cName])
f = zipfile.ZipFile(self.location['input']+'/'+name+'.zip', 'w')
for file in os.listdir(self.location['input']+'/'+name):
f.write(self.location['input']+'/'+name+'/'+file,
file, compress_type=zipfile.ZIP_DEFLATED)
f.close()
shutil.rmtree(self.location['input']+'/'+name)
EaN = os.path.exists(self.location['project']+'/'+name+'.zip')
if EaN:
os.remove(self.location['project']+'/'+name+'.zip')
shutil.move(self.location['input']+'/'+name+'.zip',
self.location['project'])
else:
shutil.move(self.location['input']+'/'+name+'.zip',
self.location['project'])
###############################################################################
def DataExtract(self, pw=None):
zipName = self.dirFind.FileNames()
        for name in zipName:  # find archives in the input folder and extract them
self.read_input(self.dPath+zipName[name], self.dPath+name, pw)
###############################################################################
def DataConvert(self):
dirName = self.dirFind.DirNames()
        for dName in dirName:  # convert the CSV files in each extracted folder
csvfind = DirFiles(dirName[dName], '.csv')
csvName = csvfind.FileNames()
for cName in csvName:
self.dataArrange(dirName[dName], csvName[cName])
###############################################################################
def DataUnite(self):
dirName = self.dirFind.DirNames()
for name in dirName:
f = zipfile.ZipFile(dirName[name]+'.zip', 'w')
for file in os.listdir(dirName[name]):
f.write(dirName[name]+'/'+file, file,
compress_type=zipfile.ZIP_DEFLATED)
f.close()
###############################################################################
def DelandMove(self, ProjectPath):
dirName = self.dirFind.DirNames()
for name in dirName:
shutil.rmtree(dirName[name])
moveName = self.dirFind.FileNames()
for name in moveName:
EaN = os.path.exists(ProjectPath+moveName[name])
if EaN:
os.remove(ProjectPath+moveName[name])
shutil.move(self.dPath+moveName[name], ProjectPath)
else:
shutil.move(self.dPath+moveName[name], ProjectPath)
###############################################################################
def read_input(self, zDataName, unzipName, password):
zf = zipfile.ZipFile(zDataName)
if password is not None:
zf.setpassword(password.encode())
try:
for name in zf.namelist():
zf.extract(name, unzipName)
zf.close()
except Exception:
pass
###############################################################################
def tarToZip(self, fname):
if tarfile.is_tarfile(fname):
dpath = os.path.split(fname)[0]
dname = os.path.split(fname)[1].split('.tar')
zipdir = os.path.join(dpath, dname[0])
if not os.path.isdir(zipdir):
try:
os.mkdir(zipdir)
except FileExistsError:
pass
tar = tarfile.open(fname)
            # extract the whole archive
tar.extractall(zipdir)
filelist = tar.getnames()
tar.close()
basedir = os.path.abspath(os.path.dirname(__file__))
cli = os.path.join(basedir, 'conf', 'cli', 'Linux', 'Linux.cli')
df = self.ResultDATA(zipdir, cli)
df.to_csv(os.path.join(zipdir, 'ResultData.csv'), mode='w',
index=False)
for dName in filelist:
if dName.split('.')[1] == 'csv':
df = self.EtcToFile(os.path.join(zipdir, dName))
df.to_csv(os.path.join(zipdir, dName), mode='w',
index=False)
self.PdFileDir(os.path.join(zipdir, dName))
else:
pass
###############################################################################
def ResultDATA(self, dirname, filename):
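        # The .cli file is assumed to hold one check per line in the form
        # '<check name>,<shell command>'; commands may reference
        # ProcessState.csv / ExtractResult.txt, which are rewritten below to
        # absolute paths inside the extracted directory.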
encode_type = get_encoding_type(filename)
ProcessState = os.path.join(dirname, 'ProcessState.csv')
ExtractResult = os.path.join(dirname, 'ExtractResult.txt')
DataResult = []
with open(filename, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip() for x in contents]
for line in contents:
DataLine = line.split(',')
Data = DataLine[0]
DataLine[1] = DataLine[1].replace('ProcessState.csv',
ProcessState)
DataLine[1] = DataLine[1].replace('ExtractResult.txt',
ExtractResult)
sysMsg = subprocess.getstatusoutput(DataLine[1])
                if sysMsg[1]:  # the command produced output
Resul = Data, sysMsg[1].strip()
DataResult.append(Resul)
                else:  # no output
Resul = Data, 'NoData'
DataResult.append(Resul)
return pd.DataFrame(DataResult, columns=('Name', 'Data'))
###############################################################################
    def dataArrange(self, PathDir, PathFile):  # post-process the selected file
PathDirFile = os.path.join(PathDir, PathFile)
try:
if PathFile == 'SecurityPolicy.csv':
df = self.SeceditFile(PathDirFile)
df.to_csv(PathDirFile, mode='w', index=False)
elif PathFile == 'RegistryData.csv':
df = self.RegistryFile(PathDirFile)
df.to_csv(PathDirFile, mode='w', index=False)
elif PathFile == 'UserResult.csv':
df = self.UserResult(PathDirFile)
df.to_csv(PathDirFile, mode='w', index=False)
else:
self.PdFileDir(PathDirFile)
except Exception:
self.PdFileDir(PathDirFile)
###############################################################################
def EtcToFile(self, file):
filename = os.path.split(os.path.splitext(file)[0])
encode_type = get_encoding_type(file)
try:
ToData = []
pattern = re.compile(r'\s+')
with open(file, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip() for x in contents]
for line in contents:
if filename[1] == 'passwd' or filename[1] == 'shadow':
                        line = re.sub(pattern, '', line)  # strip whitespace
splitLine = line.split(':')
elif filename[1] == 'Permission':
if not line.startswith('ls:'):
splitLine = line.split()
psn = strToInt().permission(splitLine[0])
splitLine.append(int(psn))
elif (filename[1] == 'WorldWritable'
or filename[1] == 'StickBit'
or filename[1] == 'RootOrUser'):
splitLine = line.split()
elif filename[1] == 'ProcessState':
splitLine = line.split(',')
ToData.append(splitLine)
except FileNotFoundError:
return str('FileNotFoundError')
if filename[1] == 'passwd':
return pd.DataFrame(ToData, columns=('Name', 'Pwd', 'Uid', 'Gid',
'Info', 'Home', 'Login'))
elif filename[1] == 'shadow':
return pd.DataFrame(ToData, columns=('Name', 'Pwd', 'Last', 'Min',
'Max', 'Expiration',
                                                 'Destruction', 'Expire',
'reserved'))
elif filename[1] == 'Permission':
return pd.DataFrame(ToData, columns=('Permission', 'Link', 'Owner',
'Group', 'Size', 'Month',
'Day', 'Year', 'Name',
'Data'))
elif filename[1] == 'StickBit' or filename[1] == 'RootOrUser':
return pd.DataFrame(ToData, columns=('Permission', 'Link', 'Owner',
'Group', 'Size', 'Month',
'Day', 'Year', 'Name'))
elif filename[1] == 'WorldWritable':
return pd.DataFrame(ToData, columns=('Inode', 'BlockCount',
'Permission', 'Link', 'Owner',
'Group', 'Size', 'Month',
'Day', 'Year', 'Name'))
elif filename[1] == 'ProcessState':
return pd.DataFrame(ToData, columns=('Name', 'Started',
'Cmd')).drop([0])
else:
return pd.DataFrame(ToData)
###############################################################################
    def SeceditFile(self, filename):  # load a secedit (SecurityPolicy) export into a pandas DataFrame
encode_type = get_encoding_type(filename)
try:
ToData = []
pattern = re.compile(r'\s+')
with open(filename, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip() for x in contents]
for line in contents:
line = line.replace('"', '')
line = line.replace(',', '')
                    line = re.sub(pattern, '', line)  # strip whitespace
splitLine = line.split('=')
ToData.append(splitLine)
except FileNotFoundError:
return str('FileNotFoundError')
return pd.DataFrame(ToData, columns=('Name', 'Data')).dropna()
###############################################################################
def RegistryFile(self, filename):
encode_type = get_encoding_type(filename)
try:
ToData = []
pattern = re.compile(r'\s+')
with open(filename, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip() for x in contents]
for line in contents:
                    line = re.sub(pattern, '', line)  # strip whitespace
line = line.replace('0x', '')
line = line.replace('REG_SZ', '=')
line = line.replace('REG_DWORD', '=')
line = line.replace('REG_BINARY', '=')
splitLine = line.split('=')
ToData.append(splitLine)
except FileNotFoundError:
return str('FileNotFoundError')
return pd.DataFrame(ToData, columns=('Name', 'Data'))
###############################################################################
    def EtcInfoFile(self, filename):  # load a colon-separated info file into a pandas DataFrame
encode_type = get_encoding_type(filename)
try:
ToData = []
pattern = re.compile(r'\s+')
with open(filename, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip() for x in contents]
for line in contents:
                    line = re.sub(pattern, '', line)  # strip whitespace
splitLine = line.split(':')
ToData.append(splitLine)
except FileNotFoundError:
return str('FileNotFoundError')
return pd.DataFrame(ToData, columns=('Name', 'Data'))
###############################################################################
    def UserResult(self, filename):  # load 'net user' account output into a pandas DataFrame
NameInfo = []
UserInfo = {'User name': [],
'Account active': [],
'Account expires': [],
'Last logon': [],
'Password expires': [],
'Password changeable': [],
'Password required': [],
'Password last set': [],
'Local Group Memberships': []}
encode_type = get_encoding_type(filename)
try:
with open(filename, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip() for x in contents]
for line in UserInfo.keys():
NameInfo.append(line)
for line in contents:
line = line.rstrip()
if line.startswith(NameInfo[0]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[0]].append(result)
if line.startswith(NameInfo[1]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[1]].append(result)
if line.startswith(NameInfo[2]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[2]].append(result)
if line.startswith(NameInfo[3]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[3]].append(result)
if line.startswith(NameInfo[4]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[4]].append(result)
if line.startswith(NameInfo[5]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[5]].append(result)
if line.startswith(NameInfo[6]):
Product = line.split()
result = ' '.join(Product[2:])
UserInfo[NameInfo[6]].append(result)
if line.startswith(NameInfo[7]):
Product = line.split()
result = ' '.join(Product[3:])
UserInfo[NameInfo[7]].append(result)
if line.startswith(NameInfo[8]):
Product = line.split()
result = ' '.join(Product[3:])
UserInfo[NameInfo[8]].append(result)
except FileNotFoundError:
return str('FileNotFoundError')
return pd.DataFrame(UserInfo)
###############################################################################
    def PdFileDir(self, filename):  # rewrite the file as UTF-8 with normalized separators
encode_type = get_encoding_type(filename)
try:
with open(filename, 'r', encoding=encode_type) as file:
contents = file.readlines()
contents = [x.strip(', ') for x in contents]
ToData = []
for line in contents:
text = re.sub(', ', ' ', line)
ToData.append(text)
with open(filename, 'w', encoding='utf-8', newline='\n') as file:
for line in ToData:
file.write(line)
except FileNotFoundError:
return str('FileNotFoundError')
return filename
###############################################################################
    def NetStatFile(self, filename):  # load netstat output into a pandas DataFrame
encode_type = get_encoding_type(filename)
try:
ToData = []
with open(filename, 'r', encoding=encode_type) as file:
for i, line in enumerate(file):
if i >= 4:
splitLine = line.split()
ToData.append(splitLine)
except FileNotFoundError:
return str('FileNotFoundError')
return pd.DataFrame(ToData, columns=('Proto', 'LocalAddress',
'ForeignAddress',
'State', 'PID'))
###############################################################################
class DataUnpack:
def __init__(self, ProjectPath):
self.ProjectPath = ProjectPath
self.zipName = FindDataFiles(self.ProjectPath, 'zip')
###############################################################################
def UnpackExtract(self, zID):
archive = DataToZipfile(self.ProjectPath+'/'+self.zipName.get(zID))
csvName = archive.Get_FileList()
try:
RegistryData = archive.FileToPd(
csvName['RegistryData']).dropna(axis=1, how='all')
RegistryData = Extraction('RegistryData').Analysis(RegistryData)
except Exception:
ToData = {'Name': ['RegistryData'], 'Data': [str(0)]}
RegistryData = pd.DataFrame(ToData)
try:
result = archive.FileToPd(
csvName['ExtractResult']).dropna(axis=1, how='all')
result = Extraction(zID).Analysis(result)
result = result.replace(' ', '', regex=True)
except Exception:
ToData = {'Name': ['result'], 'Data': [str(0)]}
result = | pd.DataFrame(ToData) | pandas.DataFrame |
import json
from logging import getLogger
import numpy as np
import pandas as pd
import pytest
from whylogs.app.config import load_config
from whylogs.app.session import session_from_config
from whylogs.core.statistics.constraints import (
MAX_SET_DISPLAY_MESSAGE_LENGTH,
DatasetConstraints,
MultiColumnValueConstraint,
MultiColumnValueConstraints,
Op,
SummaryConstraint,
SummaryConstraints,
ValueConstraint,
ValueConstraints,
_matches_json_schema,
_summary_funcs1,
_value_funcs,
approximateEntropyBetweenConstraint,
columnChiSquaredTestPValueGreaterThanConstraint,
columnExistsConstraint,
columnKLDivergenceLessThanConstraint,
columnMostCommonValueInSetConstraint,
columnPairValuesInSetConstraint,
columnsMatchSetConstraint,
columnUniqueValueCountBetweenConstraint,
columnUniqueValueProportionBetweenConstraint,
columnValuesAGreaterThanBConstraint,
columnValuesInSetConstraint,
columnValuesNotNullConstraint,
columnValuesTypeEqualsConstraint,
columnValuesTypeInSetConstraint,
columnValuesUniqueWithinRow,
containsCreditCardConstraint,
containsEmailConstraint,
containsSSNConstraint,
containsURLConstraint,
dateUtilParseableConstraint,
distinctValuesContainSetConstraint,
distinctValuesEqualSetConstraint,
distinctValuesInSetConstraint,
jsonParseableConstraint,
matchesJsonSchemaConstraint,
maxBetweenConstraint,
meanBetweenConstraint,
minBetweenConstraint,
missingValuesProportionBetweenConstraint,
numberOfRowsConstraint,
parametrizedKSTestPValueGreaterThanConstraint,
quantileBetweenConstraint,
stddevBetweenConstraint,
strftimeFormatConstraint,
stringLengthBetweenConstraint,
stringLengthEqualConstraint,
sumOfRowValuesOfMultipleColumnsEqualsConstraint,
)
from whylogs.proto import InferredType, Op
from whylogs.util.protobuf import message_to_json
TEST_LOGGER = getLogger(__name__)
def test_value_summary_serialization():
for each_op, _ in _value_funcs.items():
if each_op == Op.APPLY_FUNC:
continue
if each_op == Op.IN:
value = ValueConstraint(each_op, {3.6})
else:
value = ValueConstraint(each_op, 3.6)
msg_value = value.to_protobuf()
json_value = json.loads(message_to_json(msg_value))
if each_op == Op.IN:
assert json_value["name"] == "value " + Op.Name(each_op) + " {3.6}"
assert json_value["valueSet"][0] == [3.6]
else:
assert json_value["name"] == "value " + Op.Name(each_op) + " 3.6"
assert pytest.approx(json_value["value"], 0.001) == 3.6
assert json_value["op"] == Op.Name(each_op)
assert json_value["verbose"] is False
for each_op, _ in _summary_funcs1.items():
if each_op in (Op.BTWN, Op.IN_SET, Op.CONTAIN_SET, Op.EQ_SET, Op.CONTAIN, Op.IN):
continue
# constraints may have an optional name
sum_constraint = SummaryConstraint("min", each_op, 300000, name="< 30K")
msg_sum_const = sum_constraint.to_protobuf()
json_summary = json.loads(message_to_json(msg_sum_const))
assert json_summary["name"] == "< 30K"
assert pytest.approx(json_summary["value"], 0.1) == 300000
assert json_summary["firstField"] == "min"
assert json_summary["op"] == str(Op.Name(each_op))
assert json_summary["verbose"] is False
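# For reference, a serialized ValueConstraint round-trips through JSON roughly as
# (a sketch based only on the assertions above, not an exhaustive schema):
#   {"name": "value LT 3.6", "op": "LT", "value": 3.6, "verbose": false}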
def test_value_constraints(df_lending_club, local_config_path):
conforming_loan = ValueConstraint(Op.LT, 548250)
smallest_loan = ValueConstraint(Op.GT, 2500.0, verbose=True)
high_fico = ValueConstraint(Op.GT, 4000)
dc = DatasetConstraints(None, value_constraints={"loan_amnt": [conforming_loan, smallest_loan], "fico_range_high": [high_fico]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = dc.report()
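    # each report entry is assumed to be
    # (feature_name, [(constraint_name, total, failures), ...]),
    # which is what the indexing below relies on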
assert len(report) == 2
# make sure it checked every value
for each_feat in report:
for each_constraint in each_feat[1]:
assert each_constraint[1] == 50
assert report[1][1][0][2] == 50
def test_value_constraints_pattern_match(df_lending_club, local_config_path):
regex_state_abbreviation = r"^[a-zA-Z]{2}$"
contains_state = ValueConstraint(Op.MATCH, regex_pattern=regex_state_abbreviation)
regex_date = r"^[a-zA-Z]{3}-[0-9]{4}$"
not_contains_date = ValueConstraint(Op.NOMATCH, regex_pattern=regex_date)
# just to test applying regex patterns on non-string values
contains_state_loan_amnt = ValueConstraint(Op.MATCH, regex_pattern=regex_state_abbreviation)
dc = DatasetConstraints(
None, value_constraints={"addr_state": [contains_state], "earliest_cr_line": [not_contains_date], "loan_amnt": [contains_state_loan_amnt]}
)
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = dc.report()
# checks there are constraints for 3 features
assert len(report) == 3
# make sure it checked every value
for each_feat in report:
for each_constraint in each_feat[1]:
assert each_constraint[1] == 50
# Every row should match a state abbreviation
assert report[0][1][0][2] == 0
# At least 1 should be a match w/ the given pattern (# of failures of NOMATCH = # Matches)
assert report[1][1][0][2] > 0
# Every row should be a failure, because "loan_amnt" is not a string type
assert report[2][1][0][2] == 50
def test_summary_constraints(df_lending_club, local_config_path):
non_negative = SummaryConstraint("min", Op.GE, 0)
dc = DatasetConstraints(None, summary_constraints={"annual_inc": [non_negative]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
    report = profile.apply_summary_constraints()
assert len(report) == 1
# make sure it checked every value
for each_feat in report:
for each_constraint in each_feat[1]:
assert each_constraint[1] == 1
def test_value_constraints_no_merge_different_names():
constraint1 = ValueConstraint(Op.LT, 1, name="c1")
constraint2 = ValueConstraint(Op.LT, 1, name="c2")
with pytest.raises(AssertionError):
constraint1.merge(constraint2)
def test_value_constraints_no_merge_different_values():
constraint1 = ValueConstraint(Op.LT, 1)
constraint2 = ValueConstraint(Op.LT, 2)
with pytest.raises(AssertionError):
constraint1.merge(constraint2)
def test_summary_constraints_no_merge_different_names():
constraint1 = SummaryConstraint("min", Op.GE, 0, name="non-negative")
constraint2 = SummaryConstraint("min", Op.GE, 0, name="positive-number")
with pytest.raises(AssertionError):
constraint1.merge(constraint2)
def test_summary_constraints_no_merge_different_values():
constraint1 = SummaryConstraint("min", Op.GE, 1, name="GreaterThanThreshold")
constraint2 = SummaryConstraint("min", Op.GE, 2, name="GreaterThanThreshold")
with pytest.raises(AssertionError):
constraint1.merge(constraint2)
def test_value_constraints_merge():
constraint1 = ValueConstraint(Op.LT, 1)
constraint2 = ValueConstraint(Op.LT, 1)
merged = constraint1.merge(constraint2)
assert merged.report() == ("value LT 1", 0, 0), "merging unlogged constraints should not change them from their initial state"
def test_value_constraints_merge_empty():
constraint1 = ValueConstraint(Op.LT, 1)
constraint2 = None
merged = constraint1.merge(constraint2)
assert merged == constraint1, "merging empty constraints should preserve left hand side"
def test_value_constraints_with_zero_as_value():
c1 = ValueConstraint(Op.LT, 0)
json_value = json.loads(message_to_json(c1.to_protobuf()))
assert json_value["name"] == f"value {Op.Name(Op.LT)} 0"
assert pytest.approx(json_value["value"], 0.01) == 0.0
assert json_value["op"] == Op.Name(Op.LT)
assert json_value["verbose"] is False
def test_value_constraints_raw_and_coerced_types_serialize_deserialize():
pattern = r"\S+@\S+"
c1 = ValueConstraint(Op.GE, 0)
c2 = ValueConstraint(Op.MATCH, regex_pattern=pattern)
constraints = ValueConstraints([c1, c2])
constraints.update("abc")
constraints.update_typed(1)
constraints.from_protobuf(constraints.to_protobuf())
msg_const = constraints.to_protobuf()
json_val = json.loads(message_to_json(msg_const))
first_val_constraint = json_val["constraints"][0]
second_val_constraint = json_val["constraints"][1]
assert first_val_constraint["name"] == f"value {Op.Name(Op.MATCH)} {pattern}"
assert first_val_constraint["op"] == Op.Name(Op.MATCH)
assert first_val_constraint["regexPattern"] == pattern
assert first_val_constraint["verbose"] is False
assert second_val_constraint["name"] == f"value {Op.Name(Op.GE)} 0"
assert second_val_constraint["op"] == Op.Name(Op.GE)
assert pytest.approx(second_val_constraint["value"], 0.01) == 0
assert second_val_constraint["verbose"] is False
def test_value_constraints_raw_and_coerced_types_merge():
pattern = r"\S+@\S+"
c1 = ValueConstraint(Op.GE, 0)
c2 = ValueConstraint(Op.MATCH, regex_pattern=pattern)
constraints = ValueConstraints([c1, c2])
c3 = ValueConstraint(Op.GE, 0)
c4 = ValueConstraint(Op.MATCH, regex_pattern=pattern)
constraints2 = ValueConstraints([c3, c4])
merged = constraints.merge(constraints2)
json_val = json.loads(message_to_json(merged.to_protobuf()))
first_val_constraint = json_val["constraints"][0]
second_val_constraint = json_val["constraints"][1]
assert first_val_constraint["name"] == f"value {Op.Name(Op.MATCH)} {pattern}"
assert first_val_constraint["op"] == Op.Name(Op.MATCH)
assert first_val_constraint["regexPattern"] == pattern
assert first_val_constraint["verbose"] is False
assert second_val_constraint["name"] == f"value {Op.Name(Op.GE)} 0"
assert second_val_constraint["op"] == Op.Name(Op.GE)
assert pytest.approx(second_val_constraint["value"], 0.01) == 0
assert second_val_constraint["verbose"] is False
def test_value_constraints_raw_and_coerced_types_report():
pattern = r"\S+@\S+"
c1 = ValueConstraint(Op.GE, 0)
c2 = ValueConstraint(Op.MATCH, regex_pattern=pattern)
constraints = ValueConstraints({c1.name: c1, c2.name: c2})
report = constraints.report()
assert report[0][0] == f"value {Op.Name(Op.MATCH)} {pattern}"
assert report[0][1] == 0
assert report[0][2] == 0
assert report[1][0] == f"value {Op.Name(Op.GE)} 0"
assert report[1][1] == 0
assert report[1][2] == 0
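# Round-trips a BTWN summary constraint through protobuf and checks that the
# name, bounds, field, operator and verbosity survive serialization.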
def test_summary_between_serialization_deserialization():
# constraints may have an optional name
sum_constraint = SummaryConstraint("min", Op.BTWN, 0.1, 2.4)
msg_sum_const = sum_constraint.to_protobuf()
json_summary = json.loads(message_to_json(msg_sum_const))
assert json_summary["name"] == "summary min BTWN 0.1 and 2.4"
assert pytest.approx(json_summary["between"]["lowerValue"], 0.1) == 0.1
assert pytest.approx(json_summary["between"]["upperValue"], 0.1) == 2.4
assert json_summary["firstField"] == "min"
assert json_summary["op"] == str(Op.Name(Op.BTWN))
assert json_summary["verbose"] == False
sum_deser_constraint = SummaryConstraint.from_protobuf(sum_constraint.to_protobuf())
json_deser_summary = json.loads(message_to_json(sum_deser_constraint.to_protobuf()))
assert json_summary["name"] == json_deser_summary["name"]
assert pytest.approx(json_summary["between"]["lowerValue"], 0.001) == pytest.approx(json_deser_summary["between"]["lowerValue"], 0.001)
assert pytest.approx(json_summary["between"]["upperValue"], 0.001) == pytest.approx(json_deser_summary["between"]["upperValue"], 0.001)
assert json_summary["firstField"] == json_deser_summary["firstField"]
assert json_summary["op"] == json_deser_summary["op"]
assert json_summary["verbose"] == json_deser_summary["verbose"]
def test_summary_between_constraint_incompatible_parameters():
with pytest.raises(TypeError):
SummaryConstraint("min", Op.BTWN, 0.1, "stddev")
with pytest.raises(ValueError):
SummaryConstraint("min", Op.BTWN, 0.1, second_field="stddev")
with pytest.raises(ValueError):
SummaryConstraint("min", Op.BTWN, 0.1, 2.4, "stddev")
with pytest.raises(ValueError):
SummaryConstraint("min", Op.BTWN, 0.1, 2.4, third_field="stddev")
with pytest.raises(TypeError):
SummaryConstraint("stddev", Op.BTWN, second_field=2, third_field="max")
with pytest.raises(TypeError):
SummaryConstraint("stddev", Op.BTWN, 2, "max")
def _apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, between_constraint):
min_gt_constraint = SummaryConstraint("min", Op.GT, value=100)
max_le_constraint = SummaryConstraint("max", Op.LE, value=5)
dc = DatasetConstraints(None, summary_constraints={"annual_inc": [between_constraint, max_le_constraint], "loan_amnt": [min_gt_constraint]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
assert len(report) == 2
# make sure it checked every value
for each_feat in report:
for each_constraint in each_feat[1]:
assert each_constraint[1] == 1
def test_summary_between_constraints_values(df_lending_club, local_config_path):
std_dev_between = SummaryConstraint("stddev", Op.BTWN, value=100, upper_value=200)
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, std_dev_between)
def test_summary_between_constraints_fields(df_lending_club, local_config_path):
std_dev_between = SummaryConstraint("stddev", Op.BTWN, second_field="mean", third_field="max")
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, std_dev_between)
def test_summary_between_constraints_no_merge_different_values_fields():
std_dev_between1 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=200)
std_dev_between2 = SummaryConstraint("stddev", Op.BTWN, value=0.2, upper_value=200)
with pytest.raises(AssertionError):
std_dev_between1.merge(std_dev_between2)
std_dev_between1 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=200)
std_dev_between2 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=300)
with pytest.raises(AssertionError):
std_dev_between1.merge(std_dev_between2)
std_dev_between1 = SummaryConstraint("stddev", Op.BTWN, second_field="min", third_field="max")
std_dev_between2 = SummaryConstraint("stddev", Op.BTWN, second_field="mean", third_field="max")
with pytest.raises(AssertionError):
std_dev_between1.merge(std_dev_between2)
std_dev_between1 = SummaryConstraint("stddev", Op.BTWN, second_field="min", third_field="mean")
std_dev_between2 = SummaryConstraint("stddev", Op.BTWN, second_field="min", third_field="max")
with pytest.raises(AssertionError):
std_dev_between1.merge(std_dev_between2)
def test_summary_between_constraints_no_merge_different_names():
std_dev_between1 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=200, name="std dev between 1")
std_dev_between2 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=200, name="std dev between 2")
with pytest.raises(AssertionError):
std_dev_between1.merge(std_dev_between2)
def test_summary_between_constraints_merge():
std_dev_between1 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=200)
std_dev_between2 = SummaryConstraint("stddev", Op.BTWN, value=0.1, upper_value=200)
merged = std_dev_between1.merge(std_dev_between2)
pre_merge_json = json.loads(message_to_json(std_dev_between1.to_protobuf()))
merge_json = json.loads(message_to_json(merged.to_protobuf()))
assert pre_merge_json["name"] == merge_json["name"]
assert pytest.approx(pre_merge_json["between"]["lowerValue"], 0.001) == pytest.approx(merge_json["between"]["lowerValue"], 0.001)
assert pytest.approx(pre_merge_json["between"]["upperValue"], 0.001) == pytest.approx(merge_json["between"]["upperValue"], 0.001)
assert pre_merge_json["firstField"] == merge_json["firstField"]
assert pre_merge_json["op"] == merge_json["op"]
assert pre_merge_json["verbose"] == merge_json["verbose"]
def test_stddev_between_constraint_value(df_lending_club, local_config_path):
lower = 2.3
upper = 5.4
stddev_between_values = stddevBetweenConstraint(lower_value=lower, upper_value=upper)
# check if all constraints are applied
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, stddev_between_values)
def test_stddev_between_constraint_field(df_lending_club, local_config_path):
lower = "min"
upper = "max"
stddev_between_fields = stddevBetweenConstraint(lower_field=lower, upper_field=upper)
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, stddev_between_fields)
def test_stddev_between_constraint_invalid():
with pytest.raises(ValueError):
stddevBetweenConstraint(lower_value=2)
with pytest.raises(ValueError):
stddevBetweenConstraint(lower_field="min")
with pytest.raises(TypeError):
stddevBetweenConstraint(lower_value="2", upper_value=2)
with pytest.raises(TypeError):
stddevBetweenConstraint(lower_field="max", upper_field=2)
def test_mean_between_constraint_value(df_lending_club, local_config_path):
lower = 2.3
upper = 5.4
mean_between_values = meanBetweenConstraint(lower_value=lower, upper_value=upper)
# check if all constraints are applied
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, mean_between_values)
def test_mean_between_constraint_field(df_lending_club, local_config_path):
lower = "min"
upper = "max"
mean_between_fields = meanBetweenConstraint(lower_field=lower, upper_field=upper)
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, mean_between_fields)
def test_mean_between_constraint_invalid():
with pytest.raises(ValueError):
meanBetweenConstraint(lower_value=2)
with pytest.raises(ValueError):
meanBetweenConstraint(lower_field="min")
with pytest.raises(TypeError):
meanBetweenConstraint(lower_value="2", upper_value=2)
with pytest.raises(TypeError):
meanBetweenConstraint(lower_field="max", upper_field=2)
def test_min_between_constraint_value(df_lending_club, local_config_path):
lower = 2.3
upper = 5.4
min_between_values = minBetweenConstraint(lower_value=lower, upper_value=upper)
# check if all constraints are applied
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, min_between_values)
def test_min_between_constraint_field(df_lending_club, local_config_path):
lower = "stddev"
upper = "max"
min_between_fields = minBetweenConstraint(lower_field=lower, upper_field=upper)
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, min_between_fields)
def test_min_between_constraint_invalid():
with pytest.raises(ValueError):
minBetweenConstraint(lower_value=2)
with pytest.raises(ValueError):
minBetweenConstraint(lower_field="min")
with pytest.raises(TypeError):
minBetweenConstraint(lower_value="2", upper_value=2)
with pytest.raises(TypeError):
minBetweenConstraint(lower_field="max", upper_field=2)
def test_max_between_constraint_value(df_lending_club, local_config_path):
lower = 2.3
upper = 5.4
max_between_values = maxBetweenConstraint(lower_value=lower, upper_value=upper)
# check if all constraints are applied
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, max_between_values)
def test_max_between_constraint_field(df_lending_club, local_config_path):
lower = "stddev"
upper = "mean"
max_between_fields = maxBetweenConstraint(lower_field=lower, upper_field=upper)
_apply_between_summary_constraint_on_dataset(df_lending_club, local_config_path, max_between_fields)
def test_max_between_constraint_invalid():
with pytest.raises(ValueError):
maxBetweenConstraint(lower_value=2)
with pytest.raises(ValueError):
maxBetweenConstraint(lower_field="min")
with pytest.raises(TypeError):
maxBetweenConstraint(lower_value="2", upper_value=2)
with pytest.raises(TypeError):
maxBetweenConstraint(lower_field="max", upper_field=2)
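# Shared helper: logs df_lending_club with the supplied summary constraints and
# returns the report produced by profile.apply_summary_constraints().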
def _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints):
dc = DatasetConstraints(None, summary_constraints=summary_constraints)
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
return report
def test_set_summary_constraints(df_lending_club, local_config_path):
org_list = list(df_lending_club["annual_inc"])
org_list2 = list(df_lending_club["annual_inc"])
org_list2.extend([1, 4, 5555, "gfsdgs", 0.00333, 245.32])
in_set = distinctValuesInSetConstraint(reference_set=org_list2, name="True")
in_set2 = distinctValuesInSetConstraint(reference_set=org_list, name="True2")
in_set3 = distinctValuesInSetConstraint(reference_set=org_list[:-1], name="False")
eq_set = distinctValuesEqualSetConstraint(reference_set=org_list, name="True3")
eq_set2 = distinctValuesEqualSetConstraint(reference_set=org_list2, name="False2")
eq_set3 = distinctValuesEqualSetConstraint(reference_set=org_list[:-1], name="False3")
contains_set = distinctValuesContainSetConstraint(reference_set=[org_list[2]], name="True4")
contains_set2 = distinctValuesContainSetConstraint(reference_set=org_list, name="True5")
contains_set3 = distinctValuesContainSetConstraint(reference_set=org_list[:-1], name="True6")
contains_set4 = distinctValuesContainSetConstraint(reference_set=[str(org_list[2])], name="False4")
contains_set5 = distinctValuesContainSetConstraint(reference_set=[2.3456], name="False5")
contains_set6 = distinctValuesContainSetConstraint(reference_set=org_list2, name="False6")
constraints = [in_set, in_set2, in_set3, eq_set, eq_set2, eq_set3, contains_set, contains_set2, contains_set3, contains_set4, contains_set5, contains_set6]
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, {"annual_inc": constraints})
for r in report[0][1]:
if "True" in r[0]:
assert r[2] == 0
else:
assert r[2] == 1
def test_set_summary_constraint_invalid_init():
with pytest.raises(TypeError):
SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, reference_set=1)
with pytest.raises(ValueError):
SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, 1)
with pytest.raises(ValueError):
SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, second_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, third_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, upper_value=2)
def test_set_summary_no_merge_different_set():
set_c_1 = SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, reference_set=[1, 2, 3])
set_c_2 = SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, reference_set=[2, 3, 4, 5])
with pytest.raises(AssertionError):
set_c_1.merge(set_c_2)
def test_set_summary_merge():
set_c_1 = SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, reference_set=[1, 2, 3])
set_c_2 = SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, reference_set=[1, 2, 3])
merged = set_c_1.merge(set_c_2)
pre_merge_json = json.loads(message_to_json(set_c_1.to_protobuf()))
merge_json = json.loads(message_to_json(merged.to_protobuf()))
assert pre_merge_json["name"] == merge_json["name"]
assert pre_merge_json["referenceSet"] == merge_json["referenceSet"]
assert pre_merge_json["firstField"] == merge_json["firstField"]
assert pre_merge_json["op"] == merge_json["op"]
assert pre_merge_json["verbose"] == merge_json["verbose"]
def test_set_summary_serialization():
set1 = SummaryConstraint("distinct_column_values", Op.CONTAIN_SET, reference_set=[1, 2, 3])
set2 = SummaryConstraint.from_protobuf(set1.to_protobuf())
set1_json = json.loads(message_to_json(set1.to_protobuf()))
set2_json = json.loads(message_to_json(set2.to_protobuf()))
assert set1_json["name"] == set2_json["name"]
assert set1_json["referenceSet"] == set2_json["referenceSet"]
assert set1_json["firstField"] == set2_json["firstField"]
assert set1_json["op"] == set2_json["op"]
assert set1_json["verbose"] == set2_json["verbose"]
def test_column_values_in_set_constraint(df_lending_club, local_config_path):
cvisc = columnValuesInSetConstraint(value_set={2, 5, 8, 90671227})
ltc = ValueConstraint(Op.LT, 1)
dc = DatasetConstraints(None, value_constraints={"id": [cvisc, ltc]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = dc.report()
# check if all of the rows have been reported
assert report[0][1][0][1] == len(df_lending_club)
# the number of failures should equal the number of rows - 1, since only one value in the id column (90671227) is in the set
assert report[0][1][0][2] == len(df_lending_club) - 1
def test_merge_values_in_set_constraint_different_value_set():
cvisc1 = columnValuesInSetConstraint(value_set={1, 2, 3})
cvisc2 = columnValuesInSetConstraint(value_set={3, 4, 5})
with pytest.raises(AssertionError):
cvisc1.merge(cvisc2)
def test_merge_values_in_set_constraint_same_value_set():
val_set = {"abc", "b", "c"}
cvisc1 = columnValuesInSetConstraint(value_set=val_set)
cvisc2 = columnValuesInSetConstraint(value_set=val_set)
merged = cvisc1.merge(cvisc2)
TEST_LOGGER.info(f"Serialize the merged columnValuesInSetConstraint:\n {merged.to_protobuf()}")
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == f"values are in {val_set}"
assert json_value["op"] == Op.Name(Op.IN)
assert json_value["valueSet"][0] == list(val_set)
def test_serialization_deserialization_values_in_set_constraint():
val_set = {"abc", 1, 2}
cvisc = columnValuesInSetConstraint(value_set=val_set)
cvisc.from_protobuf(cvisc.to_protobuf())
json_value = json.loads(message_to_json(cvisc.to_protobuf()))
TEST_LOGGER.info(f"Serialize columnValuesInSetConstraint from deserialized representation:\n {cvisc.to_protobuf()}")
assert json_value["name"] == f"values are in {val_set}"
assert json_value["op"] == Op.Name(Op.IN)
assert json_value["valueSet"][0] == list(val_set)
def test_column_values_in_set_wrong_datatype():
with pytest.raises(TypeError):
cvisc = columnValuesInSetConstraint(value_set=1)
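# Helper: applies containsEmailConstraint (optionally with a caller-supplied
# regex) to a small frame of valid and invalid e-mail strings and returns the
# constraint report.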
def _report_email_value_constraint_on_data_set(local_config_path, pattern=None):
df = pd.DataFrame(
[
{"email": r"<EMAIL>"}, # valid
{"email": r'"aVrrR Test \@"<EMAIL>'}, # valid
{"email": r"abc..<EMAIL>"}, # invalid
{"email": r'"sdsss\d"@gmail.<EMAIL>'}, # valid
{"email": r"customer/[email protected]"}, # valid
{"email": r".<EMAIL>"}, # invalid
{"email": r"<EMAIL>"}, # invalid
{"email": r"abs@yahoo."}, # invalid
]
)
email_constraint = containsEmailConstraint(regex_pattern=pattern)
dc = DatasetConstraints(None, value_constraints={"email": [email_constraint]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
report = dc.report()
return report
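# Helper: applies the supplied string-length constraints to a small frame of
# strings of varying lengths and returns the constraint report.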
def _apply_string_length_constraints(local_config_path, length_constraints):
df = pd.DataFrame(
[
{"str1": "length7"},
{"str1": "length_8"},
{"str1": "length__9"},
{"str1": "a 10"},
{"str1": "11 b"},
{"str1": '(*&^%^&*(24!@_+>:|}?><"\\'},
{"str1": "1b34567"},
]
)
dc = DatasetConstraints(None, value_constraints={"str1": length_constraints})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
report = dc.report()
return report
def test_string_length_constraints(local_config_path):
length_constraint7 = stringLengthEqualConstraint(length=7)
length_constraint24 = stringLengthEqualConstraint(length=24)
length_constraint7to10 = stringLengthBetweenConstraint(lower_value=7, upper_value=10)
length_constraints = [length_constraint7, length_constraint24, length_constraint7to10]
report = _apply_string_length_constraints(local_config_path, length_constraints)
# report layout: report[column_idx][1][constraint_idx] == (constraint name, total count, failure count)
assert report[0][1][0][1] == 7 and report[0][1][0][2] == 5 and report[0][1][0][0] == f"length of the string values is equal to 7"
assert report[0][1][1][1] == 7 and report[0][1][1][2] == 6 and report[0][1][1][0] == f"length of the string values is equal to 24"
assert report[0][1][2][1] == 7 and report[0][1][2][2] == 2 and report[0][1][2][0] == f"length of the string values is between 7 and 10"
def test_email_constraint(local_config_path):
report = _report_email_value_constraint_on_data_set(local_config_path)
assert report[0][1][0][1] == 8
assert report[0][1][0][2] == 4
def test_email_constraint_supply_regex_pattern(local_config_path):
report = _report_email_value_constraint_on_data_set(local_config_path, r"\S+@\S+")
assert report[0][1][0][0] == "column values match the email regex pattern"
assert report[0][1][0][1] == 8
assert report[0][1][0][2] == 1
def test_email_constraint_merge_valid():
ec1 = containsEmailConstraint(regex_pattern=r"\S+@\S+", verbose=True)
ec2 = containsEmailConstraint(regex_pattern=r"\S+@\S+")
merged = ec1.merge(ec2)
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == "column values match the email regex pattern"
assert json_value["op"] == Op.Name(Op.MATCH)
assert json_value["regexPattern"] == r"\S+@\S+"
assert json_value["verbose"] is True
def test_email_constraint_merge_invalid():
ec1 = containsEmailConstraint(regex_pattern=r"\S+@\S+", verbose=True)
ec2 = containsEmailConstraint(regex_pattern=r"\W+@\W+")
with pytest.raises(AssertionError):
ec1.merge(ec2)
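# Helper: applies containsCreditCardConstraint (optionally with a caller-supplied
# regex) to a frame of well-formed and malformed card numbers and returns the
# constraint report.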
def _report_credit_card_value_constraint_on_data_set(local_config_path, regex_pattern=None):
df = pd.DataFrame(
[
{"credit_card": "3714-496353-98431"}, # amex
{"credit_card": "3787 344936 71000"}, # amex
{"credit_card": "3056 930902 5904"}, # diners club
{"credit_card": "3065 133242 2899"}, # invalid
{"credit_card": "3852-000002-3237"}, # diners club
{"credit_card": "6011 1111 1111 1117"}, # discover
{"credit_card": "6011-0009-9013-9424"}, # discover
{"credit_card": "3530 1113 3330 0000"}, # jcb
{"credit_card": "3566-0020-2036-0505"}, # jcb
{"credit_card": "5555 5555 5555 4444"}, # master card
{"credit_card": "5105 1051 0510 5100"}, # master card
{"credit_card": "4111 1111 1111 1111"}, # visa
{"credit_card": "4012 8888 8888 1881"}, # visa
{"credit_card": "4222-2222-2222-2222"}, # visa
{"credit_card": "1111-1111-1111-1111"}, # invalid
{"credit_card": "a4111 1111 1111 1111b"}, # invalid
{"credit_card": "4111111111111111"}, # visa
{"credit_card": 12345}, # invalid
{"credit_card": "absfcvs"}, # invalid
]
)
credit_card_constraint = containsCreditCardConstraint(regex_pattern=regex_pattern)
dc = DatasetConstraints(None, value_constraints={"credit_card": [credit_card_constraint]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
return dc.report()
def test_credit_card_constraint(local_config_path):
report = _report_credit_card_value_constraint_on_data_set(local_config_path)
assert report[0][1][0][1] == 19
assert report[0][1][0][2] == 5
def test_credit_card_constraint_supply_regex_pattern(local_config_path):
report = _report_credit_card_value_constraint_on_data_set(local_config_path, r"^(?:[0-9]{4}[\s-]?){3,4}$")
assert report[0][1][0][0] == "column values match the credit card regex pattern"
assert report[0][1][0][1] == 19
assert report[0][1][0][2] == 8
def test_credit_card_constraint_merge_valid():
pattern = r"[0-9]{13,16}"
ccc1 = containsCreditCardConstraint(regex_pattern=pattern, verbose=True)
ccc2 = containsCreditCardConstraint(regex_pattern=pattern)
merged = ccc1.merge(ccc2)
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == "column values match the credit card regex pattern"
assert json_value["op"] == Op.Name(Op.MATCH)
assert json_value["regexPattern"] == pattern
assert json_value["verbose"] is True
def test_credit_card_constraint_merge_invalid():
ccc1 = containsCreditCardConstraint()
ccc2 = containsCreditCardConstraint(regex_pattern=r"[0-9]{13,16}", verbose=False)
with pytest.raises(AssertionError):
ccc1.merge(ccc2)
def test_credit_card_invalid_pattern():
with pytest.raises(TypeError):
containsCreditCardConstraint(123)
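# Helper: applies the supplied apply-function constraints (dateutil, JSON,
# JSON-schema, strftime) to a mixed column of date strings, dicts and JSON
# blobs and returns the constraint report.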
def _apply_apply_func_constraints(local_config_path, apply_func_constraints):
df = pd.DataFrame(
[
{"str1": "1990-12-1"}, # dateutil valid; strftime valid
{"str1": "1990/12/1"},
{"str1": "2005/3"},
{"str1": "2005.3.5"},
{"str1": "Jan 19, 1990"},
{"str1": "today is 2019-03-27"}, # dateutil invalid
{"str1": "Monday at 12:01am"},
{"str1": "xyz_not_a_date"}, # dateutil invalid
{"str1": "yesterday"}, # dateutil invalid
{"str1": {"name": "s", "w2w2": "dgsg", "years": 232, "abc": 1}}, # schema valid
{"str1": {"name": "s", "w2w2": 12.38, "years": 232, "abc": 1}}, # schema valid
{"str1": {"name": "s", "years": 232, "abc": 1}}, # schema valid
{"str1": {"name": "s", "abc": 1}}, # schema valid
{"str1": {"name": "s", "w2w2": "dgsg", "years": 232}}, # schema invalid
{"str1": {"name": "s", "w2w2": "dgsg", "years": "232", "abc": 1}}, # schema invalid
{"str1": {"name": 14, "w2w2": "dgsg", "years": "232", "abc": 1}}, # schema invalid
{"str1": {"name": "14", "w2w2": "dgsg", "years": 232.44, "abc": 1}}, # schema invalid
{"str1": {"w2w2": "dgsg", "years": 232, "abc": 1}}, # schema invalid
{"str1": {"years": 232}}, # schema invalid
{"str1": json.dumps({"name": "s", "w2w2": "dgsg", "years": 232, "abc": 1})}, # json valid, schema valid
{"str1": json.dumps({"name": "s", "w2w2": 12.38, "years": 232, "abc": 1})}, # json valid, schema valid
{"str1": json.dumps({"name": "s", "years": 232, "abc": 1})}, # json valid, schema valid
{"str1": json.dumps({"name": "s", "abc": 1})}, # json valid, schema valid
{"str1": json.dumps({"name": "s", "w2w2": "dgsg", "years": "232", "abc": 1})}, # json valid
{"str1": "random str : fail everything"},
{"str1": "2003-12-23"}, # strftime valid
{"str1": "2010-10-18"}, # strftime valid
{"str1": "2003-15-23"}, # strftime invalid
{"str1": "2003-12-32"}, # strftime invalid
{"str1": "10-12-32"}, # strftime invalid, dateutil valid
]
)
dc = DatasetConstraints(None, value_constraints={"str1": apply_func_constraints})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
report = dc.report()
return report
def test_apply_func_value_constraints(local_config_path):
dateutil_parseable = dateUtilParseableConstraint()
json_parseable = jsonParseableConstraint()
json_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"years": {"type": "integer"},
},
"required": ["name", "abc"],
}
matches_json_schema = matchesJsonSchemaConstraint(json_schema=json_schema)
is_strftime = strftimeFormatConstraint(format="%Y-%m-%d")
apply_constraints = [dateutil_parseable, json_parseable, matches_json_schema, is_strftime]
report = _apply_apply_func_constraints(local_config_path, apply_constraints)
# report layout: report[column_idx][1][constraint_idx] == (constraint name, total count, failure count)
assert report[0][1][0][1] == 30 and report[0][1][0][2] == 21 and report[0][1][0][0] == "column values are dateutil parseable"
assert report[0][1][1][1] == 30 and report[0][1][1][2] == 25 and report[0][1][1][0] == "column values are JSON parseable"
assert report[0][1][2][1] == 30 and report[0][1][2][2] == 22 and report[0][1][2][0] == f"column values match the provided JSON schema {json_schema}"
assert report[0][1][3][1] == 30 and report[0][1][3][2] == 27 and report[0][1][3][0] == "column values are strftime parseable"
def test_apply_func_invalid_init():
with pytest.raises(ValueError):
apply2 = ValueConstraint(Op.APPLY_FUNC, apply_function=lambda x: x)
with pytest.raises(ValueError):
apply2 = ValueConstraint(Op.APPLY_FUNC, apply_function="".startswith)
with pytest.raises(ValueError):
apply2 = ValueConstraint(Op.APPLY_FUNC, apply_function=any)
def test_apply_func_merge():
apply1 = dateUtilParseableConstraint()
apply2 = ValueConstraint(Op.APPLY_FUNC, apply_function=_matches_json_schema)
with pytest.raises(AssertionError):
apply1.merge(apply2)
apply3 = dateUtilParseableConstraint()
merged = apply1.merge(apply3)
pre_merge_json = json.loads(message_to_json(apply1.to_protobuf()))
merge_json = json.loads(message_to_json(merged.to_protobuf()))
assert pre_merge_json["name"] == merge_json["name"]
assert pre_merge_json["function"] == merge_json["function"]
assert pre_merge_json["op"] == merge_json["op"]
assert pre_merge_json["verbose"] == merge_json["verbose"]
def test_apply_func_serialization():
apply1 = dateUtilParseableConstraint()
apply2 = ValueConstraint.from_protobuf(apply1.to_protobuf())
apply1_json = json.loads(message_to_json(apply1.to_protobuf()))
apply2_json = json.loads(message_to_json(apply2.to_protobuf()))
apply1.merge(apply2)
apply2.merge(apply1)
assert apply1_json["name"] == apply2_json["name"]
assert apply1_json["function"] == apply2_json["function"]
assert apply1_json["op"] == apply2_json["op"]
assert apply1_json["verbose"] == apply2_json["verbose"]
json_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"years": {"type": "integer"},
},
"required": ["name", "abc"],
}
apply1 = matchesJsonSchemaConstraint(json_schema)
apply2 = ValueConstraint.from_protobuf(apply1.to_protobuf())
apply1_json = json.loads(message_to_json(apply1.to_protobuf()))
apply2_json = json.loads(message_to_json(apply2.to_protobuf()))
assert apply1_json["name"] == apply2_json["name"]
assert apply1_json["function"] == apply2_json["function"]
assert apply1_json["op"] == apply2_json["op"]
assert apply1_json["verbose"] == apply2_json["verbose"]
def _report_ssn_value_constraint_on_data_set(local_config_path, regex_pattern=None):
df = pd.DataFrame(
[
{"ssn": "123-01-2335"}, # valid
{"ssn": "039780012"}, # valid
{"ssn": "000231324"}, # invalid
{"ssn": "666781132"}, # invalid
{"ssn": "926-89-1234"}, # invalid
{"ssn": "001-01-0001"}, # valid
{"ssn": "122 23 0001"}, # valid
{"ssn": "1234-12-123"}, # invalid
]
)
ssn_constraint = containsSSNConstraint(regex_pattern=regex_pattern)
dc = DatasetConstraints(None, value_constraints={"ssn": [ssn_constraint]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
return dc.report()
def test_contains_ssn_constraint(local_config_path):
report = _report_ssn_value_constraint_on_data_set(local_config_path)
assert report[0][1][0][1] == 8
assert report[0][1][0][2] == 4
def test_ssn_constraint_supply_regex_pattern(local_config_path):
pattern = r"^[0-9]{3}-[0-9]{2}-[0-9]{4}$"
report = _report_ssn_value_constraint_on_data_set(local_config_path, pattern)
assert report[0][1][0][0] == "column values match the SSN regex pattern"
assert report[0][1][0][1] == 8
assert report[0][1][0][2] == 5
def test_ssn_constraint_merge_valid():
pattern = r"^[0-9]{3}-[0-9]{2}-[0-9]{4}$"
ccc1 = containsSSNConstraint(regex_pattern=pattern, verbose=True)
ccc2 = containsSSNConstraint(regex_pattern=pattern)
merged = ccc1.merge(ccc2)
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == "column values match the SSN regex pattern"
assert json_value["op"] == Op.Name(Op.MATCH)
assert json_value["regexPattern"] == pattern
assert json_value["verbose"] is True
def test_ssn_constraint_merge_invalid():
ccc1 = containsSSNConstraint()
ccc2 = containsSSNConstraint(regex_pattern=r"[0-9]{13,16}", verbose=False)
with pytest.raises(AssertionError):
ccc1.merge(ccc2)
def test_ssn_invalid_pattern():
with pytest.raises(TypeError):
containsSSNConstraint(123)
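# Helper: applies containsURLConstraint (optionally with a caller-supplied
# regex) to a small frame of URL-like strings and returns the constraint report.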
def _report_url_value_constraint_on_data_set(local_config_path, regex_pattern=None):
df = pd.DataFrame(
[
{"url": "http://www.example.com"}, # valid
{"url": "abc.test.com"}, # valid (without protocol)
{"url": "abc.w23w.asb#abc?a=2"}, # valid (without protocol)
{"url": "https://ab.abc.bc"}, # valid
{"url": "a.b.c"}, # valid
{"url": "abcd"}, # invalid
{"url": "123.w23.235"}, # valid
{"url": "asf://saf.we.12"}, # invalid
{"url": "12345"}, # invalid
{"url": "1.2"}, # invalid
]
)
url_constraint = containsURLConstraint(regex_pattern=regex_pattern)
dc = DatasetConstraints(None, value_constraints={"url": [url_constraint]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
return dc.report()
def test_contains_url_constraint(local_config_path):
report = _report_url_value_constraint_on_data_set(local_config_path)
assert report[0][1][0][1] == 10
assert report[0][1][0][2] == 4
def test_url_constraint_supply_regex_pattern(local_config_path):
pattern = r"^http(s)?:\/\/(www\.)?.+\..+$"
report = _report_url_value_constraint_on_data_set(local_config_path, pattern)
assert report[0][1][0][0] == "column values match the URL regex pattern"
assert report[0][1][0][1] == 10
assert report[0][1][0][2] == 8
def test_url_constraint_merge_valid():
pattern = r"^http(s)?://(www)?\..*\..*$"
ccc1 = containsURLConstraint(regex_pattern=pattern, verbose=False)
ccc2 = containsURLConstraint(regex_pattern=pattern)
merged = ccc1.merge(ccc2)
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == "column values match the URL regex pattern"
assert json_value["op"] == Op.Name(Op.MATCH)
assert json_value["regexPattern"] == pattern
assert json_value["verbose"] is False
def test_url_constraint_merge_invalid():
ccc1 = containsURLConstraint()
ccc2 = containsURLConstraint(regex_pattern=r"http(s)?://.+", verbose=False)
with pytest.raises(AssertionError):
ccc1.merge(ccc2)
def test_url_invalid_pattern():
with pytest.raises(TypeError):
containsURLConstraint(2124)
def test_summary_constraint_quantile_invalid():
with pytest.raises(ValueError):
SummaryConstraint("stddev", op=Op.LT, value=2, quantile_value=0.2)
with pytest.raises(ValueError):
SummaryConstraint("quantile", op=Op.GT, value=2)
def test_quantile_between_constraint_apply(local_config_path, df_lending_club):
qc = quantileBetweenConstraint(quantile_value=0.25, lower_value=13308, upper_value=241001)
summary_constraint = {"annual_inc": [qc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraint)
assert report[0][1][0][0] == f"0.25-th quantile value is between 13308 and 241001"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_merge_quantile_between_constraint_different_values():
qc1 = quantileBetweenConstraint(quantile_value=0.25, lower_value=0, upper_value=2)
qc2 = quantileBetweenConstraint(quantile_value=0.25, lower_value=1, upper_value=2)
with pytest.raises(AssertionError):
qc1.merge(qc2)
def test_merge_quantile_between_constraint_same_values():
qc1 = quantileBetweenConstraint(quantile_value=0.5, lower_value=0, upper_value=5)
qc2 = quantileBetweenConstraint(quantile_value=0.5, lower_value=0, upper_value=5)
merged = qc1.merge(qc2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == f"0.5-th quantile value is between 0 and 5"
assert message["firstField"] == "quantile"
assert message["op"] == Op.Name(Op.BTWN)
assert pytest.approx(message["between"]["lowerValue"], 0.001) == 0.0
assert pytest.approx(message["between"]["upperValue"], 0.001) == 5.0
assert message["verbose"] is False
def test_serialization_deserialization_quantile_between_constraint():
qc1 = quantileBetweenConstraint(quantile_value=0.5, lower_value=1.24, upper_value=6.63, verbose=True)
qc1.from_protobuf(qc1.to_protobuf())
json_value = json.loads(message_to_json(qc1.to_protobuf()))
assert json_value["name"] == f"0.5-th quantile value is between 1.24 and 6.63"
assert json_value["firstField"] == "quantile"
assert json_value["op"] == Op.Name(Op.BTWN)
assert pytest.approx(json_value["between"]["lowerValue"], 0.001) == 1.24
assert pytest.approx(json_value["between"]["upperValue"], 0.001) == 6.63
assert json_value["verbose"] is True
def test_quantile_between_wrong_datatype():
with pytest.raises(TypeError):
quantileBetweenConstraint(quantile_value=[0.5], lower_value=1.24, upper_value=6.63, verbose=True)
with pytest.raises(TypeError):
quantileBetweenConstraint(quantile_value=0.5, lower_value="1.24", upper_value=6.63, verbose=True)
with pytest.raises(TypeError):
quantileBetweenConstraint(quantile_value=0.3, lower_value=1.24, upper_value=[6.63], verbose=True)
with pytest.raises(ValueError):
quantileBetweenConstraint(quantile_value=0.3, lower_value=2.3, upper_value=1.5, verbose=True)
def test_unique_value_count_between_constraint_apply(local_config_path, df_lending_club):
uc = columnUniqueValueCountBetweenConstraint(lower_value=5, upper_value=50)
summary_constraint = {"annual_inc": [uc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraint)
assert report[0][1][0][0] == f"number of unique values is between 5 and 50"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_merge_unique_value_count_between_constraint_different_values():
u1 = columnUniqueValueCountBetweenConstraint(lower_value=0, upper_value=2)
u2 = columnUniqueValueCountBetweenConstraint(lower_value=1, upper_value=2)
with pytest.raises(AssertionError):
u1.merge(u2)
def test_merge_unique_value_count_between_constraint_same_values():
u1 = columnUniqueValueCountBetweenConstraint(lower_value=0, upper_value=5)
u2 = columnUniqueValueCountBetweenConstraint(lower_value=0, upper_value=5)
merged = u1.merge(u2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == f"number of unique values is between 0 and 5"
assert message["firstField"] == "unique_count"
assert message["op"] == Op.Name(Op.BTWN)
assert pytest.approx(message["between"]["lowerValue"], 0.001) == 0.0
assert pytest.approx(message["between"]["upperValue"], 0.001) == 5.0
assert message["verbose"] is False
def test_serialization_deserialization_unique_value_count_between_constraint():
u1 = columnUniqueValueCountBetweenConstraint(lower_value=15, upper_value=50, verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
assert json_value["name"] == f"number of unique values is between 15 and 50"
assert json_value["firstField"] == "unique_count"
assert json_value["op"] == Op.Name(Op.BTWN)
assert pytest.approx(json_value["between"]["lowerValue"], 0.001) == 15
assert pytest.approx(json_value["between"]["upperValue"], 0.001) == 50
assert json_value["verbose"] is True
def test_unique_count_between_constraint_wrong_datatype():
with pytest.raises(ValueError):
columnUniqueValueCountBetweenConstraint(lower_value="0", upper_value=1, verbose=True)
with pytest.raises(ValueError):
columnUniqueValueCountBetweenConstraint(lower_value=5, upper_value=6.63, verbose=True)
with pytest.raises(ValueError):
columnUniqueValueCountBetweenConstraint(lower_value=1, upper_value=0)
def test_unique_value_proportion_between_constraint_apply(local_config_path, df_lending_club):
uc = columnUniqueValueProportionBetweenConstraint(lower_fraction=0.6, upper_fraction=0.9)
summary_constraint = {"annual_inc": [uc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraint)
assert report[0][1][0][0] == f"proportion of unique values is between 0.6 and 0.9"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_merge_unique_value_proportion_between_constraint_different_values():
u1 = columnUniqueValueProportionBetweenConstraint(lower_fraction=0.2, upper_fraction=0.3)
u2 = columnUniqueValueProportionBetweenConstraint(lower_fraction=0.1, upper_fraction=0.3)
with pytest.raises(AssertionError):
u1.merge(u2)
def test_merge_unique_value_proportion_between_constraint_same_values():
u1 = columnUniqueValueProportionBetweenConstraint(lower_fraction=0.1, upper_fraction=0.5)
u2 = columnUniqueValueProportionBetweenConstraint(lower_fraction=0.1, upper_fraction=0.5)
merged = u1.merge(u2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == f"proportion of unique values is between 0.1 and 0.5"
assert message["firstField"] == "unique_proportion"
assert message["op"] == Op.Name(Op.BTWN)
assert pytest.approx(message["between"]["lowerValue"], 0.001) == 0.1
assert pytest.approx(message["between"]["upperValue"], 0.001) == 0.5
assert message["verbose"] is False
def test_serialization_deserialization_unique_value_proportion_between_constraint():
u1 = columnUniqueValueProportionBetweenConstraint(lower_fraction=0.6, upper_fraction=0.7, verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
assert json_value["name"] == f"proportion of unique values is between 0.6 and 0.7"
assert json_value["firstField"] == "unique_proportion"
assert json_value["op"] == Op.Name(Op.BTWN)
assert pytest.approx(json_value["between"]["lowerValue"], 0.001) == 0.6
assert pytest.approx(json_value["between"]["upperValue"], 0.001) == 0.7
assert json_value["verbose"] is True
def test_unique_proportion_between_constraint_wrong_datatype():
with pytest.raises(ValueError):
columnUniqueValueProportionBetweenConstraint(lower_fraction=0, upper_fraction=1.0, verbose=True)
with pytest.raises(ValueError):
columnUniqueValueProportionBetweenConstraint(lower_fraction=0.2, upper_fraction=0.1, verbose=True)
with pytest.raises(ValueError):
columnUniqueValueProportionBetweenConstraint(lower_fraction=0.4, upper_fraction=2)
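# End-to-end check of the table-shape constraints (row count, column existence,
# column-set equality): the constraints are re-applied while new columns and a
# new row are logged, and the total/failure counts are verified after each pass.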
def test_table_shape_constraints(df_lending_club, local_config_path):
rows = numberOfRowsConstraint(n_rows=10)
rows_2 = numberOfRowsConstraint(n_rows=len(df_lending_club.index))
column_exist = columnExistsConstraint("no_WAY")
column_exist2 = columnExistsConstraint("loan_amnt")
set1 = set(["col1", "col2"])
set2 = set(df_lending_club.columns)
columns_match = columnsMatchSetConstraint(set1)
columns_match2 = columnsMatchSetConstraint(set2)
table_shape_constraints = [rows, rows_2, column_exist, column_exist2, columns_match, columns_match2]
dc = DatasetConstraints(None, table_shape_constraints=table_shape_constraints)
config = load_config(local_config_path)
session = session_from_config(config)
logger = session.logger(dataset_name="test.data", constraints=dc)
logger.log_dataframe(df_lending_club)
report = logger.profile.apply_table_shape_constraints()
assert len(report) == 6
# each entry of the table-shape report is (auto-generated constraint name, total applications, failures)
assert report[0][0] == f"The number of rows in the table equals 10"
assert report[0][1] == 1
assert report[0][2] == 1
assert report[1][0] == f"The number of rows in the table equals {len(df_lending_club.index)}"
assert report[1][1] == 1
assert report[1][2] == 0
assert report[2][0] == f"The column no_WAY exists in the table"
assert report[2][1] == 1
assert report[2][2] == 1
assert report[3][0] == f"The column loan_amnt exists in the table"
assert report[3][1] == 1
assert report[3][2] == 0
assert report[4][0] == f"The columns of the table are equal to the set {set1}"
assert report[4][1] == 1
assert report[4][2] == 1
reference_set_str = ""
if len(set2) > MAX_SET_DISPLAY_MESSAGE_LENGTH:
tmp_set = set(list(set2)[:MAX_SET_DISPLAY_MESSAGE_LENGTH])
reference_set_str = f"{str(tmp_set)[:-1]}, ...}}"
else:
reference_set_str = str(set2)
assert report[5][0] == f"The columns of the table are equal to the set {reference_set_str}"
assert report[5][1] == 1
assert report[5][2] == 0
logger.log({"no_WAY": 1}) # logging a new non existent column
report2 = logger.profile.apply_table_shape_constraints()
assert report2[1][0] == f"The number of rows in the table equals {len(df_lending_club.index)}"
assert report2[1][1] == 2
assert report2[1][2] == 0
# on the second application the row-count constraint still passes, because
# logging a new column does not change the total number of rows
assert report2[2][0] == f"The column no_WAY exists in the table"
assert report2[2][1] == 2
assert report2[2][2] == 1
# after logging the new column 'no_WAY', the column-exists constraint does not
# fail a second time because the column is now present
assert report2[5][0] == f"The columns of the table are equal to the set {reference_set_str}"
assert report2[5][1] == 2
assert report2[5][2] == 1
# after logging the new column 'no_WAY', this constraint fails the second time because the reference set
# does not contain the new column 'no_WAY'
set3 = set(set2)
set3.add("no_WAY")
columns_match3 = columnsMatchSetConstraint(set3)
report3 = logger.profile.apply_table_shape_constraints(SummaryConstraints([columns_match3]))
reference_set_str2 = ""
if len(set3) > MAX_SET_DISPLAY_MESSAGE_LENGTH:
tmp_set = set(list(set3)[:MAX_SET_DISPLAY_MESSAGE_LENGTH])
reference_set_str2 = f"{str(tmp_set)[:-1]}, ...}}"
else:
reference_set_str2 = str(set3)
assert report3[0][0] == f"The columns of the table are equal to the set {reference_set_str2}"
assert report3[0][1] == 1
assert report3[0][2] == 0
# with 'no_WAY' added to set3, the columns-match constraint built from it no longer fails
log_dict = dict()
# logging a new value for every column (one more row)
for column in df_lending_club.columns:
value = df_lending_club[column][10] # sample from the column
log_dict[column] = value
logger.log(log_dict)
report4 = logger.profile.apply_table_shape_constraints()
assert report4[1][0] == f"The number of rows in the table equals {len(df_lending_club.index)}"
assert report4[1][1] == 3
assert report4[1][2] == 1
# on the third application the original row-count constraint finally fails, because an extra row has been logged
rows_3 = numberOfRowsConstraint(n_rows=len(df_lending_club.index) + 1) # 1 new row
report5 = logger.profile.apply_table_shape_constraints(SummaryConstraints([rows_3]))
assert report5[0][0] == f"The number of rows in the table equals {len(df_lending_club.index)+1}"
assert report5[0][1] == 1
assert report5[0][2] == 0
# the new numberOfRowsConstraint with n_rows=previous_n_rows+1 passes
profile = logger.close() # closing the logger and getting the DatasetProfile
assert profile.total_row_number == rows_3.value
def test_table_shape_constraint_invalid_init():
with pytest.raises(TypeError):
SummaryConstraint("columns", Op.EQ, reference_set=1)
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.CONTAIN, reference_set=1)
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.EQ, reference_set=1)
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.CONTAIN, reference_set=1)
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.CONTAIN, 1)
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.CONTAIN, second_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.EQ, 1)
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.EQ, second_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.CONTAIN, 1)
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.CONTAIN, second_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.EQ, second_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.CONTAIN, third_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.EQ, third_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.CONTAIN, third_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.EQ, third_field="aaa")
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.CONTAIN, upper_value=2)
with pytest.raises(ValueError):
SummaryConstraint("columns", Op.EQ, upper_value=2)
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.CONTAIN, upper_value=2)
with pytest.raises(ValueError):
SummaryConstraint("total_row_number", Op.EQ, upper_value=2)
def test_table_shape_no_merge_different_set():
set_c_1 = SummaryConstraint("columns", Op.EQ, reference_set=[1, 2, 3])
set_c_2 = SummaryConstraint("columns", Op.EQ, reference_set=[2, 3, 4, 5])
with pytest.raises(AssertionError):
set_c_1.merge(set_c_2)
def test_table_shape_merge():
set_c_1 = SummaryConstraint("columns", Op.EQ, reference_set=[1, 2, 3])
set_c_2 = columnsMatchSetConstraint(reference_set=[1, 2, 3])
set_c_1._name = set_c_2.name
merged = set_c_1.merge(set_c_2)
pre_merge_json = json.loads(message_to_json(set_c_1.to_protobuf()))
merge_json = json.loads(message_to_json(merged.to_protobuf()))
assert pre_merge_json["name"] == merge_json["name"]
assert pre_merge_json["referenceSet"] == merge_json["referenceSet"]
assert pre_merge_json["firstField"] == merge_json["firstField"]
assert pre_merge_json["op"] == merge_json["op"]
assert pre_merge_json["verbose"] == merge_json["verbose"]
set_c_1 = SummaryConstraint("columns", Op.CONTAIN, "c1")
set_c_2 = columnExistsConstraint(column="c1")
set_c_1._name = set_c_2.name
merged = set_c_1.merge(set_c_2)
pre_merge_json = json.loads(message_to_json(set_c_1.to_protobuf()))
merge_json = json.loads(message_to_json(merged.to_protobuf()))
assert pre_merge_json["name"] == merge_json["name"]
assert pre_merge_json["valueStr"] == merge_json["valueStr"]
assert pre_merge_json["firstField"] == merge_json["firstField"]
assert pre_merge_json["op"] == merge_json["op"]
assert pre_merge_json["verbose"] == merge_json["verbose"]
set_c_1 = SummaryConstraint("total_row_number", Op.EQ, 2)
set_c_2 = numberOfRowsConstraint(n_rows=2)
set_c_1._name = set_c_2.name
merged = set_c_1.merge(set_c_2)
pre_merge_json = json.loads(message_to_json(set_c_1.to_protobuf()))
merge_json = json.loads(message_to_json(merged.to_protobuf()))
assert pre_merge_json["name"] == merge_json["name"]
assert pre_merge_json["value"] == merge_json["value"]
assert pre_merge_json["firstField"] == merge_json["firstField"]
assert pre_merge_json["op"] == merge_json["op"]
assert pre_merge_json["verbose"] == merge_json["verbose"]
def test_table_shape_serialization():
ts1 = columnsMatchSetConstraint([1, 2, 3])
ts2 = SummaryConstraint.from_protobuf(ts1.to_protobuf())
ts1_json = json.loads(message_to_json(ts1.to_protobuf()))
ts2_json = json.loads(message_to_json(ts2.to_protobuf()))
ts1.merge(ts2)
ts2.merge(ts1)
assert ts1_json["name"] == ts2_json["name"]
assert ts1_json["referenceSet"] == ts2_json["referenceSet"]
assert ts1_json["firstField"] == ts2_json["firstField"]
assert ts1_json["op"] == ts2_json["op"]
assert ts1_json["verbose"] == ts2_json["verbose"]
ts1 = columnExistsConstraint("c1")
ts1.to_protobuf()
ts2 = SummaryConstraint.from_protobuf(ts1.to_protobuf())
ts1_json = json.loads(message_to_json(ts1.to_protobuf()))
ts2_json = json.loads(message_to_json(ts2.to_protobuf()))
ts1.merge(ts2)
ts2.merge(ts1)
assert ts1_json["name"] == ts2_json["name"]
assert ts1_json["valueStr"] == ts2_json["valueStr"]
assert ts1_json["firstField"] == ts2_json["firstField"]
assert ts1_json["op"] == ts2_json["op"]
assert ts1_json["verbose"] == ts2_json["verbose"]
ts1 = numberOfRowsConstraint(2)
ts2 = SummaryConstraint.from_protobuf(ts1.to_protobuf())
ts1_json = json.loads(message_to_json(ts1.to_protobuf()))
ts2_json = json.loads(message_to_json(ts2.to_protobuf()))
ts1.merge(ts2)
ts2.merge(ts1)
assert ts1_json["name"] == ts2_json["name"]
assert ts1_json["value"] == ts2_json["value"]
assert ts1_json["firstField"] == ts2_json["firstField"]
assert ts1_json["op"] == ts2_json["op"]
assert ts1_json["verbose"] == ts2_json["verbose"]
def _get_sample_dataset_constraints():
cvisc = columnValuesInSetConstraint(value_set={2, 5, 8})
ltc = ValueConstraint(Op.LT, 1)
min_gt_constraint = SummaryConstraint("min", Op.GT, value=100)
max_le_constraint = SummaryConstraint("max", Op.LE, value=5)
set1 = set(["col1", "col2"])
columns_match_constraint = columnsMatchSetConstraint(set1)
val_set = {(1, 2), (3, 5)}
col_set = ["A", "B"]
mcv_constraints = [
columnValuesUniqueWithinRow(column_A="A", verbose=True),
columnPairValuesInSetConstraint(column_A="A", column_B="B", value_set=val_set),
sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=col_set, value=100),
]
return DatasetConstraints(
None,
value_constraints={"annual_inc": [cvisc, ltc]},
summary_constraints={"annual_inc": [max_le_constraint, min_gt_constraint]},
table_shape_constraints=[columns_match_constraint],
multi_column_value_constraints=mcv_constraints,
)
def _assert_dc_props_equal(dc, dc_deserialized):
props = dc.dataset_properties
deser_props = dc_deserialized.dataset_properties
if all([props, deser_props]):
pm_json = json.loads(message_to_json(props))
deser_pm_json = json.loads(message_to_json(deser_props))
for (k, v), (k_deser, v_deser) in zip(pm_json.items(), deser_pm_json.items()):
assert k == k_deser
if all([v, v_deser]):
v = sorted(v) if isinstance(v, list) else v
v_deser = sorted(v_deser) if isinstance(v_deser, list) else v_deser
assert v == v_deser
def _assert_constraints_equal(constraints, deserialized_constraints):
for (name, c), (deser_name, deser_c) in zip(constraints.items(), deserialized_constraints.items()):
assert name == deser_name
a = json.loads(message_to_json(c.to_protobuf()))
b = json.loads(message_to_json(deser_c.to_protobuf()))
for (k, v), (k_deser, v_deser) in zip(a.items(), b.items()):
assert k == k_deser
if all([v, v_deser]):
v = sorted(v) if isinstance(v, list) else v
v_deser = sorted(v_deser) if isinstance(v_deser, list) else v_deser
assert v == v_deser
def _get_all_value_constraints(constraints):
all_v_constraints = dict()
all_v_constraints.update(constraints.raw_value_constraints)
all_v_constraints.update(constraints.coerced_type_constraints)
return all_v_constraints
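# Full protobuf round-trip of DatasetConstraints: every constraint map is
# compared field by field and the generated reports must be identical.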
def test_dataset_constraints_serialization():
dc = _get_sample_dataset_constraints()
dc_deser = DatasetConstraints.from_protobuf(dc.to_protobuf())
_assert_dc_props_equal(dc, dc_deser)
value_constraints = dc.value_constraint_map
summary_constraints = dc.summary_constraint_map
table_shape_constraints = dc.table_shape_constraints
multi_column_value_constraints = dc.multi_column_value_constraints
deser_v_c = dc_deser.value_constraint_map
deser_s_c = dc_deser.summary_constraint_map
deser_ts_c = dc_deser.table_shape_constraints
deser_mcv_c = dc_deser.multi_column_value_constraints
for (column, constraints), (deser_column, deser_constraints) in zip(value_constraints.items(), deser_v_c.items()):
assert column == deser_column
all_v_constraints = _get_all_value_constraints(constraints)
all_v_constraints_deser = _get_all_value_constraints(deser_constraints)
_assert_constraints_equal(all_v_constraints, all_v_constraints_deser)
for (column, constraints), (deser_column, deser_constraints) in zip(summary_constraints.items(), deser_s_c.items()):
assert column == deser_column
_assert_constraints_equal(constraints.constraints, deser_constraints.constraints)
_assert_constraints_equal(table_shape_constraints.constraints, deser_ts_c.constraints)
all_mc_constraints = _get_all_value_constraints(multi_column_value_constraints)
all_mc_constraints_deser = _get_all_value_constraints(deser_mcv_c)
_assert_constraints_equal(all_mc_constraints, all_mc_constraints_deser)
report = dc.report()
report_deser = dc_deser.report()
assert report == report_deser
def test_most_common_value_in_set_constraint_apply(local_config_path, df_lending_club):
val_set1 = {2, 3.5, 5000, 52000.0}
val_set2 = {1, 2.3, "abc"}
mcvc1 = columnMostCommonValueInSetConstraint(value_set=val_set1)
mcvc2 = columnMostCommonValueInSetConstraint(value_set=val_set2)
summary_constraints = {"loan_amnt": [mcvc1], "funded_amnt": [mcvc2]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints)
assert report[0][1][0][0] == f"most common value is in {val_set1}"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
assert report[1][1][0][0] == f"most common value is in {val_set2}"
assert report[1][1][0][1] == 1
assert report[1][1][0][2] == 1
def test_merge_most_common_value_in_set_constraint_different_values():
c1 = columnMostCommonValueInSetConstraint(value_set={1, 3})
c2 = columnMostCommonValueInSetConstraint(value_set={1, 5.0})
with pytest.raises(AssertionError):
c1.merge(c2)
def test_merge_most_common_value_in_set_constraint_same_values():
val_set = {1, 2, 3}
u1 = columnMostCommonValueInSetConstraint(value_set=val_set)
u2 = columnMostCommonValueInSetConstraint(value_set=val_set)
merged = u1.merge(u2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == f"most common value is in {val_set}"
assert message["firstField"] == "most_common_value"
assert message["op"] == Op.Name(Op.IN)
assert message["referenceSet"] == list(val_set)
assert message["verbose"] is False
def test_serialization_deserialization_most_common_value_in_set_constraint():
val_set = {1, "a", "abc"}
u1 = columnMostCommonValueInSetConstraint(value_set=val_set, verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
assert json_value["name"] == f"most common value is in {val_set}"
assert json_value["firstField"] == "most_common_value"
assert json_value["op"] == Op.Name(Op.IN)
assert json_value["referenceSet"] == list(val_set)
assert json_value["verbose"] is True
def test_most_common_value_in_set_constraint_wrong_datatype():
with pytest.raises(TypeError):
columnMostCommonValueInSetConstraint(value_set=2.3, verbose=True)
def test_column_values_not_null_constraint_apply_pass(local_config_path, df_lending_club):
nnc1 = columnValuesNotNullConstraint()
summary_constraints = {"annual_inc": [nnc1]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints)
TEST_LOGGER.info(f"Apply columnValuesNotNullConstraint report:\n{report}")
assert report[0][1][0][0] == f"does not contain missing values"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_column_values_not_null_constraint_apply_fail(local_config_path):
nnc2 = columnValuesNotNullConstraint()
df = pd.DataFrame([{"value": 1}, {"value": 5.2}, {"value": None}, {"value": 2.3}, {"value": None}])
summary_constraints = {"value": [nnc2]}
report = _apply_summary_constraints_on_dataset(df, local_config_path, summary_constraints)
TEST_LOGGER.info(f"Apply columnValuesNotNullConstraint report:\n{report}")
assert report[0][1][0][0] == f"does not contain missing values"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 1
def test_merge_column_values_not_null_constraint_different_values(local_config_path, df_lending_club):
nnc1 = columnValuesNotNullConstraint()
nnc2 = columnValuesNotNullConstraint()
summary_constraints1 = {"annual_inc": [nnc1]}
summary_constraints2 = {"annual_inc": [nnc2]}
report1 = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints1)
report2 = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints2)
assert report1[0][1][0][0] == f"does not contain missing values"
assert report1[0][1][0][1] == 1
assert report1[0][1][0][2] == 0
assert report2[0][1][0][0] == f"does not contain missing values"
assert report2[0][1][0][1] == 1
assert report2[0][1][0][2] == 0
merged = nnc1.merge(nnc2)
report_merged = merged.report()
print(report_merged)
TEST_LOGGER.info(f"Merged report of columnValuesNotNullConstraint: {report_merged}")
assert merged.total == 2
assert merged.failures == 0
def test_serialization_deserialization_column_values_not_null_constraint():
nnc = columnValuesNotNullConstraint(verbose=True)
nnc.from_protobuf(nnc.to_protobuf())
json_value = json.loads(message_to_json(nnc.to_protobuf()))
assert json_value["name"] == f"does not contain missing values"
assert json_value["firstField"] == "null_count"
assert json_value["op"] == Op.Name(Op.EQ)
assert pytest.approx(json_value["value"], 0.01) == 0
assert json_value["verbose"] is True
def test_missing_values_proportion_between_constraint_apply_pass(local_config_path, df_lending_club):
mvpbc = missingValuesProportionBetweenConstraint(lower_fraction=0.0, upper_fraction=0.3)
summary_constraint = {"annual_inc": [mvpbc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraint)
assert report[0][1][0][0] == f"missing values proportion is between 0.0% and 30.0%"
assert report[0][1][0][1] == 1 # number of executions
assert report[0][1][0][2] == 0 # number of failures
def test_missing_values_proportion_between_constraint_apply_fail(local_config_path, df_lending_club):
mvpbc = missingValuesProportionBetweenConstraint(lower_fraction=0.3, upper_fraction=0.8)
summary_constraint = {"annual_inc": [mvpbc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraint)
assert report[0][1][0][0] == f"missing values proportion is between 30.0% and 80.0%"
assert report[0][1][0][1] == 1 # number of executions
assert report[0][1][0][2] == 1 # number of failures
def test_merge_missing_values_proportion_between_constraint_different_values():
m1 = missingValuesProportionBetweenConstraint(lower_fraction=0.2, upper_fraction=0.3)
m2 = missingValuesProportionBetweenConstraint(lower_fraction=0.1, upper_fraction=0.3)
with pytest.raises(AssertionError):
m1.merge(m2)
def test_merge_missing_values_proportion_between_constraint_same_values():
m1 = missingValuesProportionBetweenConstraint(lower_fraction=0.1, upper_fraction=0.5)
m2 = missingValuesProportionBetweenConstraint(lower_fraction=0.1, upper_fraction=0.5)
merged = m1.merge(m2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == "missing values proportion is between 10.0% and 50.0%"
assert message["firstField"] == "missing_values_proportion"
assert message["op"] == Op.Name(Op.BTWN)
assert pytest.approx(message["between"]["lowerValue"], 0.001) == 0.1
assert pytest.approx(message["between"]["upperValue"], 0.001) == 0.5
assert message["verbose"] is False
def test_serialization_deserialization_missing_values_proportion_between_constraint():
u1 = missingValuesProportionBetweenConstraint(lower_fraction=0.4, upper_fraction=0.7, verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
assert json_value["name"] == "missing values proportion is between 40.0% and 70.0%"
assert json_value["firstField"] == "missing_values_proportion"
assert json_value["op"] == Op.Name(Op.BTWN)
assert pytest.approx(json_value["between"]["lowerValue"], 0.001) == 0.4
assert pytest.approx(json_value["between"]["upperValue"], 0.001) == 0.7
assert json_value["verbose"] is True
def test_serialization_deserialization_missing_values_proportion_between_constraint_with_provided_name():
u1 = missingValuesProportionBetweenConstraint(lower_fraction=0.05, upper_fraction=0.1, name="missing values constraint", verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
assert json_value["name"] == "missing values constraint"
assert json_value["firstField"] == "missing_values_proportion"
assert json_value["op"] == Op.Name(Op.BTWN)
assert pytest.approx(json_value["between"]["lowerValue"], 0.001) == 0.05
assert pytest.approx(json_value["between"]["upperValue"], 0.001) == 0.1
assert json_value["verbose"] is True
def test_missing_values_proportion_between_constraint_wrong_datatype():
with pytest.raises(ValueError):
missingValuesProportionBetweenConstraint(lower_fraction=0, upper_fraction=1.0, verbose=True)
with pytest.raises(ValueError):
missingValuesProportionBetweenConstraint(lower_fraction=0.2, upper_fraction=0.1, verbose=True)
with pytest.raises(ValueError):
missingValuesProportionBetweenConstraint(lower_fraction=0.4, upper_fraction=2)
with pytest.raises(ValueError):
missingValuesProportionBetweenConstraint(lower_fraction="1", upper_fraction=2.0, verbose=False)
def test_column_values_type_equals_constraint_apply(local_config_path, df_lending_club):
cvtc = columnValuesTypeEqualsConstraint(expected_type=InferredType.Type.FRACTIONAL)
summary_constraints = {"annual_inc": [cvtc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints)
assert report[0][1][0][0] == f"type of the column values is {InferredType.Type.Name(InferredType.Type.FRACTIONAL)}"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_merge_column_values_type_equals_constraint_different_values():
c1 = columnValuesTypeEqualsConstraint(expected_type=InferredType.Type.FRACTIONAL)
c2 = columnValuesTypeEqualsConstraint(expected_type=InferredType.Type.NULL)
with pytest.raises(AssertionError):
c1.merge(c2)
def test_merge_column_values_type_equals_constraint_same_values():
u1 = columnValuesTypeEqualsConstraint(expected_type=1)
u2 = columnValuesTypeEqualsConstraint(expected_type=1)
merged = u1.merge(u2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == f"type of the column values is {InferredType.Type.Name(1)}"
assert message["firstField"] == "column_values_type"
assert message["op"] == Op.Name(Op.EQ)
assert message["value"] == 1
assert message["verbose"] is False
def test_serialization_deserialization_column_values_type_equals_constraint():
u1 = columnValuesTypeEqualsConstraint(expected_type=InferredType.Type.STRING, verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
assert json_value["name"] == f"type of the column values is {InferredType.Type.Name(InferredType.Type.STRING)}"
assert json_value["firstField"] == "column_values_type"
assert json_value["op"] == Op.Name(Op.EQ)
assert json_value["value"] == InferredType.Type.STRING
assert json_value["verbose"] is True
def test_column_values_type_equals_constraint_wrong_datatype():
with pytest.raises(ValueError):
columnValuesTypeEqualsConstraint(expected_type=2.3, verbose=True)
with pytest.raises(ValueError):
columnValuesTypeEqualsConstraint(expected_type="FRACTIONAL", verbose=True)
def test_column_values_type_in_set_constraint_apply(local_config_path, df_lending_club):
type_set = {InferredType.Type.FRACTIONAL, InferredType.Type.INTEGRAL}
cvtc = columnValuesTypeInSetConstraint(type_set=type_set)
summary_constraint = {"annual_inc": [cvtc]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraint)
type_names = {InferredType.Type.Name(t) for t in type_set}
assert report[0][1][0][0] == f"type of the column values is in {type_names}"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_merge_column_values_type_in_set_constraint_different_values():
c1 = columnValuesTypeInSetConstraint(type_set={InferredType.Type.INTEGRAL, InferredType.Type.STRING})
c2 = columnValuesTypeInSetConstraint(type_set={InferredType.Type.INTEGRAL, InferredType.Type.NULL})
with pytest.raises(AssertionError):
c1.merge(c2)
def test_merge_column_values_type_in_set_constraint_same_values():
type_set = {InferredType.Type.INTEGRAL, InferredType.Type.STRING}
c1 = columnValuesTypeInSetConstraint(type_set=type_set)
c2 = columnValuesTypeInSetConstraint(type_set=type_set)
merged = c1.merge(c2)
message = json.loads(message_to_json(merged.to_protobuf()))
type_names = {InferredType.Type.Name(t) for t in type_set}
assert message["name"] == f"type of the column values is in {type_names}"
assert message["firstField"] == "column_values_type"
assert message["op"] == Op.Name(Op.IN)
assert message["referenceSet"] == list(type_set)
assert message["verbose"] is False
def test_serialization_deserialization_column_values_type_in_set_constraint():
type_set = {InferredType.Type.STRING, InferredType.Type.INTEGRAL}
u1 = columnValuesTypeInSetConstraint(type_set=type_set, verbose=True)
u1.from_protobuf(u1.to_protobuf())
json_value = json.loads(message_to_json(u1.to_protobuf()))
type_names = {InferredType.Type.Name(t) if isinstance(t, int) else InferredType.Type.Name(t.type) for t in type_set}
assert json_value["name"] == f"type of the column values is in {type_names}"
assert json_value["firstField"] == "column_values_type"
assert json_value["op"] == Op.Name(Op.IN)
assert json_value["referenceSet"] == list(type_set)
assert json_value["verbose"] is True
def test_column_values_type_in_set_constraint_wrong_datatype():
with pytest.raises(TypeError):
columnValuesTypeInSetConstraint(type_set={2.3, 1}, verbose=True)
with pytest.raises(TypeError):
columnValuesTypeInSetConstraint(type_set={"FRACTIONAL", 2}, verbose=True)
with pytest.raises(TypeError):
columnValuesTypeInSetConstraint(type_set="ABCD")
def test_entropy_between_constraint_numeric_apply(local_config_path, df_lending_club):
ec = approximateEntropyBetweenConstraint(lower_value=0.4, upper_value=0.5)
summary_constraint = {"annual_inc": [ec]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints=summary_constraint)
# numeric
assert report[0][1][0][0] == f"approximate entropy is between 0.4 and 0.5"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 1
def test_entropy_between_constraint_categorical_apply(local_config_path, df_lending_club):
ec = approximateEntropyBetweenConstraint(lower_value=0.6, upper_value=1.5)
summary_constraint = {"grade": [ec]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints=summary_constraint)
# categorical
assert report[0][1][0][0] == f"approximate entropy is between 0.6 and 1.5"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 0
def test_entropy_between_constraint_null_apply(local_config_path, df_lending_club):
ec = approximateEntropyBetweenConstraint(lower_value=0.6, upper_value=1.5)
summary_constraint = {"member_id": [ec]}
report = _apply_summary_constraints_on_dataset(df_lending_club, local_config_path, summary_constraints=summary_constraint)
    # null column (member_id has no values, so the entropy check cannot pass)
assert report[0][1][0][0] == f"approximate entropy is between 0.6 and 1.5"
assert report[0][1][0][1] == 1
assert report[0][1][0][2] == 1
def test_merge_entropy_between_constraint_different_values():
e1 = approximateEntropyBetweenConstraint(lower_value=1, upper_value=2.4)
e2 = approximateEntropyBetweenConstraint(lower_value=1, upper_value=2.6)
with pytest.raises(AssertionError):
e1.merge(e2)
def test_merge_entropy_between_constraint_same_values():
e1 = approximateEntropyBetweenConstraint(lower_value=1, upper_value=3.2)
e2 = approximateEntropyBetweenConstraint(lower_value=1, upper_value=3.2)
merged = e1.merge(e2)
message = json.loads(message_to_json(merged.to_protobuf()))
assert message["name"] == f"approximate entropy is between 1 and 3.2"
assert message["firstField"] == "entropy"
assert message["op"] == Op.Name(Op.BTWN)
assert pytest.approx(message["between"]["lowerValue"], 0.01) == 1
assert pytest.approx(message["between"]["upperValue"], 0.01) == 3.2
assert message["verbose"] is False
def test_serialization_deserialization_entropy_between_constraint():
e1 = approximateEntropyBetweenConstraint(lower_value=0.3, upper_value=1.2, verbose=True)
e1.from_protobuf(e1.to_protobuf())
json_value = json.loads(message_to_json(e1.to_protobuf()))
assert json_value["name"] == f"approximate entropy is between 0.3 and 1.2"
assert json_value["firstField"] == "entropy"
assert json_value["op"] == Op.Name(Op.BTWN)
assert pytest.approx(json_value["between"]["lowerValue"], 0.01) == 0.3
assert pytest.approx(json_value["between"]["upperValue"], 0.01) == 1.2
assert json_value["verbose"] is True
def test_entropy_between_constraint_wrong_datatype():
with pytest.raises(TypeError):
approximateEntropyBetweenConstraint(lower_value="2", upper_value=4, verbose=True)
with pytest.raises(ValueError):
approximateEntropyBetweenConstraint(lower_value=-2, upper_value=3, verbose=True)
with pytest.raises(ValueError):
approximateEntropyBetweenConstraint(lower_value=1, upper_value=0.9)
def test_ks_test_p_value_greater_than_constraint_false(df_lending_club, local_config_path):
norm_values = np.random.normal(loc=10000, scale=2.0, size=15000)
kspval = parametrizedKSTestPValueGreaterThanConstraint(norm_values, p_value=0.1)
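    # the reference sample is drawn from a normal distribution that does not match
    # the empirical loan_amnt distribution, so the constraint is expected to fail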
dc = DatasetConstraints(None, summary_constraints={"loan_amnt": [kspval]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint failed once
assert report[0][1][0][2] == 1
def test_ks_test_p_value_greater_than_constraint_true(df_lending_club, local_config_path):
kspval = parametrizedKSTestPValueGreaterThanConstraint(df_lending_club["loan_amnt"].values, p_value=0.1)
dc = DatasetConstraints(None, summary_constraints={"loan_amnt": [kspval]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint was successfully executed
assert report[0][1][0][2] == 0
def test_ks_test_p_value_greater_than_constraint_merge_different_values():
ks1 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 3.0])
ks2 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 4.0])
with pytest.raises(AssertionError):
ks1.merge(ks2)
ks1 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 3.0], p_value=0.1)
ks2 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 3.0], p_value=0.5)
with pytest.raises(AssertionError):
ks1.merge(ks2)
def test_ks_test_p_value_greater_than_constraint_merge_same_values():
ks1 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 3.0])
ks2 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 3.0])
merged = ks1.merge(ks2)
TEST_LOGGER.info(f"Serialize the merged parametrizedKSTestPValueGreaterThanConstraint:\n {merged.to_protobuf()}")
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == f"parametrized KS test p-value is greater than 0.05"
assert json_value["op"] == Op.Name(Op.GT)
assert json_value["firstField"] == "ks_test"
assert pytest.approx(json_value["value"], 0.01) == 0.05
assert len(json_value["continuousDistribution"]["sketch"]["sketch"]) > 0
assert json_value["verbose"] is False
def test_serialization_deserialization_ks_test_p_value_greater_than_constraint():
ks1 = parametrizedKSTestPValueGreaterThanConstraint([1.0, 2.0, 3.0], p_value=0.15, verbose=True)
ks1.from_protobuf(ks1.to_protobuf())
json_value = json.loads(message_to_json(ks1.to_protobuf()))
TEST_LOGGER.info(f"Serialize parametrizedKSTestPValueGreaterThanConstraint from deserialized representation:\n {ks1.to_protobuf()}")
assert json_value["name"] == f"parametrized KS test p-value is greater than 0.15"
assert json_value["op"] == Op.Name(Op.GT)
assert json_value["firstField"] == "ks_test"
assert pytest.approx(json_value["value"], 0.01) == 0.15
assert len(json_value["continuousDistribution"]["sketch"]["sketch"]) > 0
assert json_value["verbose"] is True
def test_ks_test_p_value_greater_than_constraint_wrong_datatype():
with pytest.raises(ValueError):
parametrizedKSTestPValueGreaterThanConstraint([1, 2, 3], p_value=0.15, verbose=True)
with pytest.raises(TypeError):
parametrizedKSTestPValueGreaterThanConstraint("abc", p_value=0.15, verbose=True)
with pytest.raises(ValueError):
parametrizedKSTestPValueGreaterThanConstraint([1, 2, 3], p_value=1.2, verbose=True)
def test_column_kl_divergence_less_than_constraint_continuous_false(df_lending_club, local_config_path):
norm_values = np.random.normal(loc=10000, scale=2.0, size=15000)
kld = columnKLDivergenceLessThanConstraint(norm_values, threshold=2.1)
dc = DatasetConstraints(None, summary_constraints={"loan_amnt": [kld]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint failed once
assert report[0][1][0][2] == 1
def test_column_kl_divergence_less_than_constraint_continuous_true(df_lending_club, local_config_path):
kld = columnKLDivergenceLessThanConstraint(df_lending_club["loan_amnt"].values, threshold=0.1)
dc = DatasetConstraints(None, summary_constraints={"loan_amnt": [kld]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint was successfully executed
assert report[0][1][0][2] == 0
def test_column_kl_divergence_less_than_constraint_discrete_true(df_lending_club, local_config_path):
np.random.seed(2)
dist_data = np.random.choice(list(set(df_lending_club["grade"].values)), 100)
kld = columnKLDivergenceLessThanConstraint(dist_data, threshold=1.3)
dc = DatasetConstraints(None, summary_constraints={"grade": [kld]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint was successfully executed
assert report[0][1][0][2] == 0
def test_column_kl_divergence_less_than_constraint_discrete_false(df_lending_club, local_config_path):
np.random.seed(2)
dist_data = np.random.choice(list(set(df_lending_club["grade"].values)), 1000, p=[0.005, 0.005, 0.005, 0.03, 0.19, 0.765])
kld = columnKLDivergenceLessThanConstraint(dist_data, threshold=0.4)
dc = DatasetConstraints(None, summary_constraints={"grade": [kld]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint was not successfully executed
assert report[0][1][0][2] == 1
def test_column_kl_divergence_less_than_constraint_merge_different_values():
ks1 = columnKLDivergenceLessThanConstraint([1.0, 2.0, 3.0])
ks2 = columnKLDivergenceLessThanConstraint([1.0, 2.0, 4.0])
with pytest.raises(AssertionError):
ks1.merge(ks2)
ks1 = columnKLDivergenceLessThanConstraint([1.0, 2.0, 3.0], threshold=0.1)
ks2 = columnKLDivergenceLessThanConstraint([1.0, 2.0, 3.0], threshold=0.5)
with pytest.raises(AssertionError):
ks1.merge(ks2)
def test_column_kl_divergence_less_than_constraint_merge_same_values():
ks1 = columnKLDivergenceLessThanConstraint([1.0, 2.0, 3.0])
ks2 = columnKLDivergenceLessThanConstraint([1.0, 2.0, 3.0])
merged = ks1.merge(ks2)
TEST_LOGGER.info(f"Serialize the merged parametrizedKSTestPValueGreaterThanConstraint:\n {merged.to_protobuf()}")
json_value = json.loads(message_to_json(merged.to_protobuf()))
print(json_value)
assert json_value["name"] == f"KL Divergence is less than 0.5"
assert json_value["op"] == Op.Name(Op.LT)
assert json_value["firstField"] == "kl_divergence"
assert pytest.approx(json_value["value"], 0.01) == 0.5
assert len(json_value["continuousDistribution"]["sketch"]["sketch"]) > 0
assert json_value["verbose"] is False
def test_serialization_deserialization_column_kl_divergence_less_than_constraint_discrete():
ks1 = columnKLDivergenceLessThanConstraint([1, 2, 3], threshold=0.15, verbose=True)
ks1.from_protobuf(ks1.to_protobuf())
json_value = json.loads(message_to_json(ks1.to_protobuf()))
TEST_LOGGER.info(f"Serialize columnKLDivergenceLessThanConstraint from deserialized representation:\n {ks1.to_protobuf()}")
assert json_value["name"] == f"KL Divergence is less than 0.15"
assert json_value["op"] == Op.Name(Op.LT)
assert json_value["firstField"] == "kl_divergence"
assert pytest.approx(json_value["value"], 0.01) == 0.15
assert len(json_value["discreteDistribution"]["frequentItems"]["items"]) == 3
assert json_value["discreteDistribution"]["totalCount"] == 3
assert json_value["verbose"] is True
def test_serialization_deserialization_column_kl_divergence_less_than_constraint_continuous():
ks1 = columnKLDivergenceLessThanConstraint([2.0, 2.0, 3.0], threshold=0.15, verbose=True)
ks1.from_protobuf(ks1.to_protobuf())
json_value = json.loads(message_to_json(ks1.to_protobuf()))
TEST_LOGGER.info(f"Serialize columnKLDivergenceLessThanConstraint from deserialized representation:\n {ks1.to_protobuf()}")
assert json_value["name"] == f"KL Divergence is less than 0.15"
assert json_value["op"] == Op.Name(Op.LT)
assert json_value["firstField"] == "kl_divergence"
assert pytest.approx(json_value["value"], 0.01) == 0.15
assert len(json_value["continuousDistribution"]["sketch"]["sketch"]) > 0
assert json_value["verbose"] is True
def test_column_kl_divergence_less_than_constraint_wrong_datatype():
with pytest.raises(TypeError):
columnKLDivergenceLessThanConstraint([1.0, "abc", 3], threshold=0.15, verbose=True)
with pytest.raises(TypeError):
columnKLDivergenceLessThanConstraint("abc", threshold=0.5, verbose=True)
with pytest.raises(TypeError):
columnKLDivergenceLessThanConstraint([1, 2, 3], threshold="1.2", verbose=True)
def test_chi_squared_test_p_value_greater_than_constraint_true(df_lending_club, local_config_path):
test_values = ["A"] * 6 + ["B"] * 13 + ["C"] * 25 + ["D"] * 3 + ["E"] + ["F"] * 2
kspval = columnChiSquaredTestPValueGreaterThanConstraint(test_values, p_value=0.1)
dc = DatasetConstraints(None, summary_constraints={"grade": [kspval]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint was successful
assert report[0][1][0][2] == 0
def test_chi_squared_test_p_value_greater_than_constraint_false(df_lending_club, local_config_path):
test_values = {"C": 1, "B": 5, "A": 2, "D": 18, "E": 32, "F": 1}
chi = columnChiSquaredTestPValueGreaterThanConstraint(test_values, p_value=0.05)
dc = DatasetConstraints(None, summary_constraints={"grade": [chi]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
report = profile.apply_summary_constraints()
# check if all of the rows have been reported
assert report[0][1][0][1] == 1
# check if the constraint failed
assert report[0][1][0][2] == 1
def test_chi_squared_test_p_value_greater_than_constraint_merge_different_values():
ks1 = columnChiSquaredTestPValueGreaterThanConstraint([1, 2, 3])
ks2 = columnChiSquaredTestPValueGreaterThanConstraint([1, 2, 4])
with pytest.raises(AssertionError):
ks1.merge(ks2)
ks1 = columnChiSquaredTestPValueGreaterThanConstraint([1, 2, 3], p_value=0.1)
ks2 = columnChiSquaredTestPValueGreaterThanConstraint([1, 2, 3], p_value=0.5)
with pytest.raises(AssertionError):
ks1.merge(ks2)
def test_column_chi_squared_test_p_value_greater_than_constraint_merge_same_values():
ks1 = columnChiSquaredTestPValueGreaterThanConstraint([1, 3, "A"])
ks2 = columnChiSquaredTestPValueGreaterThanConstraint([1, "A", 3])
merged = ks1.merge(ks2)
TEST_LOGGER.info(f"Serialize the merged columnChiSquaredTestPValueGreaterThanConstraint:\n {merged.to_protobuf()}")
json_value = json.loads(message_to_json(merged.to_protobuf()))
assert json_value["name"] == f"Chi-Squared test p-value is greater than 0.05"
assert json_value["op"] == Op.Name(Op.GT)
assert json_value["firstField"] == "chi_squared_test"
assert pytest.approx(json_value["value"], 0.01) == 0.05
assert len(json_value["discreteDistribution"]["frequentItems"]["items"]) == 3
assert json_value["verbose"] is False
def test_serialization_deserialization_chi_squared_test_p_value_greater_than_constraint():
ks1 = columnChiSquaredTestPValueGreaterThanConstraint([1, 2, "A", "B"], p_value=0.15, verbose=True)
ks1.from_protobuf(ks1.to_protobuf())
json_value = json.loads(message_to_json(ks1.to_protobuf()))
TEST_LOGGER.info(f"Serialize columnChiSquaredTestPValueGreaterThanConstraint from deserialized representation:\n {ks1.to_protobuf()}")
assert json_value["name"] == f"Chi-Squared test p-value is greater than 0.15"
assert json_value["op"] == Op.Name(Op.GT)
assert json_value["firstField"] == "chi_squared_test"
assert pytest.approx(json_value["value"], 0.01) == 0.15
assert len(json_value["discreteDistribution"]["frequentItems"]["items"]) == 4
assert json_value["verbose"] is True
def test_chi_squared_test_p_value_greater_than_constraint_wrong_datatype():
with pytest.raises(ValueError):
columnChiSquaredTestPValueGreaterThanConstraint([1.0, 2, 3], p_value=0.15, verbose=True)
with pytest.raises(TypeError):
columnChiSquaredTestPValueGreaterThanConstraint("abc", p_value=0.15, verbose=True)
with pytest.raises(ValueError):
columnChiSquaredTestPValueGreaterThanConstraint({"A": 0.3, "B": 1, "C": 12}, p_value=0.2, verbose=True)
with pytest.raises(TypeError):
columnChiSquaredTestPValueGreaterThanConstraint(["a", "b", "c"], p_value=1.2, verbose=True)
def test_generate_default_constraints_categorical(local_config_path):
usernames = ["jd123", "<EMAIL>", "bobsmith", "_anna_"]
emails = ["<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>"]
data = pd.DataFrame(
{
"username": usernames,
"email": emails,
}
)
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(data, "test.data")
generated_constraints = profile.generate_constraints()
json_summ = json.loads(message_to_json(generated_constraints.to_protobuf()))
constraints_username = json_summ["summaryConstraints"]["username"]["constraints"]
constraints_email = json_summ["summaryConstraints"]["email"]["constraints"]
# username constraints
assert len(constraints_username) == 3 # column value type equals, unique count between and most common value in set
assert constraints_username[0]["name"] == "type of the column values is STRING"
assert constraints_username[0]["firstField"] == "column_values_type"
assert constraints_username[0]["value"] == InferredType.STRING
assert constraints_username[0]["op"] == Op.Name(Op.EQ)
assert constraints_username[0]["verbose"] is False
# there are 4 unique values in the df for username, so the unique count between is in the range 4-1 and 4+1
assert constraints_username[1]["name"] == "number of unique values is between 3 and 5"
assert constraints_username[1]["firstField"] == "unique_count"
assert constraints_username[1]["op"] == Op.Name(Op.BTWN)
assert pytest.approx(constraints_username[1]["between"]["lowerValue"], 0.001) == 3
assert pytest.approx(constraints_username[1]["between"]["upperValue"], 0.001) == 5
assert constraints_username[1]["verbose"] is False
assert f"most common value is in" in constraints_username[2]["name"] # set has different order
assert constraints_username[2]["firstField"] == "most_common_value"
assert constraints_username[2]["op"] == Op.Name(Op.IN)
assert set(constraints_username[2]["referenceSet"]) == set(usernames)
assert constraints_username[2]["verbose"] is False
# email constraints
assert len(constraints_email) == 3 # column value type equals, unique count between and most common value in set
assert constraints_email[0]["name"] == "type of the column values is STRING"
assert constraints_email[0]["firstField"] == "column_values_type"
assert constraints_email[0]["value"] == InferredType.STRING
assert constraints_email[0]["op"] == Op.Name(Op.EQ)
assert constraints_email[0]["verbose"] is False
    # there are 4 unique values in the df for email, so the unique count between is in the range 4-1 and 4+1
assert constraints_email[1]["name"] == "number of unique values is between 3 and 5"
assert constraints_email[1]["firstField"] == "unique_count"
assert constraints_email[1]["op"] == Op.Name(Op.BTWN)
assert pytest.approx(constraints_email[1]["between"]["lowerValue"], 0.001) == 3
assert pytest.approx(constraints_email[1]["between"]["upperValue"], 0.001) == 5
assert constraints_email[1]["verbose"] is False
assert f"most common value is in" in constraints_email[2]["name"] # set has different order
assert constraints_email[2]["firstField"] == "most_common_value"
assert constraints_email[2]["op"] == Op.Name(Op.IN)
assert set(constraints_email[2]["referenceSet"]) == set(emails)
assert constraints_email[2]["verbose"] is False
def test_generate_default_constraints_numeric(local_config_path):
data = pd.DataFrame(
{
"followers": [1525, 12268, 51343, 867, 567, 100265, 22113, 3412],
"points": [23.4, 123.2, 432.22, 32.1, 44.1, 42.2, 344.2, 42.1],
}
)
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(data, "test.data")
generated_constraints = profile.generate_constraints()
json_summ = json.loads(message_to_json(generated_constraints.to_protobuf()))
followers_constraints = json_summ["summaryConstraints"]["followers"]["constraints"]
points_constraints = json_summ["summaryConstraints"]["points"]["constraints"]
assert len(followers_constraints) == 5
# min greater than 0, mean between mean-stddev and mean+stddev,
# column values type, most common value in set, unique count between
followers_mean = data["followers"].mean()
followers_stddev = data["followers"].std()
lower_followers = followers_mean - followers_stddev
upper_followers = followers_mean + followers_stddev
assert followers_constraints[0]["name"] == "minimum is greater than or equal to 0"
assert followers_constraints[1]["name"] == f"mean is between {lower_followers} and {upper_followers}"
assert followers_constraints[2]["name"] == "type of the column values is INTEGRAL"
assert followers_constraints[3]["name"] == "number of unique values is between 7 and 9" # we have 8 unique values in the df
assert "most common value is in" in followers_constraints[4]["name"]
assert len(points_constraints) == 4
# min greater than 0, mean between mean-stddev and mean+stddev,
# column values type, most common value in set
points_mean = data["points"].mean()
points_stddev = data["points"].std()
lower_points = points_mean - points_stddev
upper_points = points_mean + points_stddev
assert points_constraints[0]["name"] == "minimum is greater than or equal to 0"
assert points_constraints[1]["name"] == f"mean is between {lower_points} and {upper_points}"
assert points_constraints[2]["name"] == "type of the column values is FRACTIONAL"
assert "most common value is in" in points_constraints[3]["name"]
def test_generate_default_constraints_mixed(local_config_path):
users = ["jd123", "<EMAIL>", "bobsmith", "_anna_"]
followers = [1525, 12268, 51343, 867]
data = pd.DataFrame({"username": users, "followers": followers, "null": [None, None, None, None]})
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(data, "test.data")
generated_constraints = profile.generate_constraints()
json_summ = json.loads(message_to_json(generated_constraints.to_protobuf()))
username_constraints = json_summ["summaryConstraints"]["username"]["constraints"]
followers_constraints = json_summ["summaryConstraints"]["followers"]["constraints"]
# no constraints should be generated for the null column since all values are None
assert "null" not in json_summ["summaryConstraints"]
assert len(username_constraints) == 3 # column value type equals, unique count between and most common value in set
assert username_constraints[0]["name"] == "type of the column values is STRING"
assert username_constraints[1]["name"] == "number of unique values is between 3 and 5" # we have 4 unique values in df
assert f"most common value is in" in username_constraints[2]["name"]
assert len(followers_constraints) == 5
# min greater than 0, mean between mean-stddev and mean+stddev,
# column values type, most common value in set, unique count between
followers_mean = data["followers"].mean()
followers_stddev = data["followers"].std()
lower_followers = followers_mean - followers_stddev
upper_followers = followers_mean + followers_stddev
assert followers_constraints[0]["name"] == "minimum is greater than or equal to 0"
assert followers_constraints[1]["name"] == f"mean is between {lower_followers} and {upper_followers}"
assert followers_constraints[2]["name"] == "type of the column values is INTEGRAL"
assert followers_constraints[3]["name"] == "number of unique values is between 3 and 5" # we have 4 unique values in the df
assert f"most common value is in" in followers_constraints[4]["name"]
def _apply_value_constraints_on_dataset(df_lending_club, local_config_path, value_constraints=None, multi_column_value_constraints=None):
dc = DatasetConstraints(None, value_constraints=value_constraints, multi_column_value_constraints=multi_column_value_constraints)
config = load_config(local_config_path)
session = session_from_config(config)
profile = session.log_dataframe(df_lending_club, "test.data", constraints=dc)
session.close()
return dc.report()
def test_sum_of_row_values_of_multiple_columns_constraint_apply(local_config_path, df_lending_club):
col_set = ["loan_amnt", "int_rate"]
srveq = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=col_set, value="total_pymnt", verbose=False)
multi_column_value_constraints = [srveq]
report = _apply_value_constraints_on_dataset(df_lending_club, local_config_path, multi_column_value_constraints=multi_column_value_constraints)
assert report[0][0] == f"The sum of the values of loan_amnt and int_rate is equal to the corresponding value of the column total_pymnt"
assert report[0][1] == 50
assert report[0][2] == 50
def test_sum_of_row_values_of_multiple_columns_constraint_apply_true(local_config_path):
colset = ["A", "B"]
srveq = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=colset, value=100, verbose=False)
dc = DatasetConstraints(None, multi_column_value_constraints=[srveq])
config = load_config(local_config_path)
session = session_from_config(config)
df = pd.DataFrame(
[
{"A": 1, "B": 2}, # fail
{"A": 99, "B": 1}, # pass
{"A": 32, "B": 68}, # pass
{"A": 100, "B": 2}, # fail
{"A": 83, "B": 18}, # fail
]
)
profile = session.log_dataframe(df, "test.data", constraints=dc)
session.close()
report = dc.report()
assert report[0][0] == f"The sum of the values of A and B is equal to 100"
assert report[0][1] == 5
assert report[0][2] == 3
def test_merge_sum_of_row_values_different_values():
cpvis1 = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=["annual_inc", "loan_amnt"], value="grade")
cpvis2 = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=["annual_inc", "total_pymnt"], value="grade")
cpvis3 = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=["annual_inc", "total_pymnt"], value="loan_amnt")
with pytest.raises(AssertionError):
cpvis1.merge(cpvis2)
with pytest.raises(AssertionError):
cpvis2.merge(cpvis3)
def test_merge_sum_of_row_values_constraint_valid():
col_set = ["loan_amnt", "int_rate"]
srveq1 = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=col_set, value="total_pymnt", verbose=False)
srveq1.total = 5
srveq1.failures = 1
srveq2 = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=col_set, value="total_pymnt", verbose=False)
srveq2.total = 3
srveq2.failures = 2
srveq_merged = srveq1.merge(srveq2)
json_value = json.loads(message_to_json(srveq_merged.to_protobuf()))
assert json_value["name"] == f"The sum of the values of loan_amnt and int_rate is equal to the corresponding value of the column total_pymnt"
assert json_value["dependentColumns"] == col_set
assert json_value["op"] == Op.Name(Op.EQ)
assert json_value["referenceColumns"][0] == "total_pymnt"
assert json_value["verbose"] is False
report = srveq_merged.report()
assert report[1] == 8
assert report[2] == 3
def test_serialization_deserialization_sum_of_row_values_constraint():
columns = ["A", "B", "C"]
c = sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=columns, value=6, verbose=True)
c.from_protobuf(c.to_protobuf())
json_value = json.loads(message_to_json(c.to_protobuf()))
assert json_value["name"] == f"The sum of the values of A, B and C is equal to 6"
assert json_value["dependentColumns"] == columns
assert json_value["op"] == Op.Name(Op.EQ)
assert pytest.approx(json_value["value"], 0.01) == 6
assert json_value["internalDependentColumnsOp"] == Op.Name(Op.SUM)
assert json_value["verbose"] is True
def test_sum_of_row_values_constraint_invalid_params():
with pytest.raises(TypeError):
sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=1, value="B")
with pytest.raises(TypeError):
sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=[1, 2], value="B")
with pytest.raises(TypeError):
sumOfRowValuesOfMultipleColumnsEqualsConstraint(columns=1, value=["b"])
def test_multi_column_value_constraints_logical_operation(local_config_path):
a_gt_b = columnValuesAGreaterThanBConstraint("col1", "col2")
    df = pd.DataFrame({"col1": [4, 5, 6, 7], "col2": [0, 1, 2, 3]})
import os
from warnings import warn
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from recipe_similarities.utils.clean_data import prepare_data
from recipe_similarities.config.defaults import raw_data_files
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
class SimilarityFactory:
"""
    This class produces a hybrid similarity class. To enable it to use non-example data provided
    as a part of the exercise, you must specify the full explicit file paths in
    recipes_info_file_path and similarity_score_file_path upon initiating the class.
Basic Usage:
from recipe_similarities.similarities import SimilarityFactory
sf = SimilarityFactory()
sf.load_data()
hsims = sf.hybrid_similarities()
    :returns dict {'similarity_matrix': NxN matrix as a numpy array
    , 'index': Pandas Index object highlighting recipe IDs to cross reference with the similarity matrix}
"""
def __init__(self,
recipes_info_file_path=None,
similarity_score_file_path=None):
self.recipes_info_file_path = recipes_info_file_path
self.similarity_score_file_path = similarity_score_file_path
self.recipes_df = None
self.sim_scores_df = None
def load_data(self):
""" This function loads example data or that specifed in the path specified on class instantiation """
if self.recipes_info_file_path is None or self.similarity_score_file_path is None:
warn("You have not provided a recipes.csv AND a similarity_scores.csv. Default raw data will be used. ")
raw_data = raw_data_files()
self.recipes_info_file_path = os.path.join(BASE_DIR,
'data',
'raw_data',
raw_data['recipes_info'])
self.similarity_score_file_path = os.path.join(BASE_DIR,
'data',
'raw_data',
raw_data['similarity_scores'])
self.recipes_df, self.sim_scores_df = prepare_data(self.recipes_info_file_path,
self.similarity_score_file_path)
@staticmethod
def _prep_time_class(field):
"""
This function converts preparation time to a class
        NB: these thresholds are highly subjective and should ideally be validated against customer perception
"""
if field <= 20:
return 'fast'
elif 20 < field <= 40:
return 'medium'
elif field > 40:
return 'slow'
def _recipes_prepare_data(self):
""" This function transforms recipe data for content based methods """
df = self.recipes_df.copy()
# enables simple indexing of recipes throughout processes
df.index = df.recipe_id
del df['recipe_id']
# removed as often duplicate info
del df['country_secondary']
# treat missing content as information
df.fillna('missing', inplace=True)
        # update labelling for consistency and to simplify string vectorization
df['family_friendly'].replace(to_replace={'no': 'family unfriendly',
'yes': 'family friendly'},
inplace=True)
df['dish_category'].replace(to_replace={'protein&veg': 'protein & veg'},
inplace=True)
        # simplifies prep time into a category with lower cardinality
df['prep_time'] = df['prep_time'].apply(self._prep_time_class)
return df
def recipes_jaccard_similarities(self):
""" This function computes Jaccards similarity
uses numpy's matrix operations for fast computation of jaccards similarity"""
df = self._recipes_prepare_data()
a = df.values.copy()
b = df.values.copy()
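        # broadcast every recipe row against every other recipe so that per-feature
        # equality counts (the intersection) can be computed in a single vectorized pass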
all_recipes_by_n_recipes = np.repeat(a[np.newaxis, :, :],
a.shape[0],
axis=0)
all_recipes = b.reshape(b.shape[0],
1,
b.shape[1])
intersect = np.sum(all_recipes_by_n_recipes == all_recipes, axis=2)
union = np.sum(all_recipes_by_n_recipes != all_recipes, axis=2) * 2 + intersect
jaccard_sim = intersect / union
        jaccard_sim_df = pd.DataFrame(jaccard_sim, index=df.index, columns=df.index)
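
# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained example of the same broadcasting-based Jaccard
# computation used above, on a made-up toy table of three "recipes" with two
# categorical features each. The feature values below are assumptions chosen
# only so the arithmetic is easy to verify by hand.
import numpy as np
import pandas as pd

toy = pd.DataFrame(
    {"prep_time": ["fast", "fast", "slow"], "country": ["it", "fr", "it"]},
    index=["r1", "r2", "r3"],
)
vals = toy.values
# pairwise count of matching features (intersection) via broadcasting
stacked = np.repeat(vals[np.newaxis, :, :], vals.shape[0], axis=0)
each = vals.reshape(vals.shape[0], 1, vals.shape[1])
intersect = np.sum(stacked == each, axis=2)
# union = mismatched features counted on both sides plus the shared ones
union = np.sum(stacked != each, axis=2) * 2 + intersect
toy_jaccard = pd.DataFrame(intersect / union, index=toy.index, columns=toy.index)
# r1 vs r2 share 1 of 3 distinct feature values -> 1/3; the diagonal is 1.0
print(toy_jaccard)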
# -*- coding: utf-8 -*-
import pre_deal,model
from mxnet import autograd
from mxnet import gluon
from mxnet import image
from mxnet import init
from mxnet import nd
from mxnet.gluon.data import vision
import numpy as np
from mxnet.gluon import nn
from matplotlib import pyplot as plt
from utils import Visualizer
train_dir = 'train'
test_dir = 'test'
batch_size = 128
data_dir = '/media/yijie/娱乐/tmp/kaggle_cifar10'
label_file = 'D:/dataset/gluon/train_valid_test/trainLabels.csv'
input_dir = 'D:/dataset/gluon/train_valid_test'
valid_ratio = 0.1
pre_deal_flag = True
vis = Visualizer(env='CIFAR10')
# reorganize the dataset into train/valid/test folders and set up the transforms
if not pre_deal_flag:
pre_deal.reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir, valid_ratio)
input_str = input_dir + '/'
# Read the raw image files. flag=1 means the input images have three channels (color).
train_ds = vision.ImageFolderDataset(input_str + 'train', flag=1,
transform=pre_deal.transform_train)
valid_ds = vision.ImageFolderDataset(input_str + 'valid', flag=1,
transform=pre_deal.transform_test)
train_valid_ds = vision.ImageFolderDataset(input_str + 'train_valid',
flag=1, transform=pre_deal.transform_train)
test_ds = vision.ImageFolderDataset(input_str + 'test', flag=1,
transform=pre_deal.transform_test)
loader = gluon.data.DataLoader
train_data = loader(train_ds, batch_size, shuffle=True, last_batch='keep')
valid_data = loader(valid_ds, batch_size, shuffle=True, last_batch='keep')
train_valid_data = loader(train_valid_ds, batch_size, shuffle=True, last_batch='keep')
test_data = loader(test_ds, batch_size, shuffle=False, last_batch='keep')
# Cross-entropy loss function.
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
import datetime
import sys
sys.path.append('..')
import utils
def train(net, train_data, valid_data, num_epochs, lr, wd, ctx, lr_period, lr_decay):
trainer = gluon.Trainer(
net.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
prev_time = datetime.datetime.now()
plt_train_acc = []
plt_valid_acc = []
for epoch in range(num_epochs):
train_loss = 0.0
train_acc = 0.0
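        # step-decay schedule: shrink the learning rate by lr_decay every lr_period
        # epochs, with an extra 0.4x decay every 10 epochs after epoch 161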
if epoch > 0 and epoch % lr_period == 0:
trainer.set_learning_rate(trainer.learning_rate * lr_decay)
if epoch > 161 and epoch % 10 == 0:
trainer.set_learning_rate(trainer.learning_rate * 0.4)
for data, label in train_data:
label = label.as_in_context(ctx)
with autograd.record():
output = net(data.as_in_context(ctx))
loss = softmax_cross_entropy(output, label)
loss.backward()
trainer.step(batch_size)
train_loss += nd.mean(loss).asscalar()
train_acc += utils.accuracy(output, label)
cur_time = datetime.datetime.now()
h, remainder = divmod((cur_time - prev_time).seconds, 3600)
m, s = divmod(remainder, 60)
time_str = "Time %02d:%02d:%02d" % (h, m, s)
if valid_data is not None:
valid_acc = utils.evaluate_accuracy(valid_data, net, ctx)
epoch_str = ("Epoch %d. Loss: %f, Train acc %f, Valid acc %f, "
% (epoch, train_loss / len(train_data),
train_acc / len(train_data), valid_acc))
plt_train_acc.append(train_acc / len(train_data))
plt_valid_acc.append(valid_acc)
else:
epoch_str = ("Epoch %d. Loss: %f, Train acc %f, "
% (epoch, train_loss / len(train_data),
train_acc / len(train_data)))
prev_time = cur_time
print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))
# plot
if valid_data is not None:
plt.plot(plt_train_acc)
plt.plot(plt_valid_acc)
plt.legend(['train_acc','test_acc'])
plt.savefig("Loss.png")
ctx = utils.try_gpu()
num_epochs = 200
learning_rate = 0.1
weight_decay = 5e-4
lr_period = 80
lr_decay = 0.1
net = model.get_net(ctx)
net.hybridize()
train(net, train_data, valid_data, num_epochs, learning_rate,
weight_decay, ctx, lr_period, lr_decay)
import numpy as np
import pandas as pd
net = model.get_net(ctx)
net.hybridize()
train(net, train_valid_data, None, num_epochs, learning_rate,
weight_decay, ctx, lr_period, lr_decay)
preds = []
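# run the trained network over the test set and collect integer class predictions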
for data, label in test_data:
output = net(data.as_in_context(ctx))
preds.extend(output.argmax(axis=1).astype(int).asnumpy())
sorted_ids = list(range(1, len(test_ds) + 1))
sorted_ids.sort(key = lambda x:str(x))
df = pd.DataFrame({'id': sorted_ids, 'label': preds})
# -*- coding: utf-8 -*-
import string
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.common_metadata import make_meta, store_schema_metadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.naming import DEFAULT_METADATA_VERSION
from kartothek.io_components.metapartition import (
MetaPartition,
_unique_label,
parse_input_to_metapartition,
partition_labels_from_mps,
)
from kartothek.serialization import DataFrameSerializer, ParquetSerializer
def test_store_single_dataframe_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
assert len(meta_partition.data) == 0
expected_key = "dataset_uuid/core/test_label.parquet"
assert meta_partition.files == {"core": expected_key}
assert meta_partition.label == "test_label"
files_in_store = list(store.keys())
expected_num_files = 1
assert len(files_in_store) == expected_num_files
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_key)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_key)
assert len(files_in_store) == expected_num_files - 1
def test_store_single_dataframe_as_partition_no_metadata(store, metadata_version):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=False,
)
assert len(partition.data) == 0
expected_file = "dataset_uuid/core/test_label.parquet"
assert partition.files == {"core": expected_file}
assert partition.label == "test_label"
    # only the Parquet data file is written because store_metadata=False
files_in_store = list(store.keys())
assert len(files_in_store) == 1
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
def test_load_dataframe_logical_conjunction(
store, meta_partitions_files_only, metadata_version, metadata_storage_format
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="cluster_1",
data={"core": df},
metadata_version=metadata_version,
logical_conjunction=[("P", ">", 4)],
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
predicates = None
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 7, 8, 9], "L": [5, 6, 7, 8, 9], "TARGET": [15, 16, 17, 18, 19]}
).set_index(np.arange(5, 10))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 6), ("TARGET", "<", 18)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame({"P": [7], "L": [7], "TARGET": [17]}).set_index(
np.array([7])
)
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 2), ("TARGET", "<", 17)], [("TARGET", "==", 19)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 9], "L": [5, 6, 9], "TARGET": [15, 16, 19]}
).set_index(np.array([5, 6, 9]))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
def test_store_multiple_dataframes_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_2 = pd.DataFrame({"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]})
mp = MetaPartition(
label="cluster_1",
data={"core": df, "helper": df_2},
metadata_version=metadata_version,
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
expected_file = "dataset_uuid/core/cluster_1.parquet"
expected_file_helper = "dataset_uuid/helper/cluster_1.parquet"
assert meta_partition.files == {
"core": expected_file,
"helper": expected_file_helper,
}
assert meta_partition.label == "cluster_1"
files_in_store = list(store.keys())
assert len(files_in_store) == 2
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_file)
stored_df = DataFrameSerializer.restore_dataframe(
store=store, key=expected_file_helper
)
pdt.assert_frame_equal(df_2, stored_df)
files_in_store.remove(expected_file_helper)
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_load_dataframes(
meta_partitions_files_only, store_session, predicate_pushdown_to_io
):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
expected_df_2 = pd.DataFrame(OrderedDict([("P", [1]), ("info", ["a"])]))
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert len(mp.data) == 2
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
pdt.assert_frame_equal(data["helper"], expected_df_2, check_dtype=False)
empty_mp = MetaPartition("empty_mp", metadata_version=mp.metadata_version)
empty_mp.load_dataframes(
store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert empty_mp.data == {}
def test_remove_dataframes(meta_partitions_files_only, store_session):
mp = meta_partitions_files_only[0].load_dataframes(store=store_session)
assert len(mp.data) == 2
mp = mp.remove_dataframes()
assert mp.data == {}
def test_load_dataframes_selective(meta_partitions_files_only, store_session):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, tables=["core"]
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
def test_load_dataframes_columns_projection(
meta_partitions_evaluation_files_only, store_session
):
expected_df = pd.DataFrame(OrderedDict([("P", [1]), ("L", [1]), ("HORIZON", [1])]))
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session, tables=["PRED"], columns={"PRED": ["P", "L", "HORIZON"]}
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["PRED"], expected_df, check_dtype=False)
def test_load_dataframes_columns_raises_missing(
meta_partitions_evaluation_files_only, store_session
):
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(ValueError) as e:
meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session,
tables=["PRED"],
columns={"PRED": ["P", "L", "HORIZON", "foo", "bar"]},
)
assert str(e.value) == "Columns cannot be found in stored dataframe: bar, foo"
def test_load_dataframes_columns_table_missing(
meta_partitions_evaluation_files_only, store_session
):
    # test behavior of load_dataframes when the columns argument
    # specifies a table that doesn't exist
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(
ValueError,
match=r"You are trying to read columns from invalid table\(s\). .*PRED_typo.*",
):
mp.load_dataframes(
store=store_session,
columns={"PRED_typo": ["P", "L", "HORIZON", "foo", "bar"]},
)
# ensure typo in tables argument doesn't raise, as specified in docstring
dfs = mp.load_dataframes(store=store_session, tables=["PRED_typo"])
assert len(dfs) > 0
def test_from_dict():
df = pd.DataFrame({"a": [1]})
dct = {"data": {"core": df}, "label": "test_label"}
meta_partition = MetaPartition.from_dict(dct)
pdt.assert_frame_equal(meta_partition.data["core"], df)
assert meta_partition.metadata_version == DEFAULT_METADATA_VERSION
def test_eq():
df = pd.DataFrame({"a": [1]})
df_same = pd.DataFrame({"a": [1]})
df_other = pd.DataFrame({"a": [2]})
df_diff_col = pd.DataFrame({"b": [1]})
df_diff_type = pd.DataFrame({"b": [1.0]})
meta_partition = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df}}
)
assert meta_partition == meta_partition
meta_partition_same = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_same}}
)
assert meta_partition == meta_partition_same
meta_partition_diff_label = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}}
)
assert meta_partition != meta_partition_diff_label
assert meta_partition_diff_label != meta_partition
meta_partition_diff_files = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}, "files": {"core": "something"}}
)
assert meta_partition != meta_partition_diff_files
assert meta_partition_diff_files != meta_partition
meta_partition_diff_col = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_col}}
)
assert meta_partition != meta_partition_diff_col
assert meta_partition_diff_col != meta_partition
meta_partition_diff_type = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_type}}
)
assert meta_partition != meta_partition_diff_type
assert meta_partition_diff_type != meta_partition
meta_partition_diff_metadata = MetaPartition.from_dict(
{
"label": "test_label",
"data": {"core": df_diff_type},
"dataset_metadata": {"some": "metadata"},
}
)
assert meta_partition != meta_partition_diff_metadata
assert meta_partition_diff_metadata != meta_partition
meta_partition_different_df = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_other}}
)
assert not meta_partition == meta_partition_different_df
meta_partition_different_label = MetaPartition.from_dict(
{"label": "test_label", "data": {"not_core": df_same}}
)
assert not meta_partition == meta_partition_different_label
meta_partition_empty_data = MetaPartition.from_dict(
{"label": "test_label", "data": {}}
)
assert meta_partition_empty_data == meta_partition_empty_data
meta_partition_more_data = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df, "not_core": df}}
)
assert not (meta_partition == meta_partition_more_data)
assert not meta_partition == "abc"
def test_add_nested_to_plain():
mp = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
to_nest = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mp_nested = to_nest[0].add_metapartition(to_nest[1])
mp_add_nested = mp.add_metapartition(mp_nested)
mp_iter = mp.add_metapartition(to_nest[0]).add_metapartition(to_nest[1])
assert mp_add_nested == mp_iter
def test_add_nested_to_nested():
mps1 = [
MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
MetaPartition(
label="label_33",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
]
mpn_1 = mps1[0].add_metapartition(mps1[1])
mps2 = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mpn_2 = mps2[0].add_metapartition(mps2[1])
mp_nested_merge = mpn_1.add_metapartition(mpn_2)
mp_iter = mps1.pop()
for mp_ in [*mps1, *mps2]:
mp_iter = mp_iter.add_metapartition(mp_)
assert mp_nested_merge == mp_iter
def test_eq_nested():
mp_1 = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
)
mp = mp_1.add_metapartition(mp_2)
assert mp == mp
assert mp != mp_2
assert mp_2 != mp
mp_other = MetaPartition(
label="label_3", data={"core": pd.DataFrame({"test": [4, 5, 6]})}
)
mp_other = mp_1.add_metapartition(mp_other)
assert mp != mp_other
assert mp_other != mp
def test_nested_incompatible_meta():
mp = MetaPartition(
label="label_1",
data={"core": pd.DataFrame({"test": np.array([1, 2, 3], dtype=np.int8)})},
metadata_version=4,
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": np.array([4, 5, 6], dtype=np.float64)})},
metadata_version=4,
)
with pytest.raises(ValueError):
mp.add_metapartition(mp_2)
def test_concatenate_no_change():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"second": pd.DataFrame({"A": [3], "B": [3], "C": [3]}),
}
dct = {"label": "test_label", "data": input_dct}
meta_partition = MetaPartition.from_dict(dct)
result = meta_partition.concat_dataframes()
assert result == meta_partition
def test_concatenate_identical_col_df():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"first_1": pd.DataFrame({"A": [2], "B": [2]}),
"second": pd.DataFrame({"A": [3], "B": [3], "C": [3]}),
}
dct = {"label": "test_label", "data": input_dct}
meta_partition = MetaPartition.from_dict(dct)
result = meta_partition.concat_dataframes().data
assert len(result) == 2
assert "first" in result
first_expected = pd.DataFrame({"A": [1, 2], "B": [1, 2]})
pdt.assert_frame_equal(result["first"], first_expected)
assert "second" in result
first_expected = pd.DataFrame({"A": [3], "B": [3], "C": [3]})
pdt.assert_frame_equal(result["second"], first_expected)
def test_concatenate_identical_col_df_naming():
input_dct = {
"some": pd.DataFrame({"A": [1], "B": [1]}),
"name": pd.DataFrame({"A": [2], "B": [2]}),
"second": pd.DataFrame({"A": [3], "B": [3], "C": [3]}),
}
dct = {"label": "test_label", "data": input_dct}
meta_partition = MetaPartition.from_dict(dct)
result = meta_partition.concat_dataframes().data
assert len(result) == 2
assert "some_name" in result
first_expected = pd.DataFrame({"A": [1, 2], "B": [1, 2]})
pdt.assert_frame_equal(result["some_name"], first_expected)
assert "second" in result
first_expected = pd.DataFrame({"A": [3], "B": [3], "C": [3]})
pdt.assert_frame_equal(result["second"], first_expected)
def test_unique_label():
label_list = ["first_0", "first_1"]
assert _unique_label(label_list) == "first"
label_list = ["test_0", "test_1"]
assert _unique_label(label_list) == "test"
label_list = ["test_0", "test_1", "test_2"]
assert _unique_label(label_list) == "test"
label_list = ["something", "else"]
assert _unique_label(label_list) == "something_else"
def test_merge_dataframes():
df_core = pd.DataFrame(
{
"P": [1, 1, 1, 1, 3],
"L": [1, 2, 1, 2, 3],
"C": [1, 1, 2, 2, 3],
"TARGET": [1, 2, 3, 4, -1],
"info": ["a", "b", "c", "d", "e"],
}
)
df_preds = pd.DataFrame(
{
"P": [1, 1, 1, 1],
"L": [1, 2, 1, 2],
"C": [1, 1, 2, 2],
"PRED": [11, 22, 33, 44],
"HORIZONS": [1, 1, 2, 2],
}
)
mp = MetaPartition(label="part_label", data={"core": df_core, "pred": df_preds})
mp = mp.merge_dataframes(left="core", right="pred", output_label="merged")
assert len(mp.data) == 1
df_result = mp.data["merged"]
df_expected = pd.DataFrame(
{
"P": [1, 1, 1, 1],
"L": [1, 2, 1, 2],
"C": [1, 1, 2, 2],
"PRED": [11, 22, 33, 44],
"TARGET": [1, 2, 3, 4],
"info": ["a", "b", "c", "d"],
"HORIZONS": [1, 1, 2, 2],
}
)
pdt.assert_frame_equal(df_expected, df_result, check_like=True)
def test_merge_dataframes_kwargs():
df_core = pd.DataFrame(
{
"P": [1, 1, 1, 1, 3],
"L": [1, 2, 1, 2, 3],
"C": [1, 1, 2, 2, 3],
"TARGET": [1, 2, 3, 4, -1],
"info": ["a", "b", "c", "d", "e"],
}
)
df_preds = pd.DataFrame(
{
"P": [1, 1, 1, 1],
"L": [1, 2, 1, 2],
"C": [1, 1, 2, 2],
"PRED": [11, 22, 33, 44],
"HORIZONS": [1, 1, 2, 2],
}
)
mp = MetaPartition(label="part_label", data={"core": df_core, "pred": df_preds})
mp = mp.merge_dataframes(
left="core", right="pred", output_label="merged", merge_kwargs={"how": "left"}
)
assert len(mp.data) == 1
df_result = mp.data["merged"]
df_expected = pd.DataFrame(
{
"P": [1, 1, 1, 1, 3],
"L": [1, 2, 1, 2, 3],
"C": [1, 1, 2, 2, 3],
"TARGET": [1, 2, 3, 4, -1],
"info": ["a", "b", "c", "d", "e"],
"PRED": [11, 22, 33, 44, np.NaN],
"HORIZONS": [1, 1, 2, 2, np.NaN],
}
)
pdt.assert_frame_equal(df_expected, df_result, check_like=True)
def test_merge_indices():
indices = [
MetaPartition(
label="label1",
indices={"location": {"Loc1": ["label1"], "Loc2": ["label1"]}},
),
MetaPartition(
label="label2",
indices={
"location": {"Loc3": ["label2"], "Loc2": ["label2"]},
"product": {"Product1": ["label2"], "Product2": ["label2"]},
},
),
]
result = MetaPartition.merge_indices(indices)
expected = {
"location": ExplicitSecondaryIndex(
"location",
{"Loc1": ["label1"], "Loc2": ["label1", "label2"], "Loc3": ["label2"]},
),
"product": ExplicitSecondaryIndex(
"product", {"Product1": ["label2"], "Product2": ["label2"]}
),
}
assert result == expected
def test_build_indices():
columns = ["location", "product"]
df = pd.DataFrame(
OrderedDict(
[("location", ["Loc1", "Loc2"]), ("product", ["Product1", "Product2"])]
)
)
mp = MetaPartition(label="partition_label", data={"core": df})
result_mp = mp.build_indices(columns)
result = result_mp.indices
loc_index = ExplicitSecondaryIndex(
"location", {"Loc1": ["partition_label"], "Loc2": ["partition_label"]}
)
prod_index = ExplicitSecondaryIndex(
"product", {"Product1": ["partition_label"], "Product2": ["partition_label"]}
)
assert result["location"] == loc_index
assert result["product"] == prod_index
def test_add_metapartition():
mp = MetaPartition(
label="label_1",
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
)
new_mp = mp.add_metapartition(mp_2)
# Cannot access single object attributes
with pytest.raises(AttributeError):
new_mp.indices
with pytest.raises(AttributeError):
new_mp.label
with pytest.raises(AttributeError):
new_mp.data
with pytest.raises(AttributeError):
new_mp.files
with pytest.raises(AttributeError):
new_mp.indices
with pytest.raises(AttributeError):
new_mp.indices
partition_list = new_mp.metapartitions
assert len(partition_list) == 2
first_mp = partition_list[0]
assert first_mp["label"] == "label_1"
assert first_mp["indices"] == {"test": [1, 2, 3]}
first_mp = partition_list[1]
assert first_mp["label"] == "label_2"
assert first_mp["indices"] == {"test": [4, 5, 6]}
# This tests whether it is possible to add to an already nested MetaPartition
mp_3 = MetaPartition(
label="label_3",
data={"core": | pd.DataFrame({"test": [7, 8, 9]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from sklearn.metrics import mean_squared_error
from math import sqrt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
# 1. Extract the data from August 2012 to December 2013
# Index 11856 marks the end of year 2013
df = pd.read_csv("./jetrail_train.csv", nrows=11856)
print("===== df.head():\n", df.head())
print("===== df.tail():\n", df.tail())
# 2. Split the data into training and test sets: August 2012 to October 2013 is used for training, the last two months (2013-11 and 2013-12) for testing
#Index 10392 marks the end of October 2013
train=df[0:10392] #train data : 2012-08 ~ 2013-10
test=df[10392:] #test data : 2013-11 2013-12
print("===== train data:", train)
print("===== test data:", test)
# 3. Aggregate the data to daily level
# Aggregate the extracted data by day (taking the daily mean)
# 'D' = day; optional argument specifying the resampling frequency, e.g. 'M', '5min', Second(15)
# mean(): aggregation function - take the mean of each day
# df.Datetime : 01-11-2013 01:00
df['Timestamp'] = pd.to_datetime(df.Datetime,format='%d-%m-%Y %H:%M')
print("===== df.Timestamp:\n", df['Timestamp'])
df.index = df.Timestamp
print("===== df.index:\n", df.index)
df = df.resample('D').mean()
print("===== df:", df)
#output: 2012-08-25 11.5 3.166667
# Aggregate the training data by day
train['Timestamp'] = pd.to_datetime(train.Datetime,format='%d-%m-%Y %H:%M')
train.index = train.Timestamp
train = train.resample('D').mean()
print("===== train.resample('D').mean():", train)
# Aggregate the test data by day
test['Timestamp'] = | pd.to_datetime(test.Datetime,format='%d-%m-%Y %H:%M') | pandas.to_datetime |
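# The block below is an illustrative sketch, not part of the original script: it
# mirrors the train aggregation for the test set and shows how the smoothing
# models imported above could be fitted and scored with RMSE. The 'Count' column
# name is assumed from the Jetrail dataset, and the fit() keyword names may vary
# slightly between statsmodels versions.
test.index = test.Timestamp
test = test.resample('D').mean()
y_hat = test.copy()
# simple exponential smoothing on the daily training series
fit_ses = SimpleExpSmoothing(np.asarray(train['Count'])).fit(smoothing_level=0.6, optimized=False)
y_hat['SES'] = fit_ses.forecast(len(test))
# Holt's linear trend method
fit_holt = Holt(np.asarray(train['Count'])).fit(smoothing_level=0.3, smoothing_slope=0.1)
y_hat['Holt_linear'] = fit_holt.forecast(len(test))
print("SES RMSE:", sqrt(mean_squared_error(test['Count'], y_hat['SES'])))
print("Holt linear RMSE:", sqrt(mean_squared_error(test['Count'], y_hat['Holt_linear'])))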
from pathlib import Path
from pandas.core.frame import DataFrame
import pytest
import pandas as pd
import datetime
from data_check import DataCheck # noqa E402
from data_check.config import DataCheckConfig # noqa E402
# These tests should work on any database.
# The tests are generic, but in integration tests each database uses specific SQL files.
@pytest.fixture
def dc() -> DataCheck:
config = DataCheckConfig().load_config().set_connection("test")
config.parallel_workers = 1
_dc = DataCheck(config)
_dc.load_template()
_dc.output.configure_output(
verbose=True,
traceback=True,
print_failed=True,
print_format="json",
)
return _dc
@pytest.fixture
def data_types_check(dc: DataCheck):
res = dc.get_check(Path("checks/basic/data_types.sql")).run_test(return_all=True)
assert isinstance(res.result, DataFrame)
assert not res.result.empty
return res.result.iloc[0]
def test_data_types_string(data_types_check):
assert data_types_check.string_test == "string"
def test_data_types_int(data_types_check):
assert data_types_check.int_test == 42
def test_data_types_float(data_types_check):
assert data_types_check.float_test == 42.1
def test_data_types_date(data_types_check):
assert data_types_check.date_test == datetime.datetime(2020, 12, 20)
def test_data_types_huge_date(data_types_check):
assert data_types_check.inf_date_test == datetime.datetime(9999, 12, 31)
def test_data_types_null(data_types_check):
assert | pd.isna(data_types_check.null_test) | pandas.isna |
import pandas as pd
from calendar import monthrange
from datetime import date, datetime
from argparse import ArgumentParser
PROJECT_TASK_CSVFILE = "project_task.csv"
def main():
parser = ArgumentParser()
parser.add_argument('year', type=int, help='year')
parser.add_argument('month', type=int, help='month')
args = parser.parse_args()
year = args.year
month = args.month
df_project = | pd.read_csv(PROJECT_TASK_CSVFILE) | pandas.read_csv |
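# A possible continuation sketch (not from the original script, which is truncated
# here): calendar.monthrange, imported above, returns
# (weekday_of_first_day, days_in_month), which can be used to enumerate every day
# of the requested month.
_, days_in_month = monthrange(year, month)
month_days = [date(year, month, day) for day in range(1, days_in_month + 1)]
print(f"{year}-{month:02d} has {days_in_month} days")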
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
        cp[inds] = 0
        exp.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
        cp[arr_inds] = 0
        exp.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# tuple name, e.g. from hierarchical index
self.series.name = ('foo', 'bar', 'baz')
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
def test_to_string(self):
from cStringIO import StringIO
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assert_(retval is None)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, '')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, '')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Name: foo, Length: %d" % len(cp))
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 -1.23\n'
'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n'
'1 1.568\n'
'2 NaN\n'
'3 -3.000\n'
'4 NaN')
self.assertEqual(result, expected)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assert_(getkeys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert_almost_equal(s.sum(), s2.sum())
import pandas.core.nanops as nanops
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
res = nanops.nansum(arr, axis=1)
expected = nanops._nansum(arr, axis=1)
assert_almost_equal(res, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_var(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_skew(self):
from scipy.stats import skew
alt =lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assert_(issubclass(argsorted.dtype.type, np.integer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def _check_stat_op(self, name, alternate, check_objects=False):
from pandas import DateRange
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona))
allna = self.series * nan
self.assert_(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# check DateRange
if check_objects:
s = Series(DateRange('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.] , index=range(3))
result = s.prod()
self.assert_(not isinstance(result, Series))
def test_quantile(self):
from scipy.stats import scoreatpercentile
q = self.ts.quantile(0.1)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90))
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count' : 7, 'unique' : 4,
'top' : 'a', 'freq' : 3}, index=result.index)
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
np.random.seed(12345)
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assert_(not bool_series.all())
self.assert_(bool_series.any())
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv',
'gt', 'ge', 'lt', 'le']
for opname in simple_ops:
_check_op(other, getattr(operator, opname))
_check_op(other, operator.pow, pos_only=True)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.truediv(y, x))
_check_op(other, lambda x, y: operator.floordiv(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x),
pos_only=True)
check(self.ts * 2)
check(self.ts * 0)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x' : 0.})
# it works!
_ = s1 * s2
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_idxmin(self):
# test idxmin
        # _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assert_(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmin()))
def test_idxmax(self):
# test idxmax
        # _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assert_(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmax()))
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
from pandas.util.testing import rands
import operator
# GH 353
vals = Series([rands(5) for _ in xrange(10)])
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals' : vals})
result = 'foo_' + frame
expected = DataFrame({'vals' : vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
ops = [Series.add, Series.sub, Series.mul, Series.div]
equivs = [operator.add, operator.sub, operator.mul]
if py3compat.PY3:
equivs.append(operator.truediv)
else:
equivs.append(operator.div)
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combine_first(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_corr(self):
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if int(scipy.__version__.split('.')[1]) < 9:
raise nose.SkipTest
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std()**2)
# partial overlap
self.assertAlmostEqual(self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std()**2)
# No overlap
self.assert_(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.cov(cp)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_value_counts_nunique(self):
s = Series(['a', 'b', 'b', 'b', 'b', 'a', 'c', 'd', 'd', 'a'])
hist = s.value_counts()
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
assert_series_equal(hist, expected)
self.assertEquals(s.nunique(), 4)
# handle NA's properly
s[5:7] = np.nan
hist = s.value_counts()
expected = s.dropna().value_counts()
assert_series_equal(hist, expected)
s = Series({})
hist = s.value_counts()
expected = Series([])
assert_series_equal(hist, expected)
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
def test_sort_index(self):
import random
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assert_(np.isnan(result[-5:]).all())
self.assert_(np.array_equal(result[:-5], np.sort(vals[5:])))
result = ts.order(na_last=False)
self.assert_(np.isnan(result[:5]).all())
self.assert_(np.array_equal(result[5:], np.sort(vals[5:])))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
# ascending=False
ordered = ts.order(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
ordered = ts.order(ascending=False, na_last=False)
assert_almost_equal(expected, ordered.valid().values)
def test_rank(self):
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
exp = rankdata(filled)
exp[mask] = np.nan
assert_almost_equal(ranks, exp)
def test_from_csv(self):
self.ts.to_csv('_foo')
ts = Series.from_csv('_foo')
assert_series_equal(self.ts, ts)
self.series.to_csv('_foo')
series = Series.from_csv('_foo')
assert_series_equal(self.series, series)
outfile = open('_foo', 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv('_foo',sep='|')
checkseries = Series({datetime(1998,1,1): 1.0, datetime(1999,1,1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv('_foo',sep='|',parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
os.remove('_foo')
def test_to_csv(self):
self.ts.to_csv('_foo')
lines = open('_foo', 'U').readlines()
assert(lines[1] != '\n')
os.remove('_foo')
def test_to_dict(self):
self.assert_(np.array_equal(Series(self.ts.to_dict()), self.ts))
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
self.assert_(isinstance(expected, Series))
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
| tm.assert_dict_equal(result, ts, compare_keys=False) | pandas.util.testing.assert_dict_equal |
import csv
import re
import string
import math
import warnings
import pandas as pd
import numpy as np
import ipywidgets as wg
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mtick
from itertools import product
from scipy.optimize import curve_fit
from plate_mapping import plate_mapping as pm
# define custom errors
class DataError(Exception):
pass
class PlateSizeError(Exception):
pass
class DataTypeError(Exception):
pass
# define well plate dimensions
plate_dim = {96:(8, 12), 384:(16, 24)}
# define header names for platemapping module
pm.header_names = {'Well ID': {'dtype':str, 'long':True, 'short_row': False, 'short_col':False},
'Type': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Contents': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Concentration Units':{'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
}
class FA:
"""Class used for the analysis of fluorescence anisotropy data.
    :param data_dict: A dictionary containing data frames with pre-processed data and metadata
:type data_dict: dict
:param g_factor: G-factor
:type g_factor: float
:param plate_map: dataframe from a plate map csv file that defines each and every well
:type plate_map: pandas df"""
def __init__(self, data_dict, g_factor, plate_map):
self.data_dict = data_dict
self.g_factor = g_factor
self.plate_map = plate_map
# create list of all p and s data frames to run some stats
frames = []
for repeat in self.data_dict.values():
metadata, data = repeat.values()
p_channel, s_channel = data.values()
frames.append(p_channel)
frames.append(s_channel)
new = pd.concat(frames, axis=1) # join all p and s data frames into one df
nan = new.size - new.describe().loc['count'].sum() # find number of 'nan' cells
# create a data frame to store the final fitting parameters
p_names = self.plate_map['Protein Name'].dropna().unique() # get all protein names
        t_names = self.plate_map['Tracer Name'].dropna().unique()   # get all tracer names
final_fit = pd.DataFrame(index=pd.MultiIndex.from_product([p_names, t_names]),
columns=['rmin', 'rmin error', 'rmax', 'rmax error', 'lambda', 'Kd', 'Kd error'])
final_fit["lambda"] = 1 # set the default lambda value as 1
        FA.final_fit = final_fit   # add the final_fit df as a class variable
print("Data has been uploaded!\n")
print(f"Number of repeats: {len(self.data_dict)} \nValue of g-factor: {self.g_factor} \nOverall number of empty cells is {int(nan)} in {len(frames)} data frames.\nProteins: {p_names}\nTracers: {t_names}")
@classmethod
def read_in_envision(cls, data_csv, platemap_csv, data_type='plate', size=384):
"""Reads in the raw data from csv file along with a platemap and constructs the FA class boject.
:param data_csv: File path of the raw data file in .csv format.
:type data_csv: str
:param platemap_csv: File path of the platemap file in .csv format.
:type platemap_csv: str
:param data_type: Format in which the raw data was exported (plate or list), defaults to plate.
:type data_type: str
:param size: Size of the well plate (384 or 96), defaults to 384.
:type size: int
        :return: A dictionary containing data frames with pre-processed data, the g-factor, and a data frame containing the platemap.
:rtype: dict, float, pandas df """
# ensure the plate size is either 384 or 96
if size not in plate_dim:
raise PlateSizeError('Invalid size of the well plate, should be 384 or 96.')
# try to read in data in plate format
if data_type == 'plate':
try:
data_dict, g_factor = FA._read_in_plate(data_csv, size=size)
plate_map_df = pm.plate_map(platemap_csv, size=size)
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError, ValueError):
raise DataError(f"Error occured during data read in. Check your file contains data in the 'plate' format and plate size is {size}.")
# try to read in data in list format
if data_type == 'list':
try:
data_dict, g_factor = FA._read_in_list(data_csv, size=size)
plate_map_df = pm.plate_map(platemap_csv, size=size)
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError):
raise DataError("Error occured during data read in. Check your file contains data in the 'list' format.")
else:
raise DataTypeError(f"'{data_type}' is not one of the two valid data types: plate or list.")
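    # A minimal usage sketch for the classmethod above (illustrative only; the
    # file names are placeholders, not files shipped with this module):
    #
    #   fa = FA.read_in_envision(data_csv='raw_data.csv',
    #                            platemap_csv='platemap.csv',
    #                            data_type='plate', size=384)
    #   fa.plate_map.head()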
def _read_in_plate(csv_file, size):
"""Reads the raw data file and finds the information needed to extract data. Passes those parameters to pre_process_plate function and executes it.
Returns a tuple of two elemnts: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A tuple of dictionary of data frames and the g-factor
:rtype: tuple """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == []) # list containing indices of all blank rows
if blank_indexes == []:
blank_indexes = list(index for index, item in enumerate(all_data_lines) if set(item) == {''})
blanks = np.array(blank_indexes) # convert the list of blank indices to a numpy array
read_in_info = [] # list to store the tuples with parameters needed for pandas to read in the csv file
for index, item in enumerate(all_data_lines): # iterate over all lines in the csv file
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) == None and re.findall(r"Formula", all_data_lines[index+1][10]) != ['Formula']:
skiprows = index + 9 # Set the skiprows parameter for raw data table
skiprows_meta = index + 1 # Set the skiprows parameter for metadata table
end_of_data = blanks[blanks > skiprows].min() # Calculate the end of data table by finding the smallest blank index after the beginning of data table
read_in_info.append((skiprows, end_of_data - skiprows + 1, skiprows_meta)) # add the skiprows, calculated number of data lines and skiprows for metadata parameters to the list as a tuple
data_format = 'plate1'
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) != None:
skiprows = index + 10
skiprows_meta = index + 1
end_of_data = blanks[blanks > skiprows].min()
read_in_info.append((skiprows, end_of_data - skiprows - 1, skiprows_meta))
data_format = 'plate2'
if item != [] and len(item) > 1 and re.fullmatch(r"G-factor", item[0]):
g_factor = float(item[4])
return FA._pre_process_plate(csv_file, read_in_info, data_format, size), g_factor
def _pre_process_plate(csv_file, read_in_info, data_format, size):
"""Extracts the data and metadata from the csv file, processes it and returns a nested dictionary containing data and metadata for each repeat and channel.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param read_in_info: Tuples with read in parameters for each channel.
:type read_in_info: list
:param data_format: Plate type (plate1 or plate2)
:type data_format: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A dictionary containing data and metadata
:rtype: dict """
data_frames = {} # dictionary to store data frames
counter = 1 # counter incremented by 0.5 to enable alternating labelling of data frames as 'p' or 's'
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # generate a list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # generate a list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # generate a list of well IDs for the pre-processed data frames
for index, item in enumerate(read_in_info): # iterate over all tuples in the list, each tuple contains skiprows, nrows and skiprows_meta for one channel
if data_format == 'plate1': # raw data table does not have row and column names so 'names' parameter passed to omit the last column
raw_data = pd.read_csv(csv_file, sep=',', names=col_numbers, index_col=False, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if data_format == 'plate2': # raw data table has row and column names, so index_col=0 to set the first column as row labels
raw_data = pd.read_csv(csv_file, sep=',', index_col=0, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if len(raw_data.columns) in [13, 25]:
raw_data.drop(raw_data.columns[-1], axis=1, inplace=True) # delete the last column because it is empty
# generate df for metadata (number of rows of metadata table is always 1) and convert measurement time into datetime object
metadata = pd.read_csv(csv_file, sep=',', engine='python', skiprows=item[2], nrows=1, encoding='utf-8').astype({'Measurement date': 'datetime64[ns]'})
# convert and reshape data frame into 1D array
data_as_array = np.reshape(raw_data.to_numpy(), (int(size), 1))
if counter % 1 == 0:
new_data = pd.DataFrame(data=data_as_array, index=well_ids, columns=['p']) # generate new 384 (or 96) by 1 data frame with p channel data
data_frames[f'repeat_{int(counter)}'] = {'metadata':metadata, 'data': {'p': new_data, 's':''}} # add p channel data and metadata dfs to dictionary
if counter % 1 != 0:
new_data = pd.DataFrame(data=data_as_array, index=well_ids, columns=['s']) # generate new 384 (or 96) by 1 data frame with s channel data
data_frames[f'repeat_{int(counter-0.5)}']['data']['s'] = new_data # add s channel data to dictionary
counter = counter + 0.5
return data_frames
def _read_in_list(csv_file, size):
"""Reads the raw data file and extracts the data and metadata. Passes the raw data to pre_process_list function and executes it.
Returns a tuple of two elemnts: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A tuple of dictionary of data frames and the g-factor
:rtype: tuple """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == [] or set(item) == {''}) # list containing indexes of all blank rows
blanks = np.array(blank_indexes) # convert the list of blank indexes to a numpy array
# iterate over all lines to find the beginning of the data table ('skiprows') and determine the format of data (list A, B, or C)
for index, item in enumerate(all_data_lines):
if item != [] and len(item) == 1 and re.findall(r"Plate information", item[0]) == ["Plate information"]:
skiprows_meta = index + 1
end_of_metadata = blanks[blanks > skiprows_meta].min() # find the end of metadata by finding the smallest blank index after the beginning of metadata
if item != [] and len(item) >= 2 and re.findall(r"PlateNumber", item[0]) == ['PlateNumber'] and re.findall(r"PlateRepeat", item[1]) == ['PlateRepeat']: # find line number with the beggining of the data
skiprows = index - 1
data_format = 'listA'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and len(item) >= 2 and re.findall(r"Plate", item[0]) == ['Plate'] and re.findall(r"Barcode", item[1]) == ['Barcode']: # find line number with the beggining of the data
skiprows = index
data_format = 'listB'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and len(item) >= 2 and re.findall(r"Plate", item[0]) == ['Plate'] and re.findall(r"Well", item[1]) == ['Well']:
skiprows = index
data_format = 'listC'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and re.fullmatch(r"G-factor", item[0]): # find the g factor
g_factor = float(item[4])
nrows = end_of_data - skiprows - 1 # calculate the length of data table
nrows_meta = end_of_metadata - skiprows_meta - 1 # calculate the length of metadata table (number of rows depends on the number of repeats)
raw_data = pd.read_csv(csv_file, sep=',', engine='python', skiprows=skiprows, nrows=nrows, encoding='utf-8')
raw_metadata = pd.read_csv(csv_file, sep=',', engine='python', skiprows=skiprows_meta, nrows=nrows_meta, encoding='utf-8')
return FA._pre_process_list(raw_data, raw_metadata, data_format, size), g_factor
def _pre_process_list(raw_data, raw_metadata, data_format, size):
"""Extracts the data and metadata for each channel and repeat from the raw data and raw metadata
and returns a nested dictionary containing data and metadata for each repeat and channel.
:param raw_data: Data frame containing raw data
:type raw_data: pandas data frame
:param raw_metadata: Data frame containing raw metadata
:type raw_metadata: pandas data frame
:param data_format: Type of list (listA, listB, or listC)
:type data_format: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A dictionary containing data and metadata
:rtype: dict"""
# remove the '0' from middle position of well numbers (A01 -> A1), done by reassigning the 'Well' column to a Series containing modified well numbers
raw_data['Well'] = raw_data['Well'].apply(lambda x: x[0] + x[2] if x[1] == '0' else x)
data_frames = {} # dictionary to store data frames
repeats = list(raw_metadata['Repeat'].to_numpy()) # generate a list with repeats based on the metadata table, e.g. for 3 repeats -> [1,2,3]
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # generate a list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # generate a list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # generate a list of well IDs for the pre-processed data frames
for index, repeat in enumerate(repeats): # iterate over the number of repeats
if data_format == 'listA':
groupped_data = raw_data.groupby(raw_data.PlateRepeat).get_group(repeat) # group and extract the data by the plate repeat column, i.e. in each iteration get data only for the current repeat
p_groupped = groupped_data.iloc[::3, :] # extract data only for the p channel, i.e. each third row starting from the first row
s_groupped = groupped_data.iloc[1::3, :] # extract data only for the s channel, i.e. each third row starting from the second row
p_raw_data = p_groupped[['Well', 'Signal']] # extract only the two relevant columns
s_raw_data = s_groupped[['Well', 'Signal']] # for each channel
if data_format in ['listB', 'listC']:
# the column naming is different for the first repeat ('Signal'), then it's 'Signal.1', 'Signal.2', etc.
if repeat == 1:
p_raw_data = raw_data[['Well', 'Signal']]
s_raw_data = raw_data[['Well', f'Signal.{repeat}']]
else:
p_raw_data = raw_data[['Well', f'Signal.{repeat + index - 1}']] # the column containing data to be extracted is calculated in each iteration
s_raw_data = raw_data[['Well', f'Signal.{repeat + index}']]
# create an empty df with no columns and indexes matching the plate size
indexes = pd.DataFrame(well_ids, columns=['Wells'])
empty_frame = indexes.set_index('Wells')
p_raw_data.set_index('Well', inplace=True) # set the row indexes as the well numbers
p_raw_data.set_axis(['p'], axis=1, inplace=True) # rename the 'Signal' column to 'p'
p_data = empty_frame.join(p_raw_data) # join the raw data df to an empty frame based on the indexes, assigns 'NaN' to indexes not present in the raw data table
s_raw_data.set_index('Well', inplace=True)
s_raw_data.set_axis(['s'], axis=1, inplace=True)
s_data = empty_frame.join(s_raw_data)
metadata = raw_metadata.iloc[[repeat-1]].astype({'Measurement date': 'datetime64[ns]'}) # extract the row with metadata relevant for each repeat and convert date and time into a datetime object
data_frames[f'repeat_{repeat}'] = {'metadata': metadata, 'data': {'p': p_data, 's': s_data}} # add data frames to the dictionary
return data_frames
def visualise(self, colorby='Type', labelby='Type', title="", cmap='Paired', dpi=250, export=False):
"""Returns a visual representation of the plate map.
The label and colour for each well can be customised to be a platemap variable, for example 'Type', 'Protein Name', 'Protein Concentration', etc.
It can also be the p or s channel value, calculated anisotropy or intensity, however in such cases the 'colorby' or 'labelby'
parameters must be passed as tuple of two strings specifying the repeat number and variable to display, for example ('repeat_2', 'p_corrected').
:param colorby: Variable to color code by, for example 'Type', 'Contents', 'Protein Concentration', ('repeat_2', 'p'), defaults to 'Type'.
:type colorby: str or tuple
:param labelby: Variable to display on the wells, for example 'Type', 'Protein Name', ('repeat_1', 's_corrected'), defaults to 'Type'.
:type labelby: str or tuple
:param title: Sets the title of the figure, defaults to None.
:type title: str
:param cmap: Sets the colormap for the color-coding, defaults to 'Paired'.
:type cmap: str
:param dpi: Resolution of the exported figure in points per inches, defaults to 250.
:type dpi: int
:param export: If True, save the figure as .png file, defaults to False.
:type export: bool
:return: Visual representation of the plate map.
:rtype: figure
"""
plate_map = self.plate_map
size = plate_map.shape[0]
scinot = False
str_len = None
if type(labelby) == tuple: # option for labelling by the p or s anisotropy values
plate_map = self.plate_map.join(self.data_dict[labelby[0]]['data'][labelby[1]]) # data frame containing p or s values from specified repeat is added to the platemap
labelby = labelby[1]
if type(colorby) == tuple: # option for colouring by the p or s anisotropy values
plate_map = plate_map.join(self.data_dict[colorby[0]]['data'][colorby[1]])
colorby = colorby[1]
if labelby in ['Protein Concentration', 'Tracer Concentration', 'Competitor Concentration', 'p', 's', 'p_corrected', 's_corrected', 'r_raw', 'r_corrected', 'i_raw' , 'i_corrected']:
if sum((plate_map[labelby] > 1000) | (plate_map[labelby] < 0)) > 0: # display in sci notation if the number is greater than 1000 or less than 0
scinot = True
str_len = 8
return pm.visualise(platemap=plate_map, title=title, size=size, export=export, cmap=cmap, colorby=colorby, labelby=labelby, dpi=dpi, scinot=scinot, str_len=str_len)
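# Illustrative usage sketch: colour the plate map by well type and label each well with the
# background corrected p channel value from the second repeat ('fa' is assumed to be an FA
# object on which background_correct has already been run).
#
#   fa.visualise(colorby='Type', labelby=('repeat_2', 'p_corrected'), title='Plate 1')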
def invalidate(self, valid=False, **kwargs):
"""Invalidates wells, entire columns and/or rows. Any of the following keyword arguments, or their combination,
can be passed: wells, rows, columns. For example, to invalidate well A1, rows C and D and columns 7 and 8 execute
the following: invalidate(wells='A1', rows=['C','D'], columns=[7,8]).
To validate previously invalidated wells, rows and/or columns, pass the additional 'valid' argument as True.
:param valid: Sets the stipulated well, row or column invalid ('False') or valid ('True'), defaults to False.
:type valid: bool
:param wells: Wells to be invalidated passed as a string or list of strings.
:type wells: str or list
:param rows: Rows to be invalidated passed as a string or list of strings.
:type rows: str or list
:param columns: Columns to be invalidated passed as an integer or list of integers.
:type columns: int or list
"""
# execute the corresponding invalidate functon from the platemapping package
if 'wells' in kwargs:
pm.invalidate_wells(platemap=self.plate_map, wells=kwargs['wells'], valid=valid)
if 'rows' in kwargs:
rows = tuple(kwargs['rows']) # convert the rows to tuple because invalidate_rows cannot take in a list
pm.invalidate_rows(platemap=self.plate_map, rows=rows, valid=valid)
if 'columns' in kwargs:
pm.invalidate_cols(platemap=self.plate_map, cols=kwargs['columns'], valid=valid)
if len(kwargs) == 0: # return error if neither of the keyword arguments is passed
raise TypeError('No arguments were passed. Specify the wells, rows and/or columns to be invalidated!')
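# Illustrative usage sketch: invalidate a single well, two rows and two columns, then
# re-validate the same columns ('fa' is assumed to be an FA object).
#
#   fa.invalidate(wells='A1', rows=['C', 'D'], columns=[7, 8])
#   fa.invalidate(columns=[7, 8], valid=True)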
def background_correct(self):
"""Calculates background corrected values for p and s channel in all repeats.
The background correction is done by subtracting the mean value of blank p (or s) channel intensity for a given
protein (or tracer) concentration from each non-blank value of the p (or s) channel intensity for that concentration.
"""
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
for key, value in self.data_dict.items():
metadata, data = value.values()
# calculate p and s corrected data frame using _background_correct func and add it to data dictionary
self.data_dict[key]['data']['p_corrected'] = FA._background_correct(data['p'], self.plate_map, t_type)
self.data_dict[key]['data']['s_corrected'] = FA._background_correct(data['s'], self.plate_map, t_type)
print('Background correction has been successfully performed!')
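# Illustrative usage sketch: background correction takes no arguments and adds the
# 'p_corrected' and 's_corrected' data frames to each repeat in data_dict.
#
#   fa.background_correct()
#   fa.data_dict['repeat_1']['data']['p_corrected'].head()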
def _background_correct(data, platemap, t_type):
"""Calculate background corrected p or s channel values for protein or tracer titration.
:param data: Data frame with raw p or s channel values
:type data: pandas df
:param platemap: Data frame with platemap
:type platemap: pandas df
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:return: Data frame with background corrected values
:rtype: pandas df
"""
df = platemap.join(data) # join p or s channel data to platemap
df[df.columns[-1]] = df[df.columns[-1]][df['Valid'] == True] # replace 'p' or 's' values with NaN if the well is invalidated
col_name = df.columns[-1] + '_corrected'
no_index = df.reset_index() # move the 'well id' index to df column
mindex = pd.MultiIndex.from_frame(no_index[['Type', f'{t_type} Name', f'{t_type} Concentration']]) # create multiindex
reindexed = no_index.set_index(mindex).drop(['Type', f'{t_type} Name', f'{t_type} Concentration'], axis=1) # add multiindex to df and drop the columns from which multiindex was created
mean = reindexed.groupby(level=[0,1,2]).mean().drop('Valid', axis=1) # calculate mean for each group of three wells and remove 'Valid' column
mean.rename(columns={mean.columns[-1]: 'Mean'}, inplace=True) # rename the last column to 'Mean' to avoid errors during joining
blank = mean.xs('blank', level=0, drop_level=True) # take a group with only blank wells
joined = reindexed.join(blank, on=[f'{t_type} Name', f'{t_type} Concentration']) # join the mean data for blanks to the whole df
joined[col_name] = joined[joined.columns[-2]] - joined[joined.columns[-1]] # calculate background corrected values
jindexed = joined.set_index('index', append=True).reset_index(level=[0,1,2]).rename_axis(None) # set index to 'well id' and move multiindex to df columns
return jindexed[[col_name]] # extract and return df with corrected values
def calc_r_i(self, correct=True, plot_i=True, thr=80):
"""Calculates anisotropy and fluorescence intensity for each well in all repeats using the raw and background corrected p and s channel data.
The fluorescence intensity (I) and anisotropy (r) are calculated using the following formulas: I = s + (2*g*p) for intensity and
r = (s - (g*p)) / I for anisotropy. Results are stored in the following data frames: i_raw and r_raw (calculated using the uncorrected
p and s channel values) and i_corrected and r_corrected (calculated using the background corrected p and s channel values).
The function also calculates the percentage intensity of the non-blank wells as compared to the blank corrected wells using the formula:
(raw intensity - corrected intensity) / raw intensity * 100%. If 'plot_i=True', the graph of percentage intensity against the
well ids for all repeats is displayed along with a summary of wells above the threshold (defaults to 80%).
:param correct: Calculate the anisotropy and intensity using the background corrected values of p and s channel data, defaults to True.
:type correct: bool
:param plot_i: Display plots of the percentage intensity against well ids for all repeats, defaults to True.
:type plot_i: bool
:param thr: Percentage intensity above which the wells are included in the summary if plot_i=True, defaults to 80.
:type thr: int
"""
FA.th = thr # assign the threshold value to the class variable so that it can be accessed by functions that are not class methods
for key, value in self.data_dict.items(): # iterate over all repeats
metadata, data = value.values()
# calculate raw intensity and anisotropy using _calc_r_i function and add them to data dictionary
i, r = FA._calc_r_i(data['p'], data['s'], self.g_factor, 'raw')
self.data_dict[key]['data']['i_raw'] = i
self.data_dict[key]['data']['r_raw'] = r
if correct: # calculate intensity and anisotropy using background corrected values of p and s
if 'p_corrected' not in data or 's_corrected' not in data: # check that background correction has been performed for both channels
raise AttributeError('The corrected anisotropy and intensity can only be calculated after background correction of the raw p and s channel data.')
i_c, r_c = FA._calc_r_i(data['p_corrected'], data['s_corrected'], self.g_factor, 'corrected')
self.data_dict[key]['data']['i_corrected'] = i_c
self.data_dict[key]['data']['r_corrected'] = r_c
# calculate intensity percentage data and add it to data dict
self.data_dict[key]['data']['i_percent'] = FA._calc_i_percent(i, i_c, self.plate_map)
if plot_i: # plot the percentage intensity against the well ids for all repeats
FA._plot_i_percent(self.data_dict, self.plate_map)
else:
print('The fluorescence intensity and anisotropy have been successfully calculated!\n')
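# Illustrative usage sketch: calculate raw and corrected anisotropy and intensity without
# displaying the percentage intensity plots, using a custom 90% threshold.
#
#   fa.calc_r_i(correct=True, plot_i=False, thr=90)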
def _calc_r_i(p, s, g, col_suffix):
"""Calculates either anisotropy or intensity and labels the resulting dfs according to the col_suffix parameter
:param p: Data frame with p channel data (can be either raw or background corrected)
:type p: pandas df
:param s: Data frame with s channel data (can be either raw or background corrected)
:type s: pandas df
:param g: G-factor
:type g: float
:param col_suffix: Suffix to add to column name of the resulting intensity or anisotropy data frame, e.g. 'raw', 'corrected'
:type col_suffix: str
:return: Two data frames with calculated anisotropy and intensity values
:rtype: tuple of pandas df"""
p_rn = p.rename(columns={p.columns[0]: s.columns[0]}) # rename the col name in p data frame so that both p and s dfs have the same col names to enable calculation on dfs
i = s + (2 * g * p_rn) # calculate intensity
r = (s - (g * p_rn)) / i # and anisotropy
i_rn = i.rename(columns={i.columns[0]: 'i_' + col_suffix}) # rename the col name using the column suffix argument
r_rn = r.rename(columns={r.columns[0]: 'r_' + col_suffix})
return i_rn, r_rn
def _calc_i_percent(ir, ic, platemap):
"""Calculate the percentage intensity of blank wells compared to non-blank wells.
:param ir: Data frame with corrected intensity
:type ir: pandas df
:param ic: Data frame with raw intensity
:type ic: pandas df
:param platemap: Platemap
:type platemap: pandas df
:return: Data frame with percentage intensity data
:rtype: pandas df"""
ir_rn = ir.rename(columns={ir.columns[0]:ic.columns[0]}) # rename the col name in raw intensity df so that it's the same as in corrected intensity df
percent = (ir_rn - ic) / ir_rn * 100
percent.rename(columns={'i_corrected':'i_percent'}, inplace=True)
return percent
def _plot_i_percent(data_d, platemap):
"""Plot the percentage intensity data against the well ids with a horizontal threshold bar and print a summary of wells above the
threshold for all non-blank and non-empty cells in all repeats. A single figure with multiple subplots for each repeat is created.
:param data_d: Data dictionary
:type data_d: dict
:param platemap: Platemap needed to subset only the non-blank and non-empty cells
:type platemap: pandas df"""
summary = '' # empty string to which lists of wells to be printed are appended after checking data from each repeat
fig = plt.figure(figsize=(8*int((len(data_d) + 2 - abs(len(data_d) - 2))/2), 4*int( math.ceil((len(data_d))/2)) ), tight_layout=True) # plot a figure with variable size depending on the number subplots (i.e. repeats)
for key, value in data_d.items(): # iterate over all repeats
metadata, data = value.values()
df = platemap.join(data['i_percent'])
df_per = df[(df['Type'] != 'blank') & (df['Type'] != 'empty')] # subset only the non-blank and non-empty cells
plt.subplot(int( math.ceil((len(data_d))/2) ), int( (len(data_d) + 2 - abs(len(data_d) - 2))/2 ), int(key[-1]))
plt.bar(df_per.index, df_per['i_percent']) # plot a bar plot with intensity percentage data
plt.axhline(FA.th, color='red') # plot horizontal line representing the threshold on the bar plot
ax = plt.gca() # get the axis object
ax.set_ylabel('')
ax.set_xlabel('wells')
ax.set_title(f'Repeat {key[-1]}')
ax.yaxis.set_major_formatter(mtick.PercentFormatter()) # set formatting of the y axis as percentage
xlabels = [i if len(i) == 2 and i[1] == '1' else '' for i in list(df_per.index)] # create a list of xticks and xticklabels consisting only of the first well from each row
ax.set_xticks(xlabels)
ax.set_xticklabels(xlabels)
wells = list(df_per[df_per['i_percent'] > FA.th].index) # get a list of well ids above the threshold for this repeat
if wells != []: # append wells above the threshold and the respective repeat number to the string with appropriate formatting
summary = summary + f'\t{key}: {str(wells)}\n'
plt.show() # ensure the figure is displayed before printing the summary message
if summary != '': # display the summary of wells above the threshold
print(f'In the following wells the percentage intensity value was above the {FA.th}% threshold:')
print(summary)
else:
print(f'None of the wells has the percentage intensity value above the {FA.th}% threshold.')
def plot_i_percent(self):
"""Disply the graph of percentage intesity of the non blank wells as comapred to the blank corrected wells against well ids for all repeats."""
return FA._plot_i_percent(self.data_dict, self.plate_map)
def calc_mean_r_i(self):
"""Calculates data required for fitting a logistic curve to anisotropy and intensity data, i.e. the mean anisotropy and intensity
over the number of replicates for each specific protein (or tracer) concentration along with standard deviation and standard error.
"""
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
for key, value in self.data_dict.items():
metadata, data = value.values()
# create dictionaries 'r_mean' and 'i_mean' containing mean anisotropy and intensity data frames for each protein-tracer pair
data['r_mean'] = FA._calc_mean_r_i(data['r_corrected'], self.plate_map, t_type)
data['i_mean'] = FA._calc_mean_r_i(data['i_corrected'], self.plate_map, t_type)
# create data frame for storing the fitting params and set lambda value to 1
cols = ['rmin', 'rmin error', 'rmax', 'rmax error', 'r_EC50', 'r_EC50 error', 'r_hill', 'r_hill error', 'Ifree',
'Ifree error', 'Ibound', 'Ibound error', 'I_EC50', 'I_EC50 error', 'I_hill', 'I_hill error', 'lambda']
data['fit_params'] = pd.DataFrame(index=FA.final_fit.index, columns=cols)
data['fit_params']['lambda'] = 1
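# Illustrative usage sketch: average the corrected anisotropy and intensity over the
# replicates and inspect the result for a hypothetical protein-tracer pair (the names
# 'Protein A' and 'Tracer X' are placeholders).
#
#   fa.calc_mean_r_i()
#   fa.data_dict['repeat_1']['data']['r_mean'][('Protein A', 'Tracer X')]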
def _calc_mean_r_i(df, plate_map, t_type):
"""Calculates mean anisotropy for each protein (or tracer) concentration value, its standard deviation and standard error.
Creates an empty data frame for storing the fitting parameters for each repeat and sets the lambda value as 1.
:param df: Data frame with anisotropy or intensity values
:type df: pandas df
:param plate_map: Plate map data frame
:type plate_map: pandas df
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:return: A dictionary with a data frame for each unique protein-tracer pair
:rtype: dict"""
join = plate_map.join(df) # join anisotropy or intensity values to platemap
subset = join[(join['Type'] != 'blank') & (join['Type'] != 'empty')] # use only the non-blank and non-empty cells
noidx = subset.reset_index()
group = noidx.groupby([f'{t_type} Concentration', 'Protein Name', 'Tracer Name'])
mean = group.mean()
std = group.std()
sem = group.sem()
meanr = mean.rename(columns={mean.columns[-1]: 'mean'}).drop('Valid', axis=1)
stdr = std.rename(columns={std.columns[-1]: 'std'}).drop('Valid', axis=1) # rename the std column and remove the 'Valid' column
semr = sem.rename(columns={sem.columns[-1]: 'sem'}).drop('Valid', axis=1) # rename the sem column and remove the 'Valid' column
merge = pd.concat([meanr, stdr, semr], axis=1)
tosplit = merge.reset_index() # remove multiindex
split = dict(tuple(tosplit.groupby(['Protein Name', 'Tracer Name']))) # split df based on multiindex so that a new df is created for each unique combination of protein and tracer
return split
def calc_lambda(self, approve=True):
"""Calculates lambda value for each protein-tracer pair for all repeats and, if approve=True, displays them so that
a single value can be saved for each protein-tracer pair which will be used in subsequent calculations.
:param approve: Display lambda, rmin and rmax values for each protein-tracer pair and for all repeats, defaults to True.
:type approve: bool
"""
w_info = [] # list of tuples with info (rep no, lambda value, etc) need to generate the widgets
for key, value in self.data_dict.items(): # iterate over all repeats
metadata, data = value.values()
df = data['fit_params'].copy() # create a copy of the fitting params df
df['lambda'] = df['Ibound']/df['Ifree'] # calculate the lambda value in a copied data frame
if approve == False:
#self.data_dict[key]['data']['fit_params']['lambda'] = df['lambda'] # add the lambda values to fitting params df
print('The lambda values were calculated and saved.')
else:
for pt_pair in list(df.index): # iterate over each protein-tracer pair and create tuples with info needed for generation of widgets
rating = 100 # place for the rating function
info = (key, pt_pair, rating, df.loc[pt_pair, "lambda"], data['fit_params'].loc[pt_pair, "rmin"], data['fit_params'].loc[pt_pair, "rmax"]) # tuple containing repeat no., protein-tracer names, rating, calculated lambda, rmin and rmax
w_info.append(info)
if approve == True: # execute the function for displying and handling the widgets
return FA._widget(self.data_dict, w_info, df)
def _widget(data_dict, w_info, df):
"""Function for generating and displaying the widgets with lambda values.
It generates widgets for each tuple in the w_info list.
:param data_dict: Data dictionary
:type data_dict: dict
:param w_info: A list of tuples containing information needed for the generation of widgets
:type w_info: list
:param df: Data frame with calculated lambda values
:type df: pandas df
"""
w_info.sort(key=lambda x: x[1]) # sort the tuples by the protein name so that the widgets are displayed by protein-tracer name
reps = [wg.HTML(f"Repeat {i[0][-1]}") for i in w_info] # list of text widgets with repeat numbers
proteins = [wg.HTML(f"{i[1][0]}") for i in w_info] # list of text widgets with protein names
tracers = [wg.HTML(f"{i[1][1]}") for i in w_info] # list of text widgets with tracer names
#scores = [wg.HTML(f"Score: {i[2]}") for i in w_info]
lambdas = [wg.Checkbox(value=False, description=r"$\lambda$ = %.4f" % (i[3])) for i in w_info] # list of checkbox widgets with lambda values
rminmax = [wg.Checkbox(value=False, description="rmin = %.5f, rmax = %.5f" % (i[4], i[5])) for i in w_info] # list of checkbox widgets with rmin and rmax values
v_lambdas = wg.VBox(lambdas) # group all lambda checkbox widgets into a vertical list layout
v_proteins = wg.VBox(proteins) # group all protein name widgets into a vertical list layout
v_tracers = wg.VBox(tracers) # group all tracer name widgets into a vertical list layout
v_reps = wg.VBox(reps) # group all repeat number widgets into a vertical list layout
#v_scores = wg.VBox(scores)
v_rminmax = wg.VBox(rminmax) # group all rmin and rmax checkbox widgets into a vertical list layout
hbox = wg.HBox([v_proteins, v_tracers, v_reps, v_lambdas, v_rminmax]) # arrange the vertical boxes into one widget box
button = wg.Button(description='Save') # create a button for saving the selected values
print("""Choose the lambda values that will be saved for each protein-tracer pair. \nIf you choose more than one lambda value for a given protein-tracer pair, only the first choice will be saved.\nIf you do not choose any lambda value for a given protein-tracer pair the default value of 1 will remain but you still need to select the rmin and rmax for this pair.""")
display(hbox, button) # display the box with widgets and the button
def btn_eventhandler(obj):
"""Function that is executed when the 'Save' button is clicked. It checks which checkboxes were ticked and
updates the final fit df with the calculated lambda values and/or rmin and rmax values.
Only the first value of lambda for a given protein-tracer will be saved.
"""
added_lambda = [] # protein-tracer pairs for which lambda values were added
added_rminmax = [] # protein-tracer pairs for which rmin and rmax values were added
for i in range(0, len(lambdas)): # iterate over each checkbox widget
index = (proteins[i].value, tracers[i].value) # get the tuple with protein-tracer names
if lambdas[i].value == True: # if the lambda checkbox was ticked, the widget's 'value' attribute is True
if index not in added_lambda: # if lambda for this protein-tracer pair has not yet been added
FA.final_fit.loc[index, "lambda"] = df.loc[index, "lambda"] # add the calculated lambda to the final_fit d
FA.final_fit.loc[index, ['rmin','rmin error','rmax','rmax error']] = data_dict[f'repeat_{reps[i].value[-1]}']['data']['fit_params'].loc[index, ['rmin','rmin error','rmax','rmax error']] #add rmin, rmax and their errors to the final_fit df
added_lambda.append(index)
if rminmax[i].value == True:
if index not in added_lambda and index not in added_rminmax: # if neither lambda nor rmin/rmax for this protein-tracer pair have been added
FA.final_fit.loc[index, ['rmin','rmin error','rmax','rmax error']] = data_dict[f'repeat_{reps[i].value[-1]}']['data']['fit_params'].loc[index, ['rmin','rmin error','rmax','rmax error']]
added_rminmax.append(index)
print('Selected values were saved.')
button.on_click(btn_eventhandler) #link the button event handler function with actual clicking of the button using 'on_click' function
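# Illustrative usage sketch: after logistic fitting, review the lambda, rmin and rmax values
# per repeat in the displayed widgets and save the chosen ones to FA.final_fit.
#
#   fa.calc_lambda(approve=True)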
def calc_amount_bound(self):
"""Calculates the amount of fluorescent tracer bound to the protein using the following formula:
L_B = ( (λ*(rmax - r)) / (r - rmin) + 1 )^(-1) * L_T
The amount bound is calculated as a mean for all replicates for each protein (or tracer) concentration along with
its standard deviation and standard error.
"""
pt_pairs = list(FA.final_fit[FA.final_fit['rmin'].isna()].index) # list of indexes for which rmin and rmax are not defined
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
if pt_pairs != []:
raise DataError(f"The 'rmin' and 'rmax' values are not defined for the following protein-tracer pairs: {pt_pairs}.\nUse 'calc_lambda' function or 'set_fitparams' to choose 'rmin' and 'rmax' values.")
for key, value in self.data_dict.items():
metadata, data = value.values()
data['amount_bound'] = FA._calc_amount_bound(data['r_corrected'], self.plate_map, t_type) # create dictionary 'r_mean' with mean anisotropy data frames for each protein-tracer pair
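# Illustrative usage sketch: once rmin, rmax and lambda are stored in FA.final_fit, the
# amount of bound tracer can be calculated for every repeat.
#
#   fa.calc_amount_bound()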
def _calc_amount_bound(df, platemap, t_type):
"""
:param df: Data frame with anisotropy or intensity values
:type df: pandas df
:param plate_map: Plate map data frame
:type plate_map: pandas df
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:return: A dictionary with data frames for each unique protein-tracer pair
:rtype: dict
"""
join_pm = platemap.join(df) # join df with corrected anisotropy to the platemap df
subset = join_pm[(join_pm['Type'] != 'blank') & (join_pm['Type'] != 'empty')] # take only non-blank and non-empty wells
re_idx = subset.set_index(pd.MultiIndex.from_frame(subset[['Protein Name', 'Tracer Name']])).rename_axis([None,None]) # replace the index with multiindex (protein-tracer name) and remove its names
join_ff = re_idx.join(FA.final_fit) # join the final fitting parameters to the anisotropy df on multiindex (protein-tracer)
# calculate the amount bound (all parameters needed are already in the data frame)
join_ff['mean'] = (((((join_ff["lambda"] * (join_ff['rmax']-join_ff['r_corrected'])) / (join_ff['r_corrected'] - join_ff['rmin']))) +1) **(-1)) * join_ff['Tracer Concentration']
# remove the redundant columns and set dtype of 'amount' column as float to avoid pandas DataError
drop = join_ff.drop(['r_corrected','Valid', 'rmin', 'rmax', 'rmin error', 'rmax error', "lambda", 'Kd'], axis=1).astype({'mean': 'float64'})
group = drop.groupby([f'{t_type} Concentration', 'Protein Name', 'Tracer Name'])
mean = group.mean()
std = group.std()
sem = group.sem()
stdr = std.rename(columns={std.columns[-1]: 'std'}) # rename column to 'std'
semr = sem.rename(columns={sem.columns[-1]: 'sem'}) # rename column to 'sem'
merge = pd.concat([mean, stdr, semr], axis=1) # merge the amount, std and sem data frames into one df
tosplit = merge.reset_index() # remove multiindex
split = dict(tuple(tosplit.groupby(['Protein Name', 'Tracer Name']))) # dictionary with a data frame for each protein-tracer pair
return split
##### Curve fitting functions #####
def _r_func(pc, rmin, rmax, EC50, hill):
"""Function for fitting a curve to the plot of anisotropy (or intensity) against protein concentration,
where pc is protein concentration, rmin is the lower asymptote, rmax is the upper asymptote,
EC50 is midpoint of transition (pc at point of inflection), hill is the slope
"""
return (rmin - rmax) / (1 + (pc/EC50)**hill) + rmax
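# Sanity check of the logistic function above (a brief note, not part of the original code):
# at pc = EC50 the term (pc/EC50)**hill equals 1, so the function returns
# (rmin - rmax)/2 + rmax = (rmin + rmax)/2, i.e. the midpoint of the transition;
# as pc -> 0 the value tends to rmin and as pc -> infinity it tends to rmax.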
def _init_params(df, t_type):
"""Estimates initial parameters for the r_func that are passed to the curve fitting function
:param df: Data frame containing mean values of anisotropy or intensity
:type df: pandas df
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:return: List with estimated parameters of min, max and EC50, hill is assumed to be 1
:rtype: list
"""
rmin = df['mean'].min()
rmax = df['mean'].max()
mid = (rmax + rmin) / 2
mid_idx = df['mean'].sub(mid).abs().argmin()
EC50 = df.iloc[mid_idx][f'{t_type} Concentration']
init_param = [rmin, rmax, EC50, 1]
return init_param
def _curve_fit(func, df, t_type, var, **kwargs):
"""Fits a curve to the plot of specified variable against protein (or tracer) concentration using pre-defined funcion.
:param func: Funcion describing the curve
:type func: func
:param df: Data frame containing mean values of anisotropy, intensity or amount bound and their errors (std and sem).
:type df: pandas df
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param **kwargs: Keyword arguments that can be passed into the scipy curve_fit function
:param var: Type of fitting performed, either logistic ('log') or single site ('ssb').
:type var: str
:return: A list of fitting parameters along with their error in proper order so that it can be added to the fitting params data frame
:rtype: list
"""
drop = df[df[f'{t_type} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaN mean values from data fitting
if 'sigma' in kwargs:
sigma = drop[kwargs.pop('sigma')] # take the column with std or sem error data
else:
sigma = None
if 'p0' not in kwargs and var == 'log': # user did not pass their initial guess
p0 = FA._init_params(drop, t_type)
elif 'p0' in kwargs and var == 'log': # user provided their own initial guess, remove it from kwargs and pass it separately so that only one p0 argument reaches curve_fit
p0 = kwargs.pop('p0')
else: # single site fitting does not use the estimated initial parameters; pop p0 from kwargs (if present) to avoid passing it to curve_fit twice
p0 = kwargs.pop('p0', None)
popt, pcov = curve_fit(func, drop[f'{t_type} Concentration'], drop['mean'], p0=p0, sigma=sigma, **kwargs)
perr = np.sqrt(np.diag(pcov)) # calculate the error of the fitting params
if var == 'log':
all_params = np.insert(popt, obj=[1,2,3,4], values=perr) # insert the errors after the respective fitting parameter value
else:
all_params = np.insert(popt[::-1], obj=[1,2], values=perr[::-1])
return list(all_params)
def logistic_fit(self, prot=['all'], trac=['all'], rep=['all'], var='both', **kwargs):
"""Fits a logistic curve to the plot of anisotropy (or intensity) against protein concentration.Returns the fitting
parameters with associated errors for each repeat that are stored in the fitting paramters data frame in data_dict.
:param prot: List of protein names for which fitting is performed, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which fitting is performed, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which fitting is performed, defaults to ['all'].
:type rep: list of ints
:param var: A variable for which fitting is performed (either 'r' for anisotropy or 'i' for intensity), defaults to 'both'.
:type var: str
:param **kwargs: Keyword arguments that can be passed to the SciPy curve_fit function.
"""
# get data_dict and a list of protein-tracer names
data_dict, pt_pairs = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, rep)
errors = [] # list for storing the details of errors due to failed fitting
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
for rep, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
for pt_pair in pt_pairs: # iterate over all protein-tracer pairs
if var == 'r' or var == 'both':
try: # try fitting the curve to anisotropy data
r_mean = data['r_mean'][pt_pair] # extract the df with mean anisotropy for a given protein-tracer pair
params_r = FA._curve_fit(FA._r_func, r_mean, t_type, 'log', **kwargs) # fit the data to logistic curve using the initial parameters
data['fit_params'].loc[pt_pair, ['rmin','rmin error','rmax', 'rmax error', 'r_EC50', 'r_EC50 error', 'r_hill', 'r_hill error']] = params_r # add the fitting parameters to the respective df
except RuntimeError as e: # if fitting fails, add details about the error to the errors list and proceed to intensity data fitting
r_error_info = (rep, 'r', pt_pair, e)
errors.append(r_error_info)
if var == 'i' or var == 'both':
try: # try fitting the curve to intensity data
i_mean = data['i_mean'][pt_pair] # extract the df with i mean for a given protein-tracer pair
params_i = FA._curve_fit(FA._r_func, i_mean, t_type, 'log', **kwargs)
data['fit_params'].loc[pt_pair, ['Ifree', 'Ifree error', 'Ibound','Ibound error', 'I_EC50', 'I_EC50 error', 'I_hill', 'I_hill error']] = params_i
except RuntimeError as e: # if fitting fails, add details about the error to the errors list and proceed to the next protein-tracer pair
i_error_info = (rep, 'i', pt_pair, e)
errors.append(i_error_info)
if errors != []: # raise a warning if fitting failed for any protein-tracer pair
warnings.warn(f"The curve fitting failed in the following cases:\n\n{errors}\n\nTry passing additional keyword arguments to the fitting function.", RuntimeWarning)
def _LB(LT, PT, Kd):
"""Function for fitting a curve to the plot of concentration of fluorescent tracer bound to the target protein against
protein (or tracer) concentration.
LB is the concentration of fluorescent tracer bound to the target protein
LT is total protein concentration
PT is total tracer concentration
Kd is dissociation constant
"""
return ( (LT+PT+Kd) - np.sqrt( ( ((LT+PT+Kd)**2) - (4*LT*PT) ) ) ) / 2
def single_site_fit(self, prot=['all'], trac=['all'], rep=['all'], **kwargs):
"""Fits a curve to the plot of concentration of fluorescent tracer bound to the target protein against the
protein (or tracer) concentration.
:param prot: List of protein names for which fitting is performed, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which fitting is performed, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which fitting is performed, defaults to ['all'].
:type rep: list of ints
:param **kwargs: Keyword arguments that can be passed to the SciPy curve_fit function.
"""
# get data_dict and a list of protein-tracer names
data_dict, pt_pairs = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, rep)
errors = [] # list for storing the details of errors due to failed fitting
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
for rep, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
for pt_pair in pt_pairs: # iterate over all protein-tracer pairs
try: # try fitting the curve to anisotropy data
amount_b = data['amount_bound'][pt_pair] # extract the df with mean amount bound for a given protein-tracer pair
params = FA._curve_fit(FA._LB, amount_b, t_type, 'ssb', **kwargs)
if t_type == 'Protein':
FA.final_fit.loc[pt_pair, ['Kd', 'Kd error', 'LT', 'LT error']] = params
else:
FA.final_fit.loc[pt_pair, ['Kd', 'Kd error', 'PT', 'PT error']] = params
except RuntimeError as e:
error_info = (rep, pt_pair, e)
errors.append(error_info)
if errors != []: # raise a warning if fitting failed for any protein-tracer pair
warnings.warn(f"The curve fitting failed in the following cases:\n\n{errors}\n\nTry passing additional keyword arguments to the fitting function", RuntimeWarning)
##### Anisotropy and binding constant plotting functions #####
def _get_items_to_plot(data_d, platemap, prot, trac, rep):
"""Creates a list of tuples with protein-tracer names based on the 'prot' and 'trac' parameters
and a subset of data_dict based on the 'rep' parameter.
"""
if prot[0] == 'all' and trac[0] == 'all': # all proteins and all tracers
pt_pairs = list(data_d['repeat_1']['data']['r_mean'].keys()) # 'r_mean' dict contains all protein-tracer names as dict keys
elif prot[0] != 'all' and trac[0] == 'all': # all tracers and some proteins
trac = list(platemap['Tracer Name'].dropna().unique()) # take all tracer names from the platemap
pt_pairs = [item for item in product(prot, trac)]
elif prot[0] == 'all' and trac[0] != 'all': # all proteins and some tracers
prot = list(platemap['Protein Name'].dropna().unique()) # take all protein names from the platemap
pt_pairs = [item for item in product(prot, trac)]
elif prot[0] != 'all' and trac[0] != 'all': # some proteins and some tracers
pt_pairs = [item for item in product(prot, trac)]
# define a data dictionary to iterate through based on the 'rep' parameter:
if rep[0] == 'all': # for all repeats use the whole data_dict
data_dict = data_d
else: # for specific repeats use the subset of data_dict containg only the repeats specified in 'rep' parameter
data_dict = {key: value for key, value in data_d.items() if int(key[-1]) in rep}
return data_dict, pt_pairs
def _plot_ani(data_df, params_df, pt_pair, t_type, fig, axs, err, var, rep, exp=False, disp=True, leg=True, dpi=250):
"""General function for plotting the anisotropy and intensity and saving the figures.
:param data_df: Data frame with mean values of anisotropy or intensity and their associated errors
:type data_df: pandas df
:params_df: Data frame with fitting parameters
:type params_df: pandas df
:param pt_pair: protein-tracer pair for which the graph is to be generated
:type pt_pair: tuple
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param fig: Figure on which the data is plotted, needed for saving the figure as png file
:type fig: matplotlib Figure
:param axs: Indexed axis object on which the data is to be plotted, (e.g. axs[0, 1])
:type axs: matplotlib AxesSubplot
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param var: Variable for which the plot is to be generated ('r' or 'i')
:type var: str
:param rep: Repeat number for labelling of the graph
:type rep: 'str'
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param disp: Determines whether the figure will be displayed after plotting, default True
:type disp: bool
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
if var == 'r': # define the parameters, legend text and legend coordinates characteristic for anisotropy data
params = [params_df.loc[pt_pair, 'rmin'], params_df.loc[pt_pair, 'rmax'], params_df.loc[pt_pair,'r_EC50'], params_df.loc[pt_pair,'r_hill']]
text = "$r_{min}$ = %.4f \u00B1 %.4f\n$r_{max}$ = %.4f \u00B1 %.4f\n$EC_{50}$ = %.2f \u00B1 %.2f\n$hill$ = %.2f \u00B1 %.2f" % tuple(params_df.loc[pt_pair, ['rmin',
'rmin error','rmax','rmax error','r_EC50','r_EC50 error', 'r_hill', 'r_hill error']])
label_coords = (0.02, 0.68)
ylabel = 'Anisotropy'
else: # define the parameters, legend text and legend coordinates characteristic for intensity data
params = [params_df.loc[pt_pair, 'Ifree'], params_df.loc[pt_pair, 'Ibound'], params_df.loc[pt_pair, 'I_EC50'], params_df.loc[pt_pair, 'I_hill']]
text = "$I_{free}$ = %.0f \u00B1 %.0f\n$I_{bound}$ = %.0f \u00B1 %.0f\n$EC_{50}$ = %.0f \u00B1 %.0f\n$hill$ = %.2f \u00B1 %.2f" % tuple(params_df.loc[pt_pair, ['Ifree',
'Ifree error', 'Ibound', 'Ibound error', 'I_EC50', 'I_EC50 error', 'I_hill', 'I_hill error']])
label_coords = (0.02, 0.03)
ylabel = 'Intensity'
drop = data_df[data_df[f'{t_type[0]} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaNs from plotting
axs.errorbar(drop[f'{t_type[0]} Concentration'], drop['mean'], yerr=drop[err], color='black', fmt='o', capsize=3, marker='s')
axs.set_xscale('log')
axs.set_ylabel(ylabel)
axs.set_xlabel(f'[{pt_pair[int(t_type[1])]}] (nM)')
minc = drop[f'{t_type[0]} Concentration'].min()
maxc = drop[f'{t_type[0]} Concentration'].max()
vir_data = np.logspace(np.log10(minc), np.log10(maxc), 100)
axs.plot(vir_data, FA._r_func(vir_data, *params), color='blue')
if leg == True: # display legend and a box with fitting parameters on the graph
axs.set_title(f'Protein: {pt_pair[0]}, Tracer: {pt_pair[1]}')
axs.legend(['logistic fitted curve'], frameon=False, fontsize=11)
axs.annotate(text, xy=label_coords, xycoords='axes fraction', fontsize=11)
if exp == True: # save figures in the same directory as the notebook
fig.savefig(f"rep_{rep[-1]}_{var}_{str(pt_pair[0])}_{str(pt_pair[1])}.png", dpi=dpi)
if type(exp) == str: # save figures in the user defined directory
fig.savefig(f"{exp}rep_{rep[-1]}_{var}_{str(pt_pair[0])}_{str(pt_pair[1])}.png", dpi=dpi)
if disp == False:
plt.close(fig)
def plot_ani(self, prot=['all'], trac=['all'], rep=['all'], err='std'):
"""Plots anisotropy and intensity against protein concentration with a fitted logistic curve for specified repeats and
protein-tracer pairs. A separate figure for each repeat is created with anisotropy and intensity graphs for all
specified proteins and tracers side by side.
:param prot: List of protein names for which the graphs are created, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs are created, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs are created, defaults to ['all'].
:type rep: list of int
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem'), defaults to 'std'.
:type err: str
"""
# get data_dict and a list of protein-tracer names
data_dict, pt_pairs = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, rep)
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = ('Protein', 0)
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = ('Tracer', 1)
for key, value in data_dict.items(): # iterate over all repeats and create a separate figure for each repeat
metadata, data = value.values()
fig, axs = plt.subplots(len(pt_pairs), 2, figsize=(2*6.4, len(pt_pairs)*4.8), tight_layout=True) # grid for subplots has two columns and a variable number of rows, figsize automatically scales up
fig.suptitle(f"Repeat {key[-1]}", fontsize=16)
fit_params = data['fit_params']
for idx, pt_pair in enumerate(pt_pairs): # for each protein-tracer pair plot two graphs: anisotropy and intensity
r_data_df, i_data_df = data['r_mean'][pt_pair], data['i_mean'][pt_pair] # extract the df with anisotropy and intensity
if len(pt_pairs) == 1: # for only one protein-tracer pair the subplot grid is 1-dimensional
FA._plot_ani(r_data_df, fit_params, pt_pair, t_type, fig, axs[0], err, 'r', key)
FA._plot_ani(i_data_df, fit_params, pt_pair, t_type, fig, axs[1], err, 'i', key)
else: # for more than one protein-tracer pair the subplot grid is 2-dimensional
FA._plot_ani(r_data_df, fit_params, pt_pair, t_type, fig, axs[idx,0], err, 'r', key)
FA._plot_ani(i_data_df, fit_params, pt_pair, t_type, fig, axs[idx,1], err, 'i', key)
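# Illustrative usage sketch: plot anisotropy and intensity with standard error bars for all
# protein-tracer pairs in repeat 1.
#
#   fa.plot_ani(rep=[1], err='sem')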
def save_ani_figs(self, prot=['all'], trac=['all'], rep=['all'], var='both', path='', err='std', leg=False, dpi=250):
"""Saves single figures of anisotropy and intensity for for specified repeats and protein-tracer pairs in the same
directory as this notebook or in user defined directory if the path is provided.
:param prot: List of protein names for which the graphs are exported, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs are exported, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs are exported, defaults to ['all'].
:type rep: list of ints
:param var: A variable for which the graphs are exported, either 'r' for anisotropy or 'i' for intensity, defaults to 'both'.
:type var: str
:param path: A path to directory in which the figures are saved, defaults to '' (the same directory as this Jupyter Notebook).
:type path: str
:param err: Type of error data displayed as error bars, either 'std' or 'sem', defaults to 'std'.
:type err: str
:param leg: Display legend on the figures, defaults to False.
:type leg: bool
:param dpi: Resolution of the figure in points per inch, defaults to 250.
:type dpi: int
"""
# get data_dict and a list of protein-tracer names
data_dict, pt_pairs = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, rep)
# determine the titration type tuple needed by the plotting function (same logic as in plot_ani)
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = ('Protein', 0)
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = ('Tracer', 1)
for key, value in data_dict.items(): # iterate over the repeats specified by the 'rep' parameter
metadata, data = value.values()
fit_params = data['fit_params']
for pt_pair in pt_pairs: # iterate over each protein-tracer pair in
r_data_df, i_data_df = data['r_mean'][pt_pair], data['i_mean'][pt_pair] # extract the df with anisotropy and intensity dfs
if var == 'r' or var == 'both':
fig, axs = plt.subplots(figsize=(6.4, 4.8), tight_layout=True) # create a figure with a single axis for anisotropy
FA._plot_ani(r_data_df, fit_params, pt_pair, t_type, fig, axs, err, 'r', key, exp=path, disp=False, leg=leg, dpi=dpi)
if var == 'i' or var == 'both':
fig, axs = plt.subplots(figsize=(6.4, 4.8), tight_layout=True)
FA._plot_ani(i_data_df, fit_params, pt_pair, t_type, fig, axs, err, 'i', key, exp=path, disp=False, leg=leg, dpi=dpi)
print('The figures were successfully exported.')
def _plot_kd(data_df, rep, pt_pair, t_type, err, leg, exp, dpi):
"""Plots amount bound against protein or tracer concentration with a fitted curve on a separate figure for a specific protein-tracer pair.
:param data_df: Data frame with mean values of amount of tracer bound and their associated errors
:type data_df: pandas df
:param rep: Repeat number for labelling of the graph
:type rep: 'str'
:param pt_pair: Protein and tracer names for which data will be plotted
:type pt_pair: list of tuples
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
drop = data_df[data_df[f'{t_type} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaNs from data fitting
fig, axs = plt.subplots(1, 1, figsize=(6.4, 4.8), tight_layout=True)
# define the x axis data and labels for protein and tracer titration cases
if t_type == 'Protein':
text = '$L_{T}$ = %.2f\u00B1 %.2f\n$K_{d}$ = %.2f \u00B1 %.2f' % (FA.final_fit.loc[pt_pair, 'LT'],
FA.final_fit.loc[pt_pair, 'LT error'], FA.final_fit.loc[pt_pair, 'Kd'], FA.final_fit.loc[pt_pair, 'Kd error'])
xlabel = f'[{pt_pair[0]}] (nM)'
params = [FA.final_fit.loc[pt_pair, 'LT'], FA.final_fit.loc[pt_pair, 'Kd']]
else:
text = '$P_{T}$ = %.2f\u00B1 %.2f\n$K_{d}$ = %.2f \u00B1 %.2f' % (FA.final_fit.loc[pt_pair, 'PT'],
FA.final_fit.loc[pt_pair, 'PT error'], FA.final_fit.loc[pt_pair, 'Kd'], FA.final_fit.loc[pt_pair, 'Kd error'])
xlabel = f'[{pt_pair[1]}] (nM)'
params = [FA.final_fit.loc[pt_pair, 'PT'], FA.final_fit.loc[pt_pair, 'Kd']]
axs.errorbar(drop[f'{t_type} Concentration'], drop['mean'], yerr=drop[err], color='black', fmt='o', capsize=3, marker='s')
axs.set_xscale('log')
axs.set_ylabel('[Fluorescent Tracer Bound] (nM)')
axs.set_xlabel(xlabel)
minc = drop[f'{t_type} Concentration'].min()
maxc = drop[f'{t_type} Concentration'].max()
vir_data = np.logspace(np.log10(minc),np.log10(maxc), 100)
axs.plot(vir_data, FA._LB(vir_data, *params), color='blue')
if leg == True: # display the figure title, legend and annotation with fitting params
axs.set_title(f'Repeat {rep[-1]}, Protein: {pt_pair[0]}, Tracer: {pt_pair[1]}')
axs.legend([f'single site fitted curve'], fontsize=11, frameon=False)
axs.annotate(text, xy=(0.02, 0.80), xycoords='axes fraction', fontsize=11)
plt.show()
if exp == True: # save the figure to the same directory as the notebook
fig.savefig(f"Kd_plot_{str(pt_pair[0])}_{str(pt_pair[1])}.png", dpi=dpi)
if type(exp) == str: # save the figure to user defined directory
fig.savefig(f"{exp}Kd_plot_{str(pt_pair[0])}_{str(pt_pair[1])}.png", dpi=dpi)
def _overlay_kd_plots(plate_map, data_dict, pt_pairs, t_type, err, leg, exp, dpi):
"""Creates a figure with overlayed plots for specified protein-tracer pairs and repeats
:param plate_map: Platemap
:type plate_map: pandas df
:param data_dict: Data dictionary containing the specific repeats for which data will be plotted
:type data_dict: dict
:param pt_pairs: List of protein-tracer names for which data will be plotted
:type pt_pairs: list of tuples
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param dpi: Resolution of the figure in dots per inch
:type dpi: int
"""
fig, axs = plt.subplots(1, 1, figsize=(6.4, 4.8), tight_layout=True)
leg_text = [] # list to store the legend text
cmaps = ['Blues', 'Greens', 'Oranges', 'Purples', 'Reds', 'Greys', 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']
iter_cmaps = iter(cmaps)
for key, value in data_dict.items(): # iterate through all repeats of the defined data_dict
metadata, data = value.values()
for pt_pair in pt_pairs: # iterate through the list of protein-tracer names to plot its data on the same figure
data_df = data['amount_bound'][pt_pair] # extract the correct df with amount bound for a given protein-tracer pair
drop = data_df[ data_df[f'{t_type} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaNs from data fitting
if t_type == 'Protein':
params = [FA.final_fit.loc[pt_pair, 'LT'], FA.final_fit.loc[pt_pair, 'Kd']]
text = '$L_{T}$ = %.2f\u00B1 %.2f\n$K_{d}$ = %.2f \u00B1 %.2f' % tuple(FA.final_fit.loc[pt_pair, ['LT','LT error','Kd','Kd error']])
else:
params = [FA.final_fit.loc[pt_pair, 'PT'], FA.final_fit.loc[pt_pair, 'Kd']]
text = '$P_{T}$ = %.2f\u00B1 %.2f\n$K_{d}$ = %.2f \u00B1 %.2f' % tuple(FA.final_fit.loc[pt_pair, ['PT','PT error','Kd','Kd error']])
text_long = f"rep {key[-1]}, {pt_pair[0]}, {pt_pair[1]}\n{text}"
leg_text.append(text_long)
minc = drop[f'{t_type} Concentration'].min()
maxc = drop[f'{t_type} Concentration'].max()
vir_data = np.logspace(np.log10(minc),np.log10(maxc), 100)
cmap = plt.cm.get_cmap(next(iter_cmaps)) # take the next color map from the list
axs.errorbar(drop[f'{t_type} Concentration'], drop['mean'], yerr=drop[err], fmt='o', capsize=3, marker='s', color=cmap(0.95))
axs.plot(vir_data, FA._LB(vir_data, *params), color=cmap(0.50))
axs.set_xscale('log')
axs.set_ylabel('[Fluorescent Tracer Bound] (nM)')
axs.set_xlabel(f'[{t_type}] (nM)')
if leg == True: # display the figure title, legend and annotation with fitting params
axs.set_title('Overlaid plot')
lbox = axs.legend(leg_text, fontsize=11, frameon=False, loc='upper left', bbox_to_anchor=(1.03, 0.95))#, bbox_transform=fig.transFigure)#, xycoords='axes fraction')
fig.canvas.draw() # draw the canvas so that figure and legend size is defined
# calculate the length by which the figure will be widened to accommodate the legend
w = (lbox.get_window_extent().width + (0.06 * axs.get_window_extent().width)) / fig.dpi
fig.set_size_inches(6.4 + w, 4.8) # resize the figure
plt.show()
if exp == True: # save the figure to the same directory as the notebook
fig.savefig(f"Overlayed_Kd_plot.png", dpi=dpi) #
if type(exp) == str: # save the figure to user defined directory
fig.savefig(f"{exp}Overlayed_Kd_plot.png",dpi=dpi)
def plot_kd(self, prot=['all'], trac=['all'], rep=['all'], err='std', overlay=False, legend=True, export=False, dpi=250):
"""Plots the concentration of fluorescent tracer bound to target protein against the protein (or tracer) concentration.
:param prot: List of protein names for which the graphs will be created, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs will be created, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs will be created, defaults to ['all'].
:type rep: list of ints
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem'), defaults to 'std'.
:type err: str
:param overlay: Overlays the data on a single figure, defaults to False.
:type overlay: bool
:param legend: Display the figure title and legend, defaults to True.
:type legend: bool
:param export: Saves the figures as png files in the same location as the Notebook or in a specified directory, defaults to False.
:type export: bool or str
:param dpi: Resolution of the exported figure in dots per inch, defaults to 250.
:type dpi: int
"""
data_dict, pt_pairs = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, rep)
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
if overlay == False:
for key, value in data_dict.items(): # iterate through all repeats of the defined data_dict
metadata, data = value.values()
for pt_pair in pt_pairs: # iterate through the list of protein-tracer names to create a separate figure for each pair
data_df = data['amount_bound'][pt_pair] # extract the correct df with amount bound for a given protein-tracer pair
FA._plot_kd(data_df, key, pt_pair, t_type, err, legend, export, dpi)
else:
FA._overlay_kd_plots(self.plate_map, data_dict, pt_pairs, t_type, err, legend, export, dpi)
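# Hypothetical usage sketch (not part of the class): assuming `fa` is an FA instance
# that has already been processed and fitted, the call below would overlay the Kd
# curves of two repeats for one protein-tracer pair and save the figure to a
# 'figures/' folder. The instance name, protein/tracer names and directory are
# illustrative only.
# fa.plot_kd(prot=['Protein 1'], trac=['Tracer A'], rep=[1, 2], err='sem',
#            overlay=True, legend=True, export='figures/', dpi=300)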
##### Fitting params set, export and import functions #####
def set_fitparams(self, pair, final=True, rep=None, **kwargs):
"""Allows to set a value of any parameter in the final fit data frame (by default) or in fit_params data frame
for a specific protein-tracer pair.
:param pair: A tuple with protein and tracer names for which the parameters are changed
:type pair: tuple
:param final: If True, the parameters will be changed in the final_fit data frame, otherwise in the fitting param data frame
:type final: bool
:param rep: Repeat number for which the fit_params data frame will be modified, passed only if the final=False, defaults to None
:type rep: int
:param **kwargs: Keyword arguments representing the parameter and its value, e.g. lamda=1.5, rmin=0.30
"""
if final == True:
for key, value in kwargs.items(): # iterate over the kwargs dictionary
FA.final_fit.loc[pair, key] = value # overwrite the parameters in fitting params df with all params passed as keyword arguments
if final == False:
for key, value in kwargs.items(): # iterate over the kwargs dictionary
self.data_dict[f'repeat_{rep}']['data']['fit_params'].loc[pair, key] = value # overwrite the parameters in fitting params df with all params passed as keyword arguments
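# Hypothetical usage sketch: overwrite the final-fit lamda and rmin for one
# protein-tracer pair, or a per-repeat rmax; the pair name and numbers are
# illustrative only.
# fa.set_fitparams(('Protein 1', 'Tracer A'), final=True, lamda=1.5, rmin=0.30)
# fa.set_fitparams(('Protein 1', 'Tracer A'), final=False, rep=1, rmax=0.55)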
def export_params(self, path='', file_type='csv'):
"""Export the final fit parameters and the fitting parameters for each repeat to csv or excel files.
:param path: A path to directory in which the file is saved, defaults to '' (i.e. the same directory as this Jupyter Notebook)
:type path: str
:param file_type: Type of file generated, either 'csv' or 'excel' file, defaults to csv
:type file_type: 'str'
"""
if file_type == 'csv': # export as csv file
FA.final_fit.to_csv(path_or_buf=f"{path}final_fit_parameters.csv")
if file_type == 'excel': # export as excel file
FA.final_fit.to_excel(excel_writer=f"{path}all_fitting_parameters.xlsx")
for key, value in self.data_dict.items(): #iterate over all repeats
metadata, data = value.values()
if file_type == 'csv': # export as csv file
data['fit_params'].to_csv(path_or_buf=f"{path}{key}_fitting_parameters.csv")
if file_type == 'excel': # export as excel file
with pd.ExcelWriter(f"{path}all_fitting_parameters.xlsx", engine='openpyxl', mode='a') as writer:
data['fit_params'].to_excel(writer, sheet_name=f"{key}_fit_params")
print(f'The fitting parameters were exported to the {file_type} files.')
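# Hypothetical round trip (names illustrative): export the parameters as csv files
# to a 'params/' folder, then read the final-fit values back with import_params below.
# fa.export_params(path='params/', file_type='csv')
# fa.import_params('params/final_fit_parameters.csv')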
def import_params(self, csv_file):
"""Allows to import a csv file with final_fit parameters (i.e. rmin, rmax, lamda, Kd and their errors).
:param csv_file: A csv file path with parameters to be imported
:type csv_file: str
"""
with open(csv_file) as file: # read the csv into pandas df
df = pd.read_csv(file, sep=',', index_col=[0,1], engine='python', encoding='utf-8') # import with multiindex
indexes = list(df.index) # create a list with protein-tracer names
for col in df.columns: # iterate over the imported columns
if col not in FA.final_fit.columns: # if there is no such column in final_fit df, raise a warning, otherwise column with wrong name will be added to final_fit
warnings.warn(f"The final_fit data frame does not contain matching columns: '{col}'")
else: # overwrite the existing values in the final_fit df with the ones from imported df
FA.final_fit.loc[FA.final_fit.index.intersection(indexes), col] = df.loc[df.index.intersection(indexes), col]
def export_data(self, path=''):
"""Saves the mean anisotropy, intensity and amount bound data along with their standard deviation
and standard error in excel file.
:param path: Path to the folder in wchich the excel file is saved.
:type path: str
"""
c = 0 # count number of iterations
for key, value in self.data_dict.items():
metadata, data = value.values()
pt_pairs = list(data['r_mean'].keys()) # list of all protein-tracer names
for pt_pair in pt_pairs:
# remove redundant columns and rename the remaining ones for anisotropy, intensity and amount bound dfs
r_df = data['r_mean'][pt_pair].drop(['Protein Name','Tracer Name'], axis=1)
r_df2 = r_df.rename(columns={'mean': 'anisotropy mean', 'std': 'ani std', 'sem': 'ani sem'}).set_index('Protein Concentration')
i_df = data['i_mean'][pt_pair].drop(['Protein Name','Tracer Name'], axis=1)
i_df2 = i_df.rename(columns={'mean': 'intensity mean', 'std': 'int std', 'sem': 'int sem'}).set_index('Protein Concentration')
ab_df = data['amount_bound'][pt_pair].drop(['Protein Name','Tracer Name'], axis=1)
ab_df2 = ab_df.rename(columns={'mean': 'amount bound mean', 'std': 'ab std', 'sem': 'ab sem'}).set_index('Protein Concentration')
# join the anisotropy, intensity and amount bound dfs together
m = | pd.concat([r_df2, i_df2, ab_df2], axis=1) | pandas.concat |
from argparse import Namespace
import pandas
from pandas import DataFrame, Series
from ssl_metrics_git_bus_factor.args import mainArgs
def buildBusFactor(df: DataFrame) -> DataFrame:
daysSince0: Series = df["author_days_since_0"].unique()
data: list = []
day: int
for day in range(daysSince0.max() + 1):
temp: dict = {}
busFactor: int = len(df[df["author_days_since_0"] == day]["author_email"].unique())
temp["days_since_0"] = day
temp["productivity"] = busFactor
data.append(temp)
return | DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare stations metadata."""
# pylint: disable=invalid-name
from typing import Dict, List
import pandas as pd
import pandera as pa
import requests
stations_schema = pa.DataFrameSchema(
columns={
"station_id": pa.Column(pa.Int),
"name": pa.Column(pd.StringDtype()),
"physical_configuration": pa.Column(pd.StringDtype()),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
"altitude": pa.Column(pa.Float64, nullable=True),
"address": pa.Column(pd.StringDtype()),
"capacity": pa.Column(pa.Int),
"physicalkey": pa.Column(pa.Int),
"transitcard": pa.Column(pa.Int),
"creditcard": pa.Column(pa.Int),
"phone": pa.Column(pa.Int),
},
index=pa.Index(pa.Int),
)
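# A minimal validation sketch (not called anywhere in this module): once
# get_stations_metadata below has produced a frame of the expected shape, the schema
# can be applied directly, and pandera raises a SchemaError naming the offending
# column if a dtype or nullability check fails.
#
# validated = stations_schema.validate(df_stations)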
def get_stations_metadata(
stations_url: str, stations_params: Dict
) -> pd.DataFrame:
"""Get bikeshare stations metadata from JSON feed."""
package = requests.get(stations_url, params=stations_params).json()
resources = package["result"]["resources"]
df_about = pd.DataFrame.from_records(resources)
r = requests.get(df_about["url"].tolist()[0]).json()
url_stations = r["data"]["en"]["feeds"][2]["url"]
df_stations = pd.DataFrame.from_records(
requests.get(url_stations).json()["data"]["stations"]
)
df_stations = df_stations.astype(
{
"physical_configuration": | pd.StringDtype() | pandas.StringDtype |
import requests
import pandas
import io
import logging
from scipy import stats
import plotnine
plotnine.options.figure_size = (12, 8)
from plotnine import *
from mizani.breaks import date_breaks
from mizani.formatters import date_format
# Setting up a logger
logger = logging.getLogger('non_regression_tests')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def get(url):
'''
Download a CSV file at the specified URL and load it into a dataframe.
'''
data = requests.get(url)
if data.status_code != 200:
raise ValueError(f'Could not download the CSV file, got an error {data.status_code}')
df = pandas.read_csv(io.BytesIO(data.content))
logger.info(f'Downloaded a dataframe with {len(df)} rows and {len(df.columns)} columns')
return df
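# Minimal usage sketch for the helper above; the URL is a placeholder, not a real
# endpoint. A non-200 response raises ValueError instead of returning a broken frame.
#
# df = get('https://example.com/results.csv')
# df = format(df)  # the formatting helper defined next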
def format(df):
'''
Apply various formatting to the given dataframe (e.g., datetime parsing).
'''
df['timestamp'] = | pandas.to_datetime(df['start_time'], unit='s') | pandas.to_datetime |
import datetime as dt
import json
import os
import pandas as pd
from loguru import logger
class Analysis:
CLASS_CONFIG = {
'AMO_CITY_FIELD_ID': 512318,
'DRUPAL_UTM_FIELD_ID': 632884,
'TILDA_UTM_SOURCE_FIELD_ID': 648158,
'TILDA_UTM_MEDIUM_FIELD_ID': 648160,
'TILDA_UTM_CAMPAIGN_FIELD_ID': 648310,
'TILDA_UTM_CONTENT_FIELD_ID': 648312,
'TILDA_UTM_TERM_FIELD_ID': 648314,
'CT_UTM_SOURCE_FIELD_ID': 648256,
'CT_UTM_MEDIUM_FIELD_ID': 648258,
'CT_UTM_CAMPAIGN_FIELD_ID': 648260,
'CT_UTM_CONTENT_FIELD_ID': 648262,
'CT_UTM_TERM_FIELD_ID': 648264,
'CT_TYPE_COMMUNICATION_FIELD_ID': 648220,
'CT_DEVICE_FIELD_ID': 648276,
'CT_OS_FIELD_ID': 648278,
'CT_BROWSER_FIELD_ID': 648280,
'AMO_ITEMS_2019_FIELD_ID': 562024,
'AMO_ITEMS_2020_FIELD_ID': 648028,
}
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
WEEK_OFFSET = dt.timedelta(hours=24 + 24 + 6)
LEAD_UTM_FIELDS = [
'source',
'medium',
'campaign',
'content',
'term'
]
def __init__(self, config=None):
self.CLASS_CONFIG = dict()
if config:
self.CLASS_CONFIG = config
self.CLASS_CONFIG.update(Analysis.CLASS_CONFIG)
self.transform_data = []
def extract(self, file_name):
with open(os.path.join(os.path.dirname(__file__), file_name),
encoding='utf-8') as f:
data = json.load(f)
return data
def transform(self, file_name):
data = self.extract(file_name)
for row in data:
self.transform_data.append(self.transform_row(row))
self.logging_check(self.transform_data)
return self.transform_data
def transform_row(self, row):
created_at_datetime = dt.datetime.fromtimestamp(row['created_at'])
res = {
'id': row['id'],
'amo_updated_at': (None if 'updated_at' not in row else
row['updated_at']),
'amo_trashed_at': (None if 'trashed_at' not in row else
row['trashed_at']),
'amo_closed_at': (None if 'closed_at' not in row else
row['closed_at']),
'amo_pipeline_id': row['pipeline_id'],
'amo_status_id': row['status_id'],
'created_at_bq_timestamp': created_at_datetime.strftime(
self.TIME_FORMAT),
'created_at_year': created_at_datetime.strftime('%Y'),
'created_at_month': created_at_datetime.strftime('%m'),
'created_at_week': ((created_at_datetime + self.WEEK_OFFSET)
.isocalendar()[1]),
'amo_city': self.get_custom_field(
row, self.CLASS_CONFIG['AMO_CITY_FIELD_ID']),
'drupal_utm': self.get_custom_field(
row, self.CLASS_CONFIG['DRUPAL_UTM_FIELD_ID']),
'tilda_utm_source': self.get_custom_field(
row, self.CLASS_CONFIG['TILDA_UTM_SOURCE_FIELD_ID']),
'tilda_utm_medium': self.get_custom_field(
row, self.CLASS_CONFIG['TILDA_UTM_MEDIUM_FIELD_ID']),
'tilda_utm_campaign': self.get_custom_field(
row, self.CLASS_CONFIG['TILDA_UTM_CAMPAIGN_FIELD_ID']),
'tilda_utm_content': self.get_custom_field(
row, self.CLASS_CONFIG['TILDA_UTM_CONTENT_FIELD_ID']),
'tilda_utm_term': self.get_custom_field(
row, self.CLASS_CONFIG['TILDA_UTM_TERM_FIELD_ID']),
'ct_utm_source': self.get_custom_field(
row, self.CLASS_CONFIG['CT_UTM_SOURCE_FIELD_ID']),
'ct_utm_medium': self.get_custom_field(
row, self.CLASS_CONFIG['CT_UTM_MEDIUM_FIELD_ID']),
'ct_utm_campaign': self.get_custom_field(
row, self.CLASS_CONFIG['CT_UTM_CAMPAIGN_FIELD_ID']),
'ct_utm_content': self.get_custom_field(
row, self.CLASS_CONFIG['CT_UTM_CONTENT_FIELD_ID']),
'ct_utm_term': self.get_custom_field(
row, self.CLASS_CONFIG['CT_UTM_TERM_FIELD_ID']),
'ct_type_communication': self.get_custom_field(
row, self.CLASS_CONFIG['CT_TYPE_COMMUNICATION_FIELD_ID']),
'ct_device': self.get_custom_field(
row, self.CLASS_CONFIG['CT_DEVICE_FIELD_ID']),
'ct_os': self.get_custom_field(
row, self.CLASS_CONFIG['CT_OS_FIELD_ID']),
'ct_browser': self.get_custom_field(
row, self.CLASS_CONFIG['CT_BROWSER_FIELD_ID']),
'amo_items_2019': self.get_custom_field(
row, self.CLASS_CONFIG['AMO_ITEMS_2019_FIELD_ID']),
'amo_items_2020': self.get_custom_field(
row, self.CLASS_CONFIG['AMO_ITEMS_2020_FIELD_ID'])
}
for field in self.LEAD_UTM_FIELDS:
res[f'lead_utm_{field}'] = self.get_lead_utm(res, field)
return res
def get_custom_field(self, row, field_id):
for custom_field in row['custom_fields_values']:
if custom_field['field_id'] == field_id:
items = []
for item in custom_field['values']:
items.append(item.get('value', None))
return ','.join(items)
return None
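# Illustrative shape of the amoCRM payload consumed above (values are made up):
#   row['custom_fields_values'] = [
#       {'field_id': 512318, 'values': [{'value': 'Moscow'}]},
#   ]
# With that row, get_custom_field(row, 512318) returns 'Moscow'; multiple values of
# the same field are joined with commas.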
def get_lead_utm(self, res, field):
if res['drupal_utm']:
drupal_utm_list = res['drupal_utm'].split(', ')
drupal_utm_dict = dict([item.split('=')
for item in drupal_utm_list])
if field in drupal_utm_dict:
if (drupal_utm_dict['medium'] in ['yandex', 'google'] and
field == 'source'):
return drupal_utm_dict['medium']
if drupal_utm_dict['source'] in ['context', 'context-cpc',
'search'] and field == 'medium':
return drupal_utm_dict['source']
return drupal_utm_dict[field]
elif res[f'ct_utm_{field}']:
return res[f'ct_utm_{field}']
return res[f'tilda_utm_{field}']
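# Worked example of the resolution order above (values are made up): for a lead with
#   drupal_utm = 'source=context, medium=yandex, campaign=spring'
# get_lead_utm returns 'yandex' for 'source' and 'context' for 'medium' (the swapped
# Drupal values are corrected), while 'campaign' is taken straight from the dict.
# Only when drupal_utm is empty do the ct_ and then tilda_ fields act as fallbacks.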
def logging_check(self, data):
logger.add('info.log', mode='w')
for res in data:
for field in self.LEAD_UTM_FIELDS:
if ((res[f'ct_utm_{field}'] and
res[f'ct_utm_{field}'] != res[f'lead_utm_{field}']) or
(res[f'tilda_utm_{field}'] and
res[f'tilda_utm_{field}'] != res[f'lead_utm_{field}'])):
logger.info(f'utm_{field} conflict in lead {res["id"]}')
def create_dataframe(self, file_name):
self.transform(file_name)
frame = | pd.DataFrame(self.transform_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
'''
This function filters out all the rows for which the label column does not match a given value (e.g. GetDistribution).
It then saves the "elapsed" value for rows that do match the specified label text (transaction name) into a csv.
'''
LOCATION = os.getcwd()
TRANSACTION_NAME = 'HTTP Request' #replace the transaction name with your transaction name
def extract_latency_data():
FILE_TO_WRITE =""
for file in os.listdir(LOCATION):
try: #extract latency data from csv files that begin with TestPlan_
if file.startswith("TestPlan_") and file.endswith(".csv"):
FILE_TO_WRITE = "new_"+os.path.basename(file)
df = | pd.read_csv(file) | pandas.read_csv |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [2., 1],
'grp': ['A', 'B']
},
index=['Y-weighted var(X)', 'Y-weighted var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_standard_deviation_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std())
def test_standard_deviation_biased(self):
metric = metrics.StandardDeviation('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0))
def test_standard_deviation_split_by_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std()
expected.name = 'sd(X)'
testing.assert_series_equal(output, expected)
def test_standard_deviation_where(self):
metric = metrics.StandardDeviation('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std()
self.assertEqual(output, expected)
def test_standard_deviation_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sd(X)': [self.df.X.std()]})
testing.assert_frame_equal(output, expected)
def test_standard_deviation_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.std()]}, index=['sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sd(X)': self.df.groupby('grp')['X'].std()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].std().values,
'grp': ['A', 'B']
},
index=['sd(X)', 'sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_standard_deviation_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, np.sqrt(0.75))
def test_weighted_standard_deviation_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((np.sqrt(2), 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted sd(X)'
testing.assert_series_equal(output, expected)
def test_weighted_standard_deviation_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted sd(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted sd(X)': [np.sqrt(2), 1]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [np.sqrt(2), 1],
'grp': ['A', 'B']
},
index=['Y-weighted sd(X)', 'Y-weighted sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_cv_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.sqrt(1 / 3.))
def test_cv_biased(self):
metric = metrics.CV('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0) / np.mean(self.df.X))
def test_cv_split_by_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std() / [1, 2.75]
expected.name = 'cv(X)'
testing.assert_series_equal(output, expected)
def test_cv_where(self):
metric = metrics.CV('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std() / 2.75
self.assertEqual(output, expected)
def test_cv_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cv(X)': [np.sqrt(1 / 3.)]})
testing.assert_frame_equal(output, expected)
def test_cv_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [np.sqrt(1 / 3.)]}, index=['cv(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'cv(X)': [0, np.sqrt(1 / 8.25)]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
data={
'Value': [0, np.sqrt(1 / 8.25)],
'grp': ['A', 'B']
},
index=['cv(X)', 'cv(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_correlation(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.corrcoef(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.corr(self.df.Y))
def test_weighted_correlation(self):
metric = metrics.Correlation('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
cov = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)
expected = pd.DataFrame(
{'Y-weighted corr(X, Y)': [cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])]})
testing.assert_frame_equal(output, expected)
def test_correlation_method(self):
metric = metrics.Correlation('X', 'Y', method='kendall')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.corr(self.df.Y, method='kendall'))
def test_correlation_kwargs(self):
metric = metrics.Correlation('X', 'Y', min_periods=10)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertTrue(pd.isnull(output))
def test_correlation_split_by_not_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
corr_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([corr_a, corr_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'corr(X, Y)'
testing.assert_series_equal(output, expected)
def test_correlation_where(self):
metric = metrics.Correlation('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_correlation_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'corr(X, Y)': [self.df.X.corr(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_correlation_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(df, 'grp')
corr_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'corr(X, Y)': [corr_a, corr_b]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cov(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.cov(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.cov(self.df.Y))
def test_cov_bias(self):
metric = metrics.Cov('X', 'Y', bias=True)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_ddof(self):
metric = metrics.Cov('X', 'Y', ddof=0)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_kwargs(self):
metric = metrics.Cov('X', 'Y', fweights=self.df.Y)
output = metric.compute_on(self.df)
expected = np.cov(self.df.X, self.df.Y, fweights=self.df.Y)[0, 1]
expected = pd.DataFrame({'cov(X, Y)': [expected]})
testing.assert_frame_equal(output, expected)
def test_weighted_cov(self):
metric = metrics.Cov('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
expected = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)[0, 1]
expected = pd.DataFrame({'Y-weighted cov(X, Y)': [expected]})
testing.assert_frame_equal(output, expected)
def test_cov_split_by_not_df(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
cov_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
cov_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([cov_a, cov_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'cov(X, Y)'
testing.assert_series_equal(output, expected)
def test_cov_where(self):
metric = metrics.Cov('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_cov_df(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cov(X, Y)': [self.df.X.cov(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_cov_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
cov_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
cov_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'cov(X, Y)': [cov_a, cov_b]}, index=['A', 'B'])
expected.index.name = 'grp'
| testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import numpy as np
from .utils import fast_OLS, fast_optimize, bootstrap_sampler, eval_expression, bias_corrected_ci, z_score, \
percentile_ci
import scipy.stats as stats
from numpy.linalg import inv, LinAlgError
from numpy import dot
from itertools import product, combinations
import pandas as pd
from functools import partial
import warnings
class BaseLogit(object):
"""
A convenience parent class for the methods used in Logistic models.
"""
def __init__(self, endog, exog, options):
self._endog = endog
self._exog = exog
self._n_obs = exog.shape[0]
self._n_vars = exog.shape[1]
if not options:
options = {}
self._options = options
@staticmethod
def _cdf(X):
"""
The CDF of the logistic function.
:param X: A scalar
:return: A scalar
"""
idx = X > 0
out = np.empty(X.size, dtype=float)
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
out[idx] = 1 / (1 + np.exp(-X[idx]))
exp_X = np.exp(X[~idx])
out[~idx] = exp_X / (1 + exp_X)
return out
except RuntimeWarning:
raise RuntimeError
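# Note on the branching above: it is the standard numerically stable evaluation of
# the logistic sigmoid, so np.exp never receives a large positive argument. A rough
# stand-alone equivalent (sketch only, not used by the class) is:
#   p = np.where(X > 0, 1.0 / (1.0 + np.exp(-np.abs(X))),
#                np.exp(-np.abs(X)) / (1.0 + np.exp(-np.abs(X))))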
def _loglike(self, params):
return np.sum(self._loglikeobs(params))
def _loglikeobs(self, params):
q = 2 * self._endog - 1
X = self._exog
return np.log(self._cdf(q * dot(X, params)))
def _score(self, params):
z = dot(self._exog, params)
L = self._cdf(z)
return dot(self._endog - L, self._exog)
def _hessian(self, params):
X = self._exog
L = self._cdf(dot(X, params))
return dot(L * (1 - L) * X.T, X)
def _optimize(self):
max_iter = self._options["iterate"]
tolerance = self._options["convergence"]
iterations = 0
score = lambda params: self._score(params) / self._n_obs
hess = lambda params: -self._hessian(params) / self._n_obs
oldparams = np.inf
newparams = np.repeat(0, self._n_vars)
while iterations < max_iter and np.any(np.abs(newparams - oldparams) > tolerance):
oldparams = newparams
H = hess(oldparams)
newparams = oldparams - dot(inv(H), score(oldparams))
iterations += 1
return newparams
class NullLogitModel(BaseLogit):
def __init__(self, endog, options=None):
n_obs = endog.shape[0]
exog = np.ones((n_obs, 1))
if not options:
options = {}
super().__init__(endog, exog, options)
class ParallelMediationModel(object):
"""
A class describing a parallel mediation model between an endogenous variable Y, one or several mediators M, and a
set of exogenous predictors for the endogenous variable and the mediators.
"""
def __init__(self, data, exog_terms_y, exog_terms_m, mod_symb, spot_values,
n_meds, analysis_list, symb_to_ind, symb_to_var, options=None):
"""
:param data: array
An NxK array of data
:param exog_terms_y: list of strings
Symbols of exogenous terms for the estimation of the outcome Y
:param exog_terms_m: list of strings
Symbols of exogenous terms for the estimation of the mediator(s) M (same for all mediators)
:param mod_symb: list of strings
Symbols of the moderator(s) of the path from X to the mediator(s) M and of the path from M to Y
:param spot_values: dict of lists
The spotlight values of the moderator(s)
:param n_meds: int
Number of mediator(s)
:param analysis_list: list of ["PMM", "CMM", "MMM"]
The list of additional analysis to conduct.
:param symb_to_ind: dict of int
Dictionary mapping the symbols to the indices of the variable in the data
:param symb_to_var:
Dictionary mapping the symbols to the actual names of the variable in the data
:param options: dict
Dictionary of options, from the Process object
"""
self._data = data
self._exog_terms_y = exog_terms_y
self._exog_terms_m = exog_terms_m
self._n_meds = n_meds
self._symb_to_ind = symb_to_ind
self._symb_to_var = symb_to_var
self._n_obs = data.shape[0]
if not options:
options = {}
self._options = options
self._vars_y = [i for i in self._exog_terms_y if (("*" not in i) & (i != "Cons"))]
self._ind_y = self._symb_to_ind["y"]
self._exog_inds_y = [self._symb_to_ind[var] for var in self._exog_terms_y]
self._vars_m = [i for i in self._exog_terms_m if (("*" not in i) & (i != "Cons"))]
self._endog_vars_m = ["m{}".format(i + 1) for i in range(self._n_meds)]
self._inds_m = [self._symb_to_ind[m] for m in self._endog_vars_m]
self._exog_inds_m = [self._symb_to_ind[var] for var in self._exog_terms_m]
self._compute_betas_m = fast_OLS
if self._options["logit"]:
max_iter = self._options["iterate"]
tolerance = self._options["convergence"]
self._compute_betas_y = partial(fast_optimize, n_obs=self._n_obs, n_vars=len(self._exog_inds_y),
max_iter=max_iter, tolerance=tolerance)
else:
self._compute_betas_y = fast_OLS
self._true_betas_y, self._true_betas_m = self._estimate_true_params()
self._boot_betas_y, self._boot_betas_m, self._n_fail_samples = self._estimate_bootstrapped_params()
self._base_derivs = self._gen_derivatives()
self._moderators_symb = mod_symb
self._moderators_values = [spot_values.get(i, [0]) for i in self._moderators_symb]
self._has_moderation = True if mod_symb else False
self._analysis_list = analysis_list
if self._has_moderation:
self.estimation_results = self._cond_ind_effects()
else:
self.estimation_results = self._simple_ind_effects()
def _estimate_true_params(self):
"""
Compute the true parameters for:
* The path from the predictors to Y (computed using OLS/Logit, depending on the nature of Y)
* The path(s) from the mediator(s) to Y (computed using OLS)
:return: A tuple of (true_betas_y, true_betas_m)
* true_betas_y is a vector of size n_params_y
* true_betas_m is a list of vectors of size n_params_m
"""
# True betas of the path from Ms to Y
endog_y = self._data[:, self._ind_y]
exog_y = self._data[:, self._exog_inds_y]
true_betas_y = self._compute_betas_y(endog_y, exog_y)
# For each mediator Mi, true betas from X to Mi
true_betas_m = []
m_exog = self._data[:, self._exog_inds_m]
for m_ind in self._inds_m:
m_endog = self._data[:, m_ind]
betas = self._compute_betas_m(m_endog, m_exog)
true_betas_m.append(betas)
return true_betas_y, true_betas_m
def _estimate_bootstrapped_params(self):
"""
Compute the bootstrapped parameters for:
* The path from the predictors to Y (computed using OLS/Logit, depending on the nature of Y)
* The path(s) from the mediator(s) to Y (computed using OLS)
:return: A tuple of (true_betas_y, true_betas_m)
* true_betas_y is a matrix of size n_boots x n_params_y
* true_betas_m is a list of matrices of size n_boots x n_params_y
"""
n_boots = self._options["boot"]
seed = self._options["seed"]
boot_betas_y = np.empty((n_boots, len(self._exog_terms_y)))
boot_betas_m = np.empty((self._n_meds, n_boots, len(self._exog_terms_m)))
n_fail_samples = 0
boot_ind = 0
sampler = bootstrap_sampler(self._n_obs, seed)
while boot_ind < n_boots:
ind = next(sampler)
data_boot = self._data[ind, :]
y_e = data_boot[:, self._ind_y]
y_x = data_boot[:, self._exog_inds_y]
try:
y_b = self._compute_betas_y(y_e, y_x)
m_x = data_boot[:, self._exog_inds_m]
boot_betas_y[boot_ind] = y_b
for j, m_ind in enumerate(self._inds_m):
m_e = data_boot[:, m_ind]
m_b = self._compute_betas_m(m_e, m_x)
boot_betas_m[j][boot_ind] = m_b
boot_ind += 1
except LinAlgError: # Hessian (Logit) or X'X (OLS) cannot be inverted
n_fail_samples += 1
return boot_betas_y, boot_betas_m, n_fail_samples
def _gen_derivatives(self):
"""
Generate the list of symbolic derivatives for the indirect path(s) from X to Y. The derivative of the path from
X to M is taken with respect to X, and the derivative of the path to Y is taken with respect to M.
For instance (Model 21), we consider the equation of x_to_m:
* The equation of x_to_m is: aConstant + bX + cW + dX*W. Rearranging for X: 1*(aConstant + cW) + X*(b + dW).
* The derivative of this expression is: (b + dW), or in matrix form: [0, 1, 0, W] * [a, b, c, d]
The first vector depends on the value of the moderator W: therefore, it cannot be represented numerically.
Instead, we express derivative using the following technique:
* Each term in the equation (i.e. Constant, X, W, X*W) is represented by a row.
* Each variable is represented by a column.
* The column for X (the variable with respect to which the equation is differentiated) is equal to 0 if the
term does not contain X, and 1 otherwise
* The other columns are equal to the variable if the term contains the variable, and to 1 otherwise.
That way, the product of the columns is equal to the value of each term in the derivative:
X W
[[ 0, 1 ], # Value of the Constant term : 0*1 = 0
[ 1, 1 ], # Value of X term : 1*1 = 1
[ 0, W ], # Value of the W term: 0*W = 0
[ 1, W ]] # Value of the X*W: 1*W = W
The advantage of this matrix is that it is a symbolic expression, in which we can substitute for the values of
the moderators, and then take the product of columns to obtain the numerical representation of the derivative
as a vector.
:return: dict of matrices
A dictionary with keys 'x_to_m' and 'm_to_y':
'x_to_m' is the symbolic derivative of X to the mediator(s) M (one derivative)
'm_to_y' is the list of symbolic derivative(s) from the mediator(s) M to Y (n_meds derivative(s))
"""
derivs = {}
# Derivative of X to M
vars_m = self._vars_m
exog_terms_m = self._exog_terms_m
x_to_m = np.empty((len(vars_m), len(exog_terms_m)), dtype="object")
for j, var in enumerate(vars_m):
if var == "x":
x_to_m[j] = [1 if var in term else 0 for term in exog_terms_m]
else:
x_to_m[j] = [var if var in term else 1 for term in exog_terms_m]
derivs['x_to_m'] = x_to_m.T
list_m_to_y = []
for i in range(self._n_meds): # For all the mediators...
# ... differentiate the path from M to Y (unique to each mediator)
vars_y = self._vars_y
exog_terms_y = self._exog_terms_y
m_to_y = np.empty((len(vars_y), len(exog_terms_y)), dtype="object")
for j, var in enumerate(vars_y):
if var == "m{}".format(i + 1):
m_to_y[j] = [1 if var in term else 0 for term in exog_terms_y]
else:
m_to_y[j] = [var if var in term else 1 for term in exog_terms_y]
list_m_to_y.append(m_to_y.T)
derivs['m_to_y'] = list_m_to_y
return derivs
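# Illustrative example (hypothetical values, not part of the estimation): suppose the mediator
# equation has the terms ["Cons", "x", "w", "x*w"] and the moderator takes the value w = 2. The
# symbolic matrix built above has one row per term and one column per variable ("x", "w"), so
# substituting w = 2 with eval_expression() and taking the product across columns gives the
# term values [0, 1, 0, 2]. The dot product of that vector with the betas [a, b, c, d] is
# b + 2*d, i.e. the derivative of the equation with respect to x evaluated at w = 2.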
def _indirect_effect_at(self, med_index, mod_dict):
"""
Compute the indirect effect through a specific mediator at specific value(s) of the moderator(s)
:param med_index: int
Index of the mediator.
:param mod_dict: dict
None, or a mod_name:mod_value dictionary of moderator values.
:return: e: scalar
Effect at the moderator values
be: array
Effects for all bootstrap samples (N_Boots x 1)
se: scalar
Standard error based on bootstrap samples
llci: scalar
Lower level of CI based on bootstrap samples
ulci: scalar
Upper level of CI based on bootstrap samples
"""
conf = self._options["conf"]
der_x_to_m = self._base_derivs["x_to_m"]
der_m_to_y = self._base_derivs["m_to_y"][med_index]
expr_x_to_m = eval_expression(der_x_to_m, mod_dict)
expr_m_to_y = eval_expression(der_m_to_y, mod_dict)
# Generation of the effects and bootstrapped effects: product of m_der and y_der
e = dot(self._true_betas_y, expr_m_to_y) * dot(self._true_betas_m[med_index], expr_x_to_m)
be = dot(self._boot_betas_y, expr_m_to_y) * dot(self._boot_betas_m[med_index], expr_x_to_m)
se = be.std(ddof=1)
if self._options["percent"]:
llci, ulci = percentile_ci(be, conf)
else:
llci, ulci = bias_corrected_ci(e, be, conf)
return e, be, se, llci, ulci
def _get_conditional_indirect_effects(self, med_index, mod_symb, mod_values):
"""
Return the indirect effects for all combinations of the moderators mod_symb specified in mod_values.
:param med_index: int
Index of the mediator.
:param mod_names: array
An array of moderator names
:param mod_values: matrix
A (N_Comb x N_Mods) matrix of all combinations of values for all moderator(s)
:return: e: array
Effects for all combinations of the moderator values (N_Comb x 1)
be: matrix
Effects for all combinations of the moderator values for all bootstrap samples (N_Comb x N_Boots)
se: array
SE based on bootstrap samples for all combinations of the moderator values (N_Comb x 1)
llci: array
LLCI based on bootstrap samples for all combinations of the moderator values (N_Comb x 1)
ulci: array
ULCI based on bootstrap samples for all combinations of the moderator values (N_Comb x 1)
"""
n_boots = self._options["boot"]
n_comb = len(mod_values)
e, se, llci, ulci = np.empty((4, n_comb))
be = np.empty((n_comb, n_boots))
for i, vals in enumerate(mod_values):
mod_dict = {k: v for k, v in zip(mod_symb, vals)}
e[i], be[i], se[i], llci[i], ulci[i] = self._indirect_effect_at(med_index, mod_dict)
return e, be, se, llci, ulci
def _simple_ind_effects(self):
"""
Generate the indirect effects.
This is done only if the indirect path from X to Y through M is not moderated.
If the option "total" is set to 1, then the total indirect effect is estimated.
If the option "contrast" is set to 1, then the pairwise contrasts between the different mediators are estimated.
:return: dict
A dictionary of lists "effect", "se", "llci", and "ulci".
"""
conf = self._options["conf"]
n_boots = self._options["boot"]
e = np.empty(self._n_meds)
be = np.empty((self._n_meds, n_boots))
for i in range(self._n_meds):
e[i], be[i], *_ = self._indirect_effect_at(i, {})
effects = []
se = []
llci, ulci = [], []
if self._options["total"]:
total_e = e.sum()
boot_total_e = be.sum(axis=0)
total_se = boot_total_e.std(ddof=1)
if self._options["percent"]:
total_ci = percentile_ci(boot_total_e, conf)
else:
total_ci = bias_corrected_ci(total_e, boot_total_e, conf)
effects.append(total_e)
se.append(total_se)
llci.append(total_ci[0])
ulci.append(total_ci[1])
for i in range(self._n_meds):
effects.append(e[i])
se.append(be[i].std(ddof=1))
if self._options["percent"]:
ci = percentile_ci(be[i], conf)
else:
ci = bias_corrected_ci(e[i], be[i], conf)
llci.append(ci[0])
ulci.append(ci[1])
if self._options["contrast"]:
inds = [i for i in range(self._n_meds)]
contrasts = combinations(inds, 2)
for i1, i2 in contrasts:
cont_e = e[i1] - e[i2]
boot_cont_e = be[i1] - be[i2]
cont_se = boot_cont_e.std(ddof=1)
if self._options["percent"]:
cont_ci = percentile_ci(boot_cont_e, conf)
else:
cont_ci = bias_corrected_ci(cont_e, boot_cont_e, conf)
effects.append(cont_e)
se.append(cont_se)
llci.append(cont_ci[0])
ulci.append(cont_ci[1])
statistics = [np.array(i).flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _cond_ind_effects(self):
"""
Generate the conditional indirect effects for all mediators.
:return: dict
A dictionary "effect", "se", "llci", and "ulci" of (N_Meds x N_Comb) matrices, corresponding to the
statistics for the N_Meds mediators at the N_Comb different levels of the moderator(s).
"""
mod_values = [i for i in product(*self._moderators_values)]
mod_symb = self._moderators_symb
n_combs = len(mod_values)
effects, se, llci, ulci = np.empty((4, self._n_meds, n_combs))
for i in range(self._n_meds):
effects[i], _, se[i], llci[i], ulci[i] = self._get_conditional_indirect_effects(i, mod_symb, mod_values)
statistics = [i.flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _PMM_index(self):
"""
The Partial Moderated Mediation (PMM) index is only computed when exactly two moderators are present on the
mediation path.
It represents the marginal impact of one moderator (i.e. the impact of an increase in one unit for this
moderator on the indirect effect), conditional on a value of zero for the other moderator.
"""
if "PMM" not in self._analysis_list:
raise ValueError("This model does not report the Index for Partial Moderated Mediation.")
conf = self._options["conf"]
n_boots = self._options["boot"]
mod1, mod2 = self._moderators_symb # Only two moderators
dict_baseline = dict([[mod1, 0], [mod2, 0]])
e_baseline, be_baseline = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod1 = dict([[mod1, 1], [mod2, 0]])
e_mod1, be_mod1 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod2 = dict([[mod1, 0], [mod2, 1]])
e_mod2, be_mod2 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
effects, se, llci, ulci = np.empty((4, 2, self._n_meds))
for i in range(self._n_meds):
e_baseline[i], be_baseline[i], *_ = self._indirect_effect_at(i, dict_baseline)
e_mod1[i], be_mod1[i], *_ = self._indirect_effect_at(i, dict_mod1)
e_mod2[i], be_mod2[i], *_ = self._indirect_effect_at(i, dict_mod2)
e_pmm1 = e_mod1[i] - e_baseline[i] # Moderator1 at 1 vs. Moderator1 at 0
e_pmm2 = e_mod2[i] - e_baseline[i] # Moderator2 at 1 vs. Moderator2 at 0
be_pmm1 = be_mod1[i] - be_baseline[i]
be_pmm2 = be_mod2[i] - be_baseline[i]
effects[0][i] = e_pmm1
se[0][i] = be_pmm1.std(ddof=1)
if self._options["percent"]:
llci[0][i], ulci[0][i] = percentile_ci(be_pmm1, conf)
else:
llci[0][i], ulci[0][i] = bias_corrected_ci(e_pmm1, be_pmm1, conf)
effects[1][i] = e_pmm2
se[1][i] = be_pmm2.std(ddof=1)
if self._options["percent"]:
llci[1][i], ulci[1][i] = percentile_ci(be_pmm2, conf)
else:
llci[1][i], ulci[1][i] = bias_corrected_ci(e_pmm2, be_pmm2, conf)
statistics = [i.flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _MMM_index(self):
"""
"""
if "MMM" not in self._analysis_list:
raise ValueError("This model does not report the Index for Moderated Moderated Mediation.")
conf = self._options["conf"]
n_boots = self._options["boot"]
mod1, mod2 = self._moderators_symb # Only two moderators
dict_baseline = dict([[mod1, 1], [mod2, 1]])
e_baseline, be_baseline = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod1 = dict([[mod1, 2], [mod2, 0]])
e_mod1, be_mod1 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod2 = dict([[mod1, 0], [mod2, 2]])
e_mod2, be_mod2 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
effects, se, llci, ulci = np.empty((4, 1, self._n_meds))
for i in range(self._n_meds):
e_baseline[i], be_baseline[i], *_ = self._indirect_effect_at(i, dict_baseline)
e_mod1[i], be_mod1[i], *_ = self._indirect_effect_at(i, dict_mod1)
e_mod2[i], be_mod2[i], *_ = self._indirect_effect_at(i, dict_mod2)
e_pmm = e_baseline[i] - (e_mod1[i] + e_mod2[i]) / 2
be_pmm = be_baseline[i] - (be_mod1[i] + be_mod2[i]) / 2 # bootstrap analogue of the point estimate above
effects[0][i] = e_pmm
se[0][i] = be_pmm.std(ddof=1)
if self._options["percent"]:
llci[0][i], ulci[0][i] = percentile_ci(be_pmm, conf)
else:
llci[0][i], ulci[0][i] = bias_corrected_ci(e_pmm, be_pmm, conf)
statistics = [i.flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _CMM_index(self):
"""
"""
if "CMM" not in self._analysis_list:
raise ValueError("This model does not report the Index for Conditional Moderated Mediation.")
conf = self._options["conf"]
mod1, mod2 = self._moderators_symb
mod1_val, mod2_val = self._moderators_values
n_levels_mod1 = len(mod1_val)
n_levels_mod2 = len(mod2_val)
effects_mod1, se_mod1, llci_mod1, ulci_mod1 = np.empty((4, self._n_meds, n_levels_mod1))
effects_mod2, se_mod2, llci_mod2, ulci_mod2 = np.empty((4, self._n_meds, n_levels_mod2))
for i in range(self._n_meds):
for j, val in enumerate(mod1_val): # Conditional moderated mediation effects for Moderator 1
dict_off = dict([[mod1, val], [mod2, 0]])
dict_on = dict([[mod1, val], [mod2, 1]])
e_off, be_off, *_ = self._indirect_effect_at(i, dict_off)
e_on, be_on, *_ = self._indirect_effect_at(i, dict_on)
e_cmm = e_on - e_off
be_cmm = be_on - be_off
effects_mod1[i][j] = e_cmm
se_mod1[i][j] = be_cmm.std(ddof=1)
if self._options["percent"]:
llci_mod1[i][j], ulci_mod1[i][j] = percentile_ci(be_cmm, conf)
else:
llci_mod1[i][j], ulci_mod1[i][j] = bias_corrected_ci(e_cmm, be_cmm, conf)
for j, val in enumerate(mod2_val): # Conditional moderated mediation effects for Moderator 2
dict_off = dict([[mod1, 0], [mod2, val]])
dict_on = dict([[mod1, 1], [mod2, val]])
e_off, be_off, *_ = self._indirect_effect_at(i, dict_off)
e_on, be_on, *_ = self._indirect_effect_at(i, dict_on)
e_cmm = e_on - e_off
be_cmm = be_on - be_off
effects_mod2[i][j] = e_cmm
se_mod2[i][j] = be_cmm.std(ddof=1)
if self._options["percent"]:
llci_mod2[i][j], ulci_mod2[i][j] = percentile_ci(be_cmm, conf)
else:
llci_mod2[i][j], ulci_mod2[i][j] = bias_corrected_ci(e_cmm, be_cmm, conf)
stats_mod1 = [i.flatten() for i in [effects_mod1, se_mod1, llci_mod1, ulci_mod1]]
stats_mod2 = [i.flatten() for i in [effects_mod2, se_mod2, llci_mod2, ulci_mod2]]
statistics = np.concatenate([stats_mod1, stats_mod2], axis=1)
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _cond_ind_effects_wrapper(self):
"""
A wrapper for the conditional indirect effects.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the conditional indirect effects.
"""
symb_to_var = self._symb_to_var
results = self.estimation_results
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Effect", "Boot SE", "BootLLCI", "BootULCI"]
mod_values = self._moderators_values
med_values = [[symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]]
values = med_values + mod_values
rows_levels = np.array([i for i in product(*values)])
cols_levels = ["Mediator"] + [symb_to_var.get(x, x) for x in self._moderators_symb]
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _simple_ind_effects_wrapper(self):
"""
A wrapper for the indirect effects (and for total/contrast effects if specified)
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the simple/total/constrasts of indirect effects.
"""
symb_to_var = self._symb_to_var
results = self.estimation_results
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
med_names = [symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]
rows_levels = []
if self._options["total"]:
rows_levels += ["TOTAL"]
rows_levels += med_names
if self._options["contrast"]:
contrasts = ["Contrast: {} vs. {}".format(a, b) for a, b in combinations(med_names, 2)]
rows_levels += contrasts
rows_levels = np.array(rows_levels).reshape(-1, 1)
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = ["", "Effect", "Boot SE", "BootLLCI", "BootULCI"]
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _PMM_index_wrapper(self):
"""
A wrapper for the Partial Moderated Mediation index.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the PMM index.
"""
symb_to_var = self._symb_to_var
results = self._PMM_index()
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Index", "Boot SE", "LLCI", "ULCI"]
mod_names = [[symb_to_var.get(i, i) for i in self._moderators_symb]]
med_names = [[symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]]
values = mod_names + med_names
rows_levels = np.array([i for i in product(*values)])
cols_levels = ["Moderator", "Mediator"]
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _CMM_index_wrapper(self):
"""
A wrapper for the Conditional Moderated Mediation index.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the CMM index.
"""
symb_to_var = self._symb_to_var
results = self._CMM_index()
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Index", "Boot SE", "BootLLCI", "BootULCI"]
mod1_name, mod2_name = [symb_to_var.get(i, i) for i in self._moderators_symb]
mod1_values, mod2_values = self._moderators_values
med_names = [symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]
rows_modname = [mod2_name] * len(mod1_values) * self._n_meds + [mod1_name] * len(mod2_values) * self._n_meds
rows_modname = np.reshape(rows_modname, (-1, 1))
rows_medname = np.concatenate([np.repeat(med_names, len(mod1_values)), np.repeat(med_names, len(mod2_values))])
rows_medname = np.reshape(rows_medname, (-1, 1))
rows_modvalues = np.concatenate([np.tile(mod1_values, self._n_meds), np.tile(mod2_values, self._n_meds)])
rows_modvalues = np.reshape(rows_modvalues, (-1, 1))
cols_levels = ["Focal Mod", "Mediator", "Other Mod At"]
rows_levels = np.concatenate([rows_modname, rows_medname, rows_modvalues], axis=1)
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _MMM_index_wrapper(self):
"""
A wrapper for the Moderated Moderated Mediation index.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the CMM index.
"""
symb_to_var = self._symb_to_var
results = self._MMM_index()
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Index", "Boot SE", "BootLLCI", "BootULCI"]
med_names = [[symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]]
rows_levels = np.array([i for i in product(*med_names)])
cols_levels = ["Mediator"]
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def MMM_index_summary(self):
if "MMM" in self._analysis_list:
return self._MMM_index_wrapper()
else:
raise NotImplementedError("This model does not reported the Moderated Moderated Mediation index.")
def PMM_index_summary(self):
if "PMM" in self._analysis_list:
return self._PMM_index_wrapper()
else:
raise NotImplementedError("This model does not reported the Partial Moderated Mediation index.")
def CMM_index_summary(self):
if "CMM" in self._analysis_list:
return self._CMM_index_wrapper()
else:
raise NotImplementedError("This model does not reported the Conditional Moderated Mediation index.")
def coeff_summary(self):
"""
Get the summary of the indirect effect(s).
:return: The appropriate moderated/unmoderated effect(s).
"""
return self._cond_ind_effects_wrapper() if self._has_moderation else self._simple_ind_effects_wrapper()
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the coefficients in a nicer way.
:return: A string to display.
"""
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
analysis_func = {"PMM": ('PARTIAL MODERATED MEDIATION', self._PMM_index_wrapper),
"MMM": ('MODERATED MODERATED MEDIATION', self._MMM_index_wrapper),
"CMM": ('CONDITIONAL MODERATED MEDIATION', self._CMM_index_wrapper)}
symb_to_var = self._symb_to_var
if self._has_moderation:
basestr = "Conditional indirect effect(s) of {x} on {y} at values of the moderator(s):\n\n" \
"{coeffs}\n\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
else:
basestr = "Indirect effect of {x} on {y}:\n\n" \
"{coeffs}\n\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
for a in self._analysis_list:
name, get_results = analysis_func[a]
results = get_results()
basestr += "**************** INDEX OF {name} ******************\n\n" \
"{results}\n\n".format(name=name, results=results.to_string(float_format=float_format))
return basestr
def __str__(self):
return self.summary()
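# Hedged usage sketch (assuming a Process-style front-end builds the arguments, as the docstrings above
# suggest; the construction of `data`, `symb_to_ind`, `symb_to_var`, etc. is not shown in this module):
#
#   med_model = ParallelMediationModel(data, exog_terms_y, exog_terms_m, mod_symb, spot_values,
#                                      n_meds, analysis_list, symb_to_ind, symb_to_var, options)
#   print(med_model.summary())            # indirect effects, plus PMM/CMM/MMM indices if applicable
#   effects = med_model.coeff_summary()   # pandas DataFrame of effect, boot SE, and bootstrap CI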
class BaseOutcomeModel(object):
"""
A statistical model reflecting the path from independent predictors (X, or X and M)
to an endogenous outcome (Y, or M).
"""
def __init__(self, data, endogvar, exogvars, symb_to_ind, symb_to_var, options=None):
"""
Instantiate the model.
:param data: np.array
A NxK array of data
:param endogvar: string
The name of the endogenous variable.
:param exogvars: list of strings
The names of the exogenous variables.
:param symb_to_ind: dict of int
A dictionary mapping variable symbols to indices.
:param symb_to_var: dict of strings
A dictionary mapping variable symbols to names.
:param options: dict
A dictionary of options.
"""
if options is None:
options = {}
self._data = data
self._endogvar = endogvar
self._exogvars = exogvars
self._symb_to_ind = symb_to_ind
self._symb_to_var = symb_to_var
if not options:
options = {}
self._options = options
endog_ind = self._symb_to_ind[self._endogvar]
exog_ind = [self._symb_to_ind[var] for var in self._exogvars]
self._endog = data[:, endog_ind]
self._exog = data[:, exog_ind]
self._n_obs = self._exog.shape[0]
self._n_vars = self._exog.shape[1]
self._varnames = [i for i in self._exogvars if (("*" not in i) & (i != "Cons"))]
self._derivative = self._gen_derivative(wrt="x")
self.estimation_results = self._estimate()
def _gen_derivative(self, wrt):
"""
Generate a symbolic derivative of the equation with respect to the variable 'wrt', and store it in a matrix.
For instance (Model 21), we consider the equation aConstant + bX + cW + dX*W, which we differentiate with respect to X:
* The rearranged equation for X is: 1*(aConstant + cW) + X*(b + dW).
* The derivative of this expression is: (b + dW), or in matrix form: [0, 1, 0, W] * [a, b, c, d]
The first vector depends on the value of the moderator W: therefore, it cannot be represented numerically.
Instead, we express derivative using the following technique:
* Each term in the equation (i.e. Constant, X, W, X*W) is represented by a row.
* Each variable is represented by a column.
* The column for X (the variable with respect to which the equation is differentiated) is equal to 0 if the
term does not contain X, and 1 otherwise
* The other columns are equal to the variable if the term contains the variable, and to 1 otherwise.
That way, the product of the columns is equal to the value of each term in the derivative:
X W
[[ 0, 1 ], # Value of the Constant term : 0*1 = 0
[ 1, 1 ], # Value of X term : 1*1 = 1
[ 0, W ], # Value of the W term: 0*W = 0
[ 1, W ]] # Value of the X*W: 1*W = W
The advantage of this matrix is that it is a symbolic expression, in which we can substitute for the values of
the moderators, and then take the product of columns to obtain the numerical representation of the derivative
as a vector.
:return: A matrix of size (n_terms x n_vars)
"""
deriv = np.empty((len(self._varnames), len(self._exogvars)), dtype="object")
for i, var in enumerate(self._varnames):
if var == wrt:
deriv[i] = [1 if var in term else 0 for term in self._exogvars]
else:
deriv[i] = [var if var in term else 1 for term in self._exogvars]
return deriv.T
def coeff_summary(self):
"""
Get the estimates of the terms in the model.
:return: A DataFrame of betas, se, t (or z), p, llci, ulci for all variables of the model.
"""
results = self.estimation_results
if results:
if "t" in results.keys(): # Model has t-stats rather than z-stats
coeffs = np.array(
[results["betas"], results["se"], results["t"], results["p"], results["llci"], results["ulci"]]).T
df = pd.DataFrame(coeffs, index=results["names"],
columns=["coeff", "se", "t", "p", "LLCI", "ULCI"])
else: # Model has z-stats.
coeffs = np.array(
[results["betas"], results["se"], results["z"], results["p"], results["llci"], results["ulci"]]).T
df = pd.DataFrame(coeffs, index=results["names"],
columns=["coeff", "se", "Z", "p", "LLCI", "ULCI"])
else:
raise NotImplementedError(
"The model has not been estimated yet. Please estimate the model first."
)
return df
def _estimate(self):
pass
class OLSOutcomeModel(BaseOutcomeModel):
"""
An OLS subclass for OutcomeModels. Implement methods specific to the OLS estimation.
"""
def _estimate(self):
"""
Estimate the coefficients and statistics of the OLS model, and store the results in a dictionary of
estimation_results.
:return: self
"""
y = self._endog
x = self._exog
n_obs = self._n_obs
n_vars = self._n_vars
inv_xx = inv(dot(x.T, x))
xy = dot(x.T, y)
betas = dot(inv_xx, xy)
df_e = n_obs - n_vars
df_r = n_vars - 1
resid = y - dot(x, betas)
mse = (resid ** 2).sum() / df_e
sse = dot(resid.T, resid) / df_e
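# The coefficient covariance below is computed either under homoskedasticity
# ("standard": s^2 * (X'X)^-1 with s^2 = RSS/(n-k)) or with a White-type sandwich estimator
# (X'X)^-1 X' diag(w) X (X'X)^-1, where the weights w are e_i^2 (HC0), e_i^2 scaled by n/(n-k-1)
# here (HC1), e_i^2/(1-h_i) (HC2), or e_i^2/(1-h_i)^2 (HC3), with h_i the leverage of observation i.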
errortype = "standard" if self._options["hc3"] == 1 else "HC3"
if errortype == 'standard':
vcv = np.true_divide(1, n_obs - n_vars) * dot(resid.T, resid) * inv_xx
elif errortype == 'HC0':
sq_resid = (resid ** 2).squeeze()
vcv = dot(dot(dot(inv_xx, x.T) * sq_resid, x), inv_xx)
elif errortype == 'HC1':
sq_resid = (resid ** 2).squeeze()
vcv = np.true_divide(n_obs, n_obs - n_vars - 1) * \
dot(dot(dot(inv_xx, x.T) * sq_resid, x), inv_xx)
elif errortype == 'HC2':
sq_resid = (resid ** 2).squeeze()
H = (x.dot(inv_xx) * x).sum(axis=-1)
vcv = dot(dot(dot(inv_xx, x.T) * (sq_resid / (1 - H)), x), inv_xx)
elif errortype == 'HC3':
sq_resid = (resid ** 2).squeeze()
H = (x.dot(inv_xx) * x).sum(axis=-1)
vcv = dot(dot(dot(inv_xx, x.T) * (sq_resid / ((1 - H) ** 2)), x), inv_xx)
else:
raise ValueError("The covariance type {} is not supported. Please specify 'standard', 'HC0'"
"'HC1', 'HC2', or 'HC3".format(errortype))
betas = betas.squeeze()
se = np.sqrt(np.diagonal(vcv)).squeeze()
t = betas / se
p = stats.t.sf(np.abs(t), df_e) * 2
conf = self._options["conf"]
zscore = z_score(conf)
R2 = 1 - resid.var() / y.var()
adjR2 = 1 - (1 - R2) * ((n_obs - 1) / (n_obs - n_vars - 1))
F = (R2 / df_r) / ((1 - R2) / df_e)
F_pval = stats.f.sf(F, df_r, df_e)
llci = betas - (se * zscore)
ulci = betas + (se * zscore)
names = [self._symb_to_var.get(x, x) for x in self._exogvars]
estimation_results = {"betas": betas,
"se": se,
"vcv": vcv,
"t": t,
"p": p,
"R2": R2,
"adjR2": adjR2,
"df_e": int(df_e),
"df_r": int(df_r),
"mse": mse,
"F": F,
"sse": sse,
"F_pval": F_pval,
"llci": llci,
"ulci": ulci,
"names": names,
"n": int(n_obs)}
return estimation_results
def model_summary(self):
"""
The summary of the model statistics: R², F-stats, etc...
:return: A DataFrame of model statistics
"""
results = self.estimation_results
stats = ["R2", "adjR2", "mse", "F", "df_r", "df_e", "F_pval"]
row = [[results[s] for s in stats]]
df = pd.DataFrame(row, index=[""], columns=["R²", "Adj. R²", "MSE", "F", "df1", "df2", "p-value"])
return df
def coeff_summary(self):
"""
The summary of the OLS estimates for the model: betas, se, t, p-values, etc...
:return: A DataFrame of coefficient statistics
"""
return super().coeff_summary()
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the model and coefficients in a nicer way.
:return: A string to display.
"""
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
basestr = ("Outcome = {} \n"
"OLS Regression Summary\n\n{}\n\n"
"Coefficients\n\n{}".format(self._symb_to_var[self._endogvar],
self.model_summary().to_string(float_format=float_format),
self.coeff_summary().to_string(float_format=float_format)))
return basestr
def __str__(self):
return self.summary()
class LogitOutcomeModel(BaseOutcomeModel, BaseLogit):
"""
A Logit subclass for OutcomeModels. Implement methods specific to the Logistic estimation.
"""
def _estimate(self):
"""
Estimate the coefficients and statistics of the Logistic model, and store the results in a dictionary of
estimation_results.
:return: self
"""
betas = self._optimize()
vcv = inv(self._hessian(betas))
se = np.sqrt(np.diagonal(vcv)).squeeze()
z = betas / se
p = stats.norm.sf(np.abs(z)) * 2
conf = self._options["conf"]
zscore = z_score(conf)
llci = betas - (se * zscore)
ulci = betas + (se * zscore)
# GOF statistics
llmodel = self._loglike(betas)
lmodel = np.exp(llmodel)
minus2ll = -2 * llmodel
null_model = NullLogitModel(self._endog, self._options)
betas_null = null_model._optimize()
llnull = null_model._loglike(betas_null)
lnull = np.exp(llnull)
d = 2 * (llmodel - llnull)
pvalue = stats.chi2.sf(d, self._n_vars - 1)
mcfadden = 1 - llmodel / llnull
coxsnell = 1 - (lnull / lmodel) ** (2 / self._n_obs)
nagelkerke = coxsnell / (1 - lnull ** (2 / self._n_obs))
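# Pseudo R-squared measures computed above (ll = log-likelihood, L = likelihood, n = sample size):
#   McFadden   = 1 - ll_model / ll_null
#   Cox-Snell  = 1 - (L_null / L_model) ** (2 / n)
#   Nagelkerke = Cox-Snell / (1 - L_null ** (2 / n))
# `d` is the likelihood-ratio statistic 2 * (ll_model - ll_null), compared to a chi-squared distribution.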
names = [self._symb_to_var.get(x, x) for x in self._exogvars]
estimation_results = {"betas": betas,
"se": se,
"vcv": vcv,
"z": z,
"p": p,
"llci": llci,
"ulci": ulci,
"mcfadden": mcfadden,
"coxsnell": coxsnell,
"nagelkerke": nagelkerke,
"d": d,
"minus2ll": minus2ll,
"pvalue": pvalue,
"n": int(self._n_obs),
"names": names}
return estimation_results
def model_summary(self):
"""
The summary of the model statistics: Model LL, pseudo R², etc...
:return: A DataFrame of model statistics
"""
results = self.estimation_results
row = [[results[i] for i in ["minus2ll", "d", "pvalue", "mcfadden", "coxsnell", "nagelkerke", "n"]]]
return pd.DataFrame(row, index=[""],
columns=["-2LL", "Model LL", "p-value", "McFadden", "Cox-Snell", "Nagelkerke", "n"])
def coeff_summary(self):
"""
The summary of the Logistic estimates for the model: betas, se, z, p-values, etc...
:return: A DataFrame of coefficient statistics
"""
return super().coeff_summary()
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the model and coefficients in a nicer way.
:return: A string to display.
"""
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
basestr = ("\n**************************************************************************\n"
"Outcome = {} \n"
"Logistic Regression Summary\n\n{}\n\n"
"Coefficients\n\n{}".format(self._symb_to_var[self._endogvar],
self.model_summary().to_string(float_format=float_format),
self.coeff_summary().to_string(float_format=float_format)))
return basestr
def __str__(self):
return self.summary()
class DirectEffectModel(object):
def __init__(self, model, mod_symb, spot_values, has_mediation, symb_to_var, options=None):
"""
A container for the direct effect of the variable X on the outcome Y. If the model includes one or several
moderators of X, this container returns the conditional direct effects.
:param model: process.OutcomeModel
The OutcomeModel object of the outcome Y.
:param mod_symb: list of string
The symbols of the moderators of the direct effect.
:param symb_to_var: dict of string
The dictionary mapping each symbol to a variable name.
:param options: dict
The options of the model.
"""
self._model = model
self._is_logit = isinstance(model, LogitOutcomeModel)
self._symb_to_var = symb_to_var
self._derivative = self._model._derivative
self._has_mediation = has_mediation
self._moderators_symb = mod_symb
self._moderators_values = [spot_values.get(i, [0]) for i in self._moderators_symb]
self._has_moderation = True if self._moderators_symb else False
if not options:
options = {}
self._options = options
self._estimation_results = self._estimate()
def _estimate(self):
"""
Estimate the direct effect of X on Y, and return the results as a dictionary.
:return: dict
A dictionary of parameters and model estimates.
"""
mod_values = [i for i in product(*self._moderators_values)]
mod_symb = self._moderators_symb
betas, se, llci, ulci = self._get_conditional_direct_effects(mod_symb, mod_values)
t = betas / se
if self._is_logit:
p = stats.norm.sf(np.abs(t)) * 2
else:
df_e = self._model.estimation_results["df_e"]
p = stats.t.sf(np.abs(t), df_e) * 2
estimation_results = {"betas": betas,
"se": se,
"t": t,
"p": p,
"llci": llci,
"ulci": ulci}
return estimation_results
def _get_conditional_direct_effects(self, mod_symb, mod_values):
"""
Estimates the conditional direct effects of X on Y, at different values of the moderator(s)
:param mod_symb: list of string
A list of moderator symbols
:param mod_values: array of int/float
A list of lists of spotlight values for each moderator.
:return:
"""
betas, se, llci, ulci = np.zeros((4, len(mod_values)))
for i, val in enumerate(mod_values): # All possible products of level(s) of moderator(s)
mod_dict = {n: v for n, v in zip(mod_symb, val)}
betas[i], se[i], llci[i], ulci[i] = self._direct_effect_at(mod_dict)
return betas, se, llci, ulci
def _direct_effect_at(self, mod_dict):
"""
Compute the direct effect at specific value(s) of the moderator(s)
:param mod_dict: dict
None, or a mod_symb:mod_value dictionary of moderator values.
:return: e: scalar
Effect at the moderator values
se: scalar
Standard error
llci: scalar
Lower level of CI based on normal theory
ulci: scalar
Upper level of CI based on normal theory
"""
conf = self._options["conf"]
b = self._model.estimation_results["betas"]
vcv = self._model.estimation_results["vcv"]
deriv = self._derivative
grad = eval_expression(deriv, mod_dict) # Gradient at level(s) of the moderator(s)
betas = dot(grad, b) # Estimate is dot product of gradient and coefficients
var = dot(dot(grad, vcv), np.transpose(grad)) # V(Grad(X)) = Grad(X).V(X).Grad'(X)
se = np.sqrt(var)
zscore = z_score(conf)
llci = betas - (se * zscore)
ulci = betas + (se * zscore)
return betas, se, llci, ulci
def coeff_summary(self):
"""
The summary of the direct effect(s): betas, se, t, p-values, etc...
:return: pd.DataFrame
A DataFrame of coefficient statistics
"""
if self._estimation_results:
symb_to_var = self._symb_to_var
results = self._estimation_results
statistics = [results["betas"], results["se"], results["t"], results["p"], results["llci"],
results["ulci"]]
coeffs_rows = np.array([i.flatten() for i in statistics]).T
if self._is_logit:
coeffs_columns = ["Effect", "SE", "Z", "p", "LLCI", "ULCI"]
else:
coeffs_columns = ["Effect", "SE", "t", "p", "LLCI", "ULCI"]
mod_rows = np.array([i for i in product(*self._moderators_values)])
mod_columns = [symb_to_var.get(x, x) for x in self._moderators_symb]
rows = np.concatenate([mod_rows, coeffs_rows], axis=1)
columns = mod_columns + coeffs_columns
df = | pd.DataFrame(rows, columns=columns, index=[""] * rows.shape[0]) | pandas.DataFrame |
# -------------------------------------------------- ML 02/10/2019 ----------------------------------------------------#
#
# This is the class for poisson process
#
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
import pandas as pd
import math
from handles.data_hand import get_slotted_data
from sklearn.linear_model import LinearRegression
from scipy.stats import kstest
import statsmodels.api as sm
import statsmodels.formula.api as smf
from modeling.stat.models import fit_neg_binom
from scipy.stats import expon,gamma,nbinom
import random
random.seed( 30 )
class poisson_process:
def __init__(self,events,x,slotmin=60,sesonality=24.00,x_meta=None,combine=None,variablity_lambda=True):
# x holds the numeric features that lambda depends on.
# x_meta holds the categorical features that lambda depends on.
# sesonality is the period at which the time series wraps around, e.g. 24 hours.
# x can encode any factor levels, with "_" separating the categories; each category
# should be defined by a numeric indicator.
self.x_names = np.array( x.columns )
self.ts = np.array(events)
self.x = np.array(x)
self.x_meta=x_meta
self.slotmin = slotmin
self.sesonality = float( sesonality )
self.processed_data = self.get_combined_ts_data(combine=combine)
self.def_scale_multiplier()
self._variablity_lambda = variablity_lambda
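# Hedged usage sketch (column names and values are hypothetical; `events` are the arrival times, the
# first column of `x` packs its factor levels with "_" separators and the second is the start-time slot):
#
#   x = pd.DataFrame({'factor': ['2015_1200_1', '2015_1200_2'], 'Start_time': [9.0, 10.0]})
#   pp = poisson_process(events=arrival_times, x=x, sesonality=24.0, combine=None)
#   pp.processed_data.head()   # inter-arrival times joined with the split factor columns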
def combine_timeslots(self,x,combine):
p = x.copy()
p[np.in1d(x, combine)] = combine[0]
return p
def poles_fun(self,d):
return pd.DataFrame(d).apply(lambda x: 1/(x**3))
def def_scale_multiplier(self):
# this is based on empirical data
average_mat = pd.DataFrame({'2014':[0.237053898,0.23033784,0.22646637,0.224855127,0.22145071,0.22017719,0.219680942],
'2015':[0.190591233,0.185363899,0.183113651,0.180825924,0.179276851,0.179478113,0.17919847]}).T
average_mat.columns = [1000,1100,1200,1300,1400,1500,1600]
average_mat=average_mat.reset_index()
average_mat=average_mat.melt(id_vars=["index"],var_name="Poles",value_name="Value")
cols = ['year','poles','scale']
average_mat.columns = cols
average_mat[cols] = average_mat[cols].apply(pd.to_numeric, errors='coerce')
average_mat['poles']=self.poles_fun(average_mat['poles'])
regressor = LinearRegression()
regressor.fit(average_mat[['year','poles']], average_mat['scale'])
self.scale_multiplier_predictor = regressor
self.reset_scale_multiplier()
def reset_scale_multiplier(self):
self._scale_multiplier = 1
def avg_scale_pred(self,year,poles):
return self.scale_multiplier_predictor.predict(np.array([year,
np.array(self.poles_fun([poles]))]).reshape(1, -1))
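# Example (hypothetical inputs): the scale-multiplier regression above is linear in the year and in
# 1/poles**3, so a prediction can be obtained as, e.g., `self.avg_scale_pred(year=2015, poles=1200)`.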
def get_processed_data(self):
diff_col_name = 'Aarrival_diff'
delta_t = np.diff(self.ts, n=1).reshape(-1, 1)
fin_d = pd.DataFrame(np.concatenate((delta_t, self.x[:-1, :]), axis=1))
fin_d.columns = np.concatenate(
(np.array(diff_col_name).reshape(-1, 1), np.array(self.x_names).reshape(-1, 1)), axis=0).flatten()
fin_d[diff_col_name] = pd.to_numeric(fin_d[diff_col_name])
# split the values in the factor that was provided to us
split = fin_d[self.x_names[0]].str.split("_", -1)
n = []
for i in range(0, len(split[0])):
fin_d['f' + str(i)] = split.str.get(i) # cast to float here (e.g. .astype(float)) if downstream code needs numeric levels
n.append('f' + str(i))
n.append(self.x_names[1])
self.all_names = n
fin_d = fin_d.sort_values(by=n)
return fin_d
def get_combined_ts_data(self,combine):
# combine timeslots
# if given argument = combine -- array of time slots to combine. we will replace these with
# the first element of the combine array
# start time internal is the timeslots to model the data on
self.processed_data = self.get_processed_data()
self.combine = combine
if combine is None:
self.combined_slots = False
combined_timeslots = self.processed_data[self.x_names[1]]
else:
self.combined_slots = True
combined_timeslots = self.combine_timeslots(self.processed_data[self.x_names[1]], combine=combine)
self.processed_data['Start_time_internal'] = combined_timeslots
return self.processed_data
def get_slotted_data(self,data, slot_secs):
return get_slotted_data(data=data,slot_secs=slot_secs)
# ------------------------------------------- FITTING --------------------------------------------------------------
def daywise_training_data(self,d,combine,fac1,fac2,f1,days,orignal_start_slot):
# fac2 is out internal slots that are combined
# it is also worth noting that we calculate the average for combined slots and then put them for
# all the slots for that given duration
if self.combined_slots:
x = fac2[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
scale_val = model_d_temp[(model_d_temp[:, 0] == combine[0])].flatten()[1]
add = [[i, scale_val, day_i] for i in combine[1:]]
model_d_temp = np.concatenate((model_d_temp, add))
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
else:
x = orignal_start_slot[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
return model_d
def discreet_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days':data, 'arrivalslot':x,'indicator':1})
data_gamma = data_gamma.groupby(['days','arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot','count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
ks_t_D = pd.DataFrame()
ks_t_pval = pd.DataFrame()
t_t_pval = pd.DataFrame()
exp_loc = pd.DataFrame()
exp_scale = pd.DataFrame()
exp_shape = pd.DataFrame()
time_slot = pd.DataFrame()
pos_l = pd.DataFrame()
neg_bio_r = pd.DataFrame()
neg_bio_p = pd.DataFrame()
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric( data_gamma[data_gamma['arrivalslot'] == f2]['count'] )
# poission
lam = np.mean(d)
# gamma
alpha,loc, beta = gamma.fit(d,loc=0)
# ks test
D , kspval = kstest(d,'gamma', args=(alpha,loc,beta))
# ttest - one sided
# sample2 = gamma.rvs(a = alpha, loc=loc, scale=beta, size=d.shape[0])
val , pval = 0,0 #ttest_ind(d,sample2)
# neg_binom
r,p = fit_neg_binom(vec=np.array(d).flatten(),init=0.0000001)
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([var])
else:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([f2])
# this is the final fit
fit = pd.DataFrame()
fit[[self.x_names[1]]] = time_slot
fit['gamma_loc'] = np.array(exp_loc).flatten()
fit['gamma_scale'] = np.array(exp_scale).flatten()
fit['gamma_shape'] = np.array(exp_shape).flatten()
fit['KS_D'] = np.array(ks_t_D).flatten()
fit['KS_PVal'] = np.array(ks_t_pval).flatten()
fit['Ttest_PVal'] = np.array(t_t_pval).flatten()
fit['Poisson_lam'] = np.array(pos_l).flatten()
fit['Negbio_r'] = np.array(neg_bio_r).flatten()
fit['Negbio_p'] = np.array(neg_bio_p).flatten()
return fit,data_save,x_save
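# The returned `fit` frame holds, per time slot, the fitted Poisson rate (Poisson_lam), the gamma
# location/scale/shape, the negative-binomial (r, p) from fit_neg_binom, and the Kolmogorov-Smirnov
# statistic and p-value of the gamma fit against the observed daily counts for that slot.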
def neg_bio_reg_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days': data, 'arrivalslot': x, 'indicator': 1})
data_gamma = data_gamma.groupby(['days', 'arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot', 'count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
nb_mu = pd.DataFrame()
nb_p = pd.DataFrame()
nb_n = pd.DataFrame()
nb_alpha = | pd.DataFrame() | pandas.DataFrame |
from x2df.fileIOhandlers.__fileIOhandler__ import FileIOhandler
from pandas import DataFrame, read_csv
import json
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtWidgets
import io
import inspect
# we want to do the imports as late as possible to
# keep it snappy once we have more and more fileIOhandlers
dumpparams = inspect.signature(DataFrame.to_csv).parameters.keys()
claimedFormats = [".csv", ".txt"]
class Handler(FileIOhandler):
def dump(self, df, dst=None, **kwargs):
kwargs["index"] = False
kwargs["line_terminator"] = "\n"
parseinfo = df.attrs.setdefault("parseinfo", {})
parseinfo["comment"] = "#"
metastr = "#" + json.dumps(df.attrs, indent=4).replace("\n", "\n#") + "\n"
allargs = kwargs | parseinfo
allargs = dict((k, v) for k, v in allargs.items() if k in dumpparams)
csvstring = metastr + df.to_csv(**allargs)
if dst is not None:
open(dst, "w").write(csvstring)
return None
return csvstring
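# Hedged usage sketch (the surrounding x2df machinery normally discovers this handler; df.attrs and
# the destination path are illustrative):
#
#   handler = Handler()
#   csv_text = handler.dump(df)           # returns the CSV string with a '#'-prefixed JSON metadata header
#   handler.dump(df, dst="out.csv")       # or write it to a file
#   frames = handler.parse("out.csv")     # re-reads the file, using the embedded parseinfo if present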
def parse(self, path, postprocess=True, **kwargs):
# since csvs are not a well-defined format, lets try a few things:
# first, check if the file starts with a metadata comment,
# we take the parser args from there
tryParseOverride = kwargs.get("parseinfo")
if tryParseOverride:
origmetadata = {"parseinfo": kwargs["parseinfo"]}
with open(path, "r") as f:
origmetadata = getMetadata(f)
skip = False
dfraw = DataFrame()
while not skip:
# we remove parseinfo from kwargs if it is present.
# this means that the override will be tried once,
# and it doesnt work, we try again without override.
if "parseinfo" in kwargs:
parseinfo = kwargs.pop("parseinfo")
infoValid = True
else:
parseinfo, infoValid = getParseInfo(path)
if not infoValid:
break
dfraw = readCSV(path, parseinfo)
skip |= not dfraw.empty
# if the parsing was successfull and we changed the original metadata,
# ask if we should add the metadata to the top of the file.
if not tryParseOverride:
updateOrigParseInfo(path, origmetadata, parseinfo, not dfraw.empty)
if postprocess:
return self.processRawDF(dfraw)
else:
return [dfraw]
def claim(self, path):
if any(path.endswith(x) for x in claimedFormats):
return [path]
else:
return []
def readCSV(path, parseInfo):
dfraw = | DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
from scipy import interpolate
import numpy as np
from numpy.lib.recfunctions import append_fields
import scipy.signal as sig
import scipy.stats as st
import time, os
import pandas as pd
import math
#import report_ctd
import ctdcal.report_ctd as report_ctd
import warnings
import ctdcal.fit_ctd as fit_ctd
import datetime
from decimal import Decimal
import settings
import sys
sys.path.append('ctdcal/')
import oxy_fitting
import gsw
warnings.filterwarnings("ignore", 'Mean of empty slice.')
def cast_details(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
'''
We determine the cast details using pandas magic.
First find alternating periods of pumps on and pumps off, then select the
pumps on period with the highest pressure. Get values from the row with the
highest pressure, and return all values to be sent to log.
Input:
stacast - integer, the station and cast, as SSSCC format
log_file - file handle or string, log_file
p_col - string, name of the pressure column
time_col - string, name of the time column
b_lat_col - string, name of the latitude column
b_lon_col - string, name of the longitude column
alt_col - string, name of the altimeter column
inMat - pandas dataframe, the dataframe to come in
Output:
start_cast_time - float, unix epoch seconds?, start of cast time, to be reported to log file
end_cast_time - float, unix epoch seconds?, end of cast time, to be reported to log file
bottom_cast_time - float, unix epoch seconds?, bottom of cast time, to be reported to log file
start_pressure - float, pressure at which cast started, to be reported to log file
max_pressure - float, bottom of the cast pressure, to be reported to log file
b_lat - float, latitude at bottom of cast
b_lon - float, longitude at bottom of cast
b_alti - float, altimeter reading at bottom of cast - volts only!
inMat - the dataframe that came in, with soak period trimmed off
don't need end_cast_time, max_pressure
inMat is trimmed to start and end of cast
'''
df_test = pd.DataFrame.from_records(inMat)
dfs = find_pump_on_off_dfs(df_test)
dfs_1 = find_pumps_on_dfs(dfs)
df_cast = find_max_pressure_df(dfs_1)
df_cast1 = find_last_soak_period(df_cast)
df_cast2 = trim_soak_period_from_df(df_cast1)
start_cast_time = float(df_cast2['scan_datetime'].head(1))
start_pressure = float(df_cast2['CTDPRS'].head(1))
end_cast_time = float(df_cast2['scan_datetime'].tail(1))
max_pressure = float(df_cast2['CTDPRS'].max())
bottom_cast_time = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['scan_datetime'])
b_lat = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLAT'])
b_lon = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLON'])
b_alti = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['ALT'])
#last two lines must be in to return the same as old - change to slices of df later
report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time,
bottom_cast_time, start_pressure, max_pressure, b_alti,
b_lat, b_lon)
#reconvert to ndarray - might need to be altered to remove second index
# inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()].to_records(index=False)
inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()]
return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
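# Hedged usage sketch (station/cast code, file handle and column names follow the conventions in the
# docstring; `raw` is the scan-by-scan data for one cast):
#
#   (t_start, t_end, t_bottom, p_start, p_max,
#    lat, lon, alt, trimmed) = cast_details('00101', log_file, 'CTDPRS', 'scan_datetime',
#                                           'GPSLAT', 'GPSLON', 'ALT', inMat=raw)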
#Move next four functions to a library or class(?) Clean up module
def find_pump_on_off_dfs(df):
'''Find alternating pump-on/pump-off periods and return a list of dataframes, one per period, to iterate over.
'''
return [g for i,g in df.groupby(df['pump_on'].ne(df['pump_on'].shift()).cumsum())]
def find_max_pressure_df(dfs):
'''Given a list of dataframes, return a reference to the frame that contains the highest pressure value.
'''
max_pressure_df = dfs[0]
max_pressure = max_pressure_df['CTDPRS'].max() #TODO make into config var
for df in dfs:
if df['CTDPRS'].max() > max_pressure:
max_pressure_df = df
return max_pressure_df
def find_pumps_on_dfs(dfs):
'''Given a list of dataframes, keep only the frames in which the pump-on flag is True for every row.
'''
return list(filter(lambda df: df['pump_on'].all(), dfs))
def trim_soak_period_from_df(df):
'''Look for minimum pressure in dataframe, then return everything after minimum pressure/top of cast.
'''
test = int(df.iloc[1:int((len(df)/4))]['CTDPRS'].idxmin())
return df.loc[test:]
def find_last_soak_period(df_cast, surface_pressure=2, time_bin=8, downcast_pressure=50):
"""Find the soak period before the downcast starts.
The algorithm is tuned for repeat hydrography work, specifically US GO-SHIP
parameters. This assumes the soak depth will be somewhere between 10 and 30
meters, the package will sit at the soak depth for at least 20 to 30 seconds
before starting ascent to the surface and descent to target depth.
Parameters
----------
df_cast : DataFrame
DataFrame of the entire cast
surface_pressure : integer
Minimum surface pressure threshold required to look for soak depth.
2 dbar was chosen as an average rosette is roughly 1.5 to 2 meters tall.
time_bin : integer
Time, in whole seconds.
downcast_pressure : integer
Minimum pressure threshold required to assume downcast has started.
50 dbar has been chosen as double the deep soak depth of 20-30 dbar.
Returns
-------
df_cast_ret : DataFrame
DataFrame starting within time_bin seconds of the last soak period.
The algorithm is not guaranteed to catch the exact start of the soak period,
but it should start within time_bin seconds of the end of the soak if
the soak period assumption is valid. This should be shorter than the total
soak period time, and able to catch the following rise and descent of the
package that signals the start of the cast.
The algorithm has been designed to handle four general cases of casts:
* A routine cast with pumps turning on in water and normal soak
* A cast where the pumps turn on in air/on deck
* A cast where the pumps turn on and off due to rosette coming out of water
* A cast where there are multiple stops on the downcast to the target depth
"""
#Validate user input
if time_bin <= 0:
raise ValueError('Time bin value should be positive whole seconds.')
if downcast_pressure <=0:
raise ValueError('Starting downcast pressure threshold must be positive integers.')
if downcast_pressure < surface_pressure:
raise ValueError('Starting downcast pressure threshold must be greater '
'than surface pressure threshold.')
# If pumps have not turned on until in water, return DataFrame
if df_cast.iloc[0]['CTDPRS'] > surface_pressure:
return df_cast
#Bin the data by time, and compute the average rate of descent
df_blah = df_cast.loc[:,:]
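# The factor of 24 below converts whole seconds to scan counts, assuming the usual 24 Hz CTD scan
# rate (the same constant appears as the cycle-time constant elsewhere in this module).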
df_blah['bin'] = pd.cut(df_blah.loc[:,'index'],
range(df_blah.iloc[0]['index'],df_blah.iloc[-1]['index'],time_bin*24),
labels=False, include_lowest=True)
df_blah2 = df_blah.groupby('bin').mean()
#Compute difference of descent rates and label bins
df_blah2['prs_diff'] = df_blah2['CTDPRS'].diff().fillna(0).round(0)
df_blah2['movement'] = pd.cut(df_blah2['prs_diff'], [-1000,-0.5,0.5,1000], labels=['up','stop','down'])
#Find all periods where the rosette is not moving
df_stop = df_blah2.groupby('movement').get_group('stop')
groupby_test = df_blah2.groupby(df_blah2['movement'].ne(df_blah2['movement'].shift()).cumsum())
list_test = [g for i,g in groupby_test]
#Find a dataframe index of the last soak period before starting descent
    def find_last_soak_index(list_obj, downcast_pressure):
        """ Return dataframe index in the last soak period before starting
        descent to target depth.
        """
        index = 0
        for i, x in enumerate(list_obj):
            if x['CTDPRS'].max() < downcast_pressure:
                if x.max()['movement'] == 'stop':
                    index = i
            if x['CTDPRS'].max() > downcast_pressure:
                return index
        return index
#Truncate dataframe to new starting index : end of dataframe
    start_index = np.around(list_test[find_last_soak_index(list_test, downcast_pressure)].head(1)['index'])
df_cast = df_cast.set_index('index')
df_cast = df_cast.loc[int(start_index):,:]
df_cast_ret = df_cast.reset_index()
return df_cast_ret
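# A minimal usage sketch of the soak-trimming helpers above. The variable
# df_segments is assumed to be a list of DataFrames (one per continuous pump_on
# state) with 'index', 'CTDPRS' and 'pump_on' columns; it is an assumption for
# illustration, not something produced at this point in the module:
# >>> pumps_on = find_pumps_on_dfs(df_segments)
# >>> df_downcast = find_max_pressure_df(pumps_on)
# >>> df_trimmed = find_last_soak_period(df_downcast, surface_pressure=2,
# ...                                    time_bin=8, downcast_pressure=50)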
#End move four functions
# def cast_details_old(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
# """cast_details function
#
# Function takes full NUMPY ndarray with predefined dtype array
# and adjusts ndarray to remove all extraneous surface data.
# Function returns cast start time, end time, bottom time and
# cleaned up matrix.
#
# Args:
# param1 (str): stacast, station cast input
# param2 (str): log_file, log file to write cast data.
# param3 (str): p_col, pressure data column name
# param4 (str): time_col, time data column name
# param5 (ndarray): inMat, numpy ndarray with dtype array
#
# Returns:
# Narray: The return value is ndarray with adjusted time of parameter
# specified.
#
# """
#
#
# if inMat is None:
# print("In cast_details: No data")
# return
# else:
# # Top of cast time, bottom of cast time, end of cast time,
# start_cast_time = 0.0
# bottom_cast_time = 0.0
# end_cast_time = 0.0
# # Test cycle time constant
# fl = 24
# # starting P
# start_pressure = 2.0
# # Max P
# max_pressure = 10000.0
# lm = len(inMat)-1
# rev = np.arange(int(lm/4),0,-1)
#
# # Find starting top of cast
# # Smallest P from reverse array search
# for i in rev:
# if start_pressure < inMat[p_col][i]:
# tmp = i
# elif start_pressure > inMat[p_col][i]:
# start_pressure = inMat[p_col][i]
# tmp = abs(i - 24) #patched to not break through the c(sea)-floor, can be made cleaner
# break
# start_cast_time = inMat[time_col][tmp]
#
# # Remove everything before cast start
# inMat = inMat[tmp:]
#
# # Max P and bottom time
# max_pressure = max(inMat[p_col])
# tmp = np.argmax((inMat[p_col]))
# bottom_cast_time = inMat[time_col][tmp]
# b_lat = inMat[b_lat_col][tmp]
# b_lon = inMat[b_lon_col][tmp]
# b_alti = inMat[alt_col][tmp]
#
# tmp = len(inMat)
# # Find ending top of cast time
# for i in range(int(tmp/2),tmp):
# if start_pressure > inMat[p_col][i]:
# end_cast_time = inMat[time_col][i]
# if i < tmp: tmp = i + 24
# break
#
# # Remove everything after cast end
# inMat = inMat[:tmp]
#
# report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_alti, b_lat, b_lon)
#
# return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
def ctd_align(inMat=None, col=None, time=0.0):
"""ctd_align function
Function takes full NUMPY ndarray with predefined dtype array
    and adjusts time of sensor response and water flow relative to
the time frame of temperature sensor.
Args:
param1 (ndarray): inMat, numpy ndarray with dtype array
param2 (float): col, column to apply time advance to.
param3 (float): time, advance in seconds to apply to raw data.
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
# Num of frames per second.
fl = 24
    if (inMat is not None) and (col is not None) and (time > 0.0):
        # Time to advance
        advnc = int(fl * time)
        tmp = np.arange(advnc, dtype=float)
last = inMat[col][len(inMat)-1]
tmp.fill(float(last))
inMat[col] = np.concatenate((inMat[col][advnc:],tmp))
return inMat
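# Example (sketch): advance a hypothetical oxygen voltage channel by 2 seconds
# relative to the temperature time frame; 'CTDOXYVOLTS' is an assumed column name.
# >>> raw_matrix = ctd_align(inMat=raw_matrix, col='CTDOXYVOLTS', time=2.0)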
def ctd_quality_codes(column=None, p_range=None, qual_code=None, oxy_fit=False, p_qual_col=None, qual_one=None, inMat=None):
"""ctd_quality_codes function
Function takes full NUMPY ndarray with predefined dtype array
Args:
param1 (ndarray):
param2 (float):
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
#If p_range set apply qual codes to part of array and return
if p_range is not None:
        print("Some algorithm for formatting qual codes per pressure range")
return
else:
q_df = pd.DataFrame(index=np.arange(len(inMat)), columns=p_qual_col)
for pq in p_qual_col:
if pq in list(qual_one):
q_df[pq] = q_df[pq].fillna(1)
            elif oxy_fit and pq == column:
q_df[pq] = q_df[pq].fillna(2)
else:
q_df[pq] = q_df[pq].fillna(2)
        q_nd = q_df.values
return q_nd
def formatTimeEpoc(time_zone='UTC', time_pattern='%Y-%m-%d %H:%M:%S', input_time = None):
"""formatTimeEpoc function
    Function takes pattern of time input, relative time zone, and
    datetime data array and returns an array of epoch time.
Args:
param1 (str): relative time zone for data.
param2 (str): pattern of incoming data.
param3 (ndarray): input_time, numpy 1d ndarray time array
Returns:
1D ndarray: The return array of epoch time
"""
if input_time is None:
print("In formatTimeEpoc: No data entered.")
return
else:
os.environ['TZ'] = 'UTC'
epoch_time = input_time
for i in range(0,len(input_time)):
epoch_time[i] = int(time.mktime(time.strptime(str(input_time[i], "utf-8"), time_pattern)))
return epoch_time
def dataToDataFrame(inFile):
"""dataToDataFrame function
Function takes full file path to csv type data file and returns a
PANDAS dataframe for data treatment with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): Full path to data file.
Returns:
DataFrame: The return value is a full dataframe with header.
.. REF PAGE:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv
"""
#df = pd.read_csv(inFile, header=[0,2])
df = pd.read_csv(inFile)
return df
def dataToNDarray(inFile, dtype=None, names=None, separator=',', skip=None):
"""dataToNDarray function
    Function takes full file path to csv type data file and returns a NUMPY
    ndarray for data manipulation with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): inFile, full path to csv file
param2 (arr): dtype list
param3 (str): separator, default comma ','
Returns:
Narray: The return value is a full data ndarray with two row header.
Reference Page:
https://scipy.github.io/old-wiki/pages/Cookbook/InputOutput.html
"""
try:
return pd.read_pickle(inFile).to_records()
except:
if skip is None:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names)
else:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names, skip_header=skip)
return arr
def hysteresis_correction(H1=-0.033, H2=5000, H3=1450, inMat = None):
"""Hysteresis Correction function
    Function takes data ndarray and hysteresis coefficients
    and returns hysteresis corrected oxygen data.
    Args:
        param1 (float): H1, hysteresis correction coefficient 1
        param2 (float): H2, hysteresis correction coefficient 2
        param3 (float): H3, hysteresis correction coefficient 3
param5 (array): inMat, raw ctd data.
Returns:
array: Return dissolved oxygen hysteresis corrected data.
.. REF PAGE:
    http://www.seabird.com/document/an64-3-sbe-43-dissolved-oxygen-do-sensor-hysteresis-corrections
"""
    if inMat is None:
        print("Hysteresis Correction function: No data")
        return
    else:
        # Use a float array so corrected concentrations are not truncated to integers
        Oxnewconc = np.arange(0, len(inMat), 1, dtype=float)
        Oxnewconc[0] = inMat['o1_mll'][1]
        for i in range(1,len(inMat)-1):
D = 1 + H1 * (math.exp(inMat['p_dbar'][i] / H2) - 1)
C = math.exp(-1 * 0.04167/ H3)
Oxnewconc[i] = ((inMat['o1_mll'][i] + (Oxnewconc[i-1] * C * D)) - (inMat['o1_mll'][i-1] * C)) / D
inMat['o1_mll'][:] = Oxnewconc[:]
return inMat
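# Sketch of applying the SBE 43 hysteresis correction with the default
# coefficients; 'p_dbar' and 'o1_mll' are the field names this implementation
# expects in inMat:
# >>> corrected = hysteresis_correction(H1=-0.033, H2=5000, H3=1450, inMat=raw_matrix)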
def data_interpolater(inArr):
"""data_interpolater to handle indices and logical indices of NaNs.
Input:
        - inArr, 1d numpy array that may contain NaNs (where np.isnan() returns True)
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
- interpolated array
Example:
>>> # linear interpolation of NaNs
>>> outArray = data_interpolater(inArr)
"""
nans, tmp= np.isnan(inArr), lambda z: z.nonzero()[0]
inArr[nans] = np.interp(tmp(nans), tmp(~nans), inArr[~nans])
return inArr
def o2pl2pkg(p_col, t_col, sal_col, dopl_col, dopkg_col, lat_col, lon_col, inMat):
"""o2pl2pkg convert ml/l dissolved oxygen to umol/kg
Input:
- t_col, temperature column header deg c.
- sal_col, salinity column header psu.
- dopl_col, dissolved column header ml/l.
- dopkg_col, dissolved column header umol/kg
- lat_col, latitude for entire cast deg.
- lon_col, longitude for entire cast deg.
- inMat, dtype ndarray processed ctd time data.
Output:
- Converted Oxygen column umol/kg
Example:
>>> # linear interpolation of NaNs
>>> outArray = o2pl2kg(inArr)
"""
    pkg = np.ndarray(shape=len(inMat), dtype=[(dopkg_col, float)])
    # Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(inMat[sal_col], inMat[p_col], inMat[lat_col], inMat[lon_col])
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, inMat[t_col], inMat[p_col])
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
# Convert DO ml/l to umol/kg
for i in range(0,len(inMat[dopl_col])):
pkg[i] = inMat[dopl_col][i] * 44660 / (s0[i] + 1000)
return pkg
def oxy_to_umolkg(df_sal, df_pressure, df_lat, df_lon, df_temp, df_oxy):
'''Rewritten from Courtney's method to use array-likes (aka use dataframes and ndarrays).
'''
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(df_sal, df_pressure, df_lat, df_lon)
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, df_temp, df_pressure)
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
series = df_oxy * 44660 / (s0 + 1000)
return series
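# Sketch of the ml/l -> umol/kg conversion on a bottle DataFrame; the column
# names below are assumptions for illustration only:
# >>> btl_df['CTDOXY'] = oxy_to_umolkg(btl_df['CTDSAL'], btl_df['CTDPRS'],
# ...                                  btl_df['LATITUDE'], btl_df['LONGITUDE'],
# ...                                  btl_df['CTDTMP1'], btl_df['CTDOXY1'])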
def raw_ctd_filter(input_array=None, filter_type='triangle', win_size=24, parameters=None):
"""raw_ctd_filter function
Function takes NUMPY array
of raw ctd data and returns filtered data. This function also needs
one of three filter types (boxcar, gaussian, triangle) as well as
window size.
Args:
param1 (ndarray): Numpy ndarray with predefined header with at
param2 (str): One of three tested filter types
boxcar, gaussian_std, triangle.
default is triangle
param3 (int): A window size for the filter. Default is 24, which
        is the number of frames per second from an SBE 9plus/11 CTD deck unit.
param4 (ndarray): parameters the dtype names used in filtering the
analytical inputs.
Returns:
Narray: The return value is a matrix of filtered ctd data with
the above listed header values.
"""
if input_array is None:
print("In raw_ctd_filter: No data array.")
return
else:
return_array = input_array
if parameters is None:
print("In raw_ctd_filter: Empty parameter list.")
else:
            for p in parameters:
                if filter_type == 'boxcar':
                    win = sig.boxcar(win_size)
                    return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/len(win)
                elif filter_type == 'gaussian':
                    sigma = np.std(input_array[str(p)])
                    win = sig.general_gaussian(win_size, 1.0, sigma)
                    return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/(len(win))
                elif filter_type == 'triangle':
                    win = sig.triang(win_size)
                    return_array[p] = 2*sig.convolve(input_array[p], win, mode='same')/len(win)
return return_array
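# Example (sketch): triangle-filter selected 24 Hz channels; the parameter
# names are assumptions, not fixed by this function.
# >>> filtered = raw_ctd_filter(raw_matrix, filter_type='triangle', win_size=24,
# ...                           parameters=['CTDPRS', 'CTDTMP1', 'CTDCOND1'])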
def ondeck_pressure(stacast, p_col, c1_col, c2_col, time_col, inMat=None, conductivity_startup=20.0, log_file=None):
"""ondeck_pressure function
Function takes full NUMPY ndarray with predefined dtype array
    of filtered raw ctd data, then stores, analyzes and removes on-deck
    values from the data.
Args:
param1 (str): stacast, station cast info
param1 (str): p_col, pressure data column name
param2 (str): c1_col, cond1 data column name
param3 (str): c2_col, cond2 data column name
param4 (str): time_col, time data column name
param5 (ndarray): numpy ndarray with dtype array
param6 (float): conductivity_startup, threshold value
param7 (str): log_file, log file name
Returns:
Narray: The return ndarray with ondeck data removed.
Also output start/end ondeck pressure.
"""
start_pressure = []
tmpMat = []
outMat = []
tmp = 0
start_p = 0.0
n = 0
ep = []
end_p = 0.0
# Frequency
fl = 24
fl2 = fl*2
# One minute
mt = 60
# Half minute
ms = 30
time_delay = fl*ms
if inMat is None:
print("Ondeck_pressure function: No data.")
return
else:
# Searches first quarter of matrix, uses conductivity
# threshold min to capture startup pressure
for j in range(0,int(len(inMat)/4)):
if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
tmp = j
start_pressure.append(inMat[p_col][j])
# Evaluate starting pressures
if not start_pressure: start_p = "Started in Water"
else:
n = len(start_pressure)
if (n > time_delay): start_p = np.average(start_pressure[fl2:n-(time_delay)])
else: start_p = np.average(start_pressure[fl2:n])
# Remove on-deck startup
inMat = inMat[tmp:]
        tmp = len(inMat)
# Searches last half of NDarray for conductivity threshold
if len(inMat) % 2 == 0:
inMat_2 = inMat.copy()
else:
inMat_2 = inMat.iloc[1:].copy()
inMat_half1, inMat_half2 = np.split(inMat_2,2)
ep = inMat_half2[(inMat_half2[c1_col] < conductivity_startup) & (inMat_half2[c2_col] < conductivity_startup)][p_col]
# for j in range(int(len(inMat)*0.5), len(inMat)):
# if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
# ep.append(inMat[p_col][j])
# if (tmp > j): tmp = j
# Evaluate ending pressures
if (len(ep) > (time_delay)): end_p = np.average(ep[(time_delay):])
        else: end_p = np.average(ep)
# Remove on-deck ending
outMat = inMat[:tmp]
# Store ending on-deck pressure
report_ctd.report_pressure_details(stacast, log_file, start_p, end_p)
return outMat
def _roll_filter(df, pressure_column="CTDPRS", direction="down"):
#fix/remove try/except once serialization is fixed
try:
if direction == 'down':
monotonic_sequence = df[pressure_column].expanding().max()
elif direction == 'up':
monotonic_sequence = df[pressure_column].expanding().min()
else:
raise ValueError("direction must be one of (up, down)")
except KeyError:
pressure_column = 'CTDPRS'
if direction == 'down':
monotonic_sequence = df[pressure_column].expanding().max()
elif direction == 'up':
monotonic_sequence = df[pressure_column].expanding().min()
else:
raise ValueError("direction must be one of (up, down)")
return df[df[pressure_column] == monotonic_sequence]
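# _roll_filter keeps only the scans whose pressure equals the running
# (expanding) maximum on a downcast (or minimum on an upcast), which discards
# scans recorded while the package rolled back the other way. Sketch:
# >>> df_down = _roll_filter(df_cast, pressure_column='CTDPRS', direction='down')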
def roll_filter(inMat, p_col, up='down', frames_per_sec=24, search_time=15, **kwargs):
"""roll_filter function
Function takes full NUMPY ndarray with predefined dtype array
and subsample arguments to return a roll filtered ndarray.
Args:
param1 (str): stacast, station cast info
param2 (ndarray): inMat, numpy ndarray with dtype array
param3 (str): up, direction to filter cast (up vs down)
param4 (int): frames_per_sec, subsample selection rate
param5 (int): seach_time, search time past pressure inversion
Returns:
Narray: The return value ndarray of data with ship roll removed
"""
#When the "pressure sequence" code is fixed, uncomment and use this instead
start = kwargs.get("start", 0)
end = kwargs.get("end", -1)
full_matrix = kwargs.get("full_matrix", inMat)
tmp_df = pd.DataFrame.from_records(full_matrix[start:end])
tmp_df = _roll_filter(tmp_df)
#return tmp_df.to_records(index=False)
return tmp_df
remove = []
frequency = 24 # Hz of package
if (frames_per_sec > 0) & (frames_per_sec <= 24):
sample = int(frequency/frames_per_sec) # establish subsample rate to time ratio
else: sample = frequency
# Adjusted search time with subsample rate
search_time = int(sample*frequency*int(search_time))
if inMat is None:
print("Roll filter function: No input data.")
return
else:
P = inMat[p_col]
dP = np.diff(P,1)
        if up == 'down':
index_to_remove = np.where(dP < 0)[0] # Differential filter
subMat = np.delete(inMat, index_to_remove, axis=0)
P = subMat[p_col]
tmp = np.array([])
for i in range(0,len(P)-1):
if P[i] > P[i+1]:
deltaP = P[i+1] + abs(P[i] - P[i+1])
# Remove aliasing
k = np.where(P == min(P[i+1:i+search_time], key=lambda x:abs(x-deltaP)))[0]
tmp = np.arange(i+1,k[0]+1,1)
remove = np.append(remove,tmp)
deltaP = 0
        elif up == 'up':
index_to_remove = np.where(dP > 0)[0] # Differential filter
subMat = np.delete(inMat, index_to_remove, axis=0)
P = subMat[p_col]
tmp = np.array([])
for i in range(0,len(P)-1):
if P[i] < P[i+1]:
deltaP = P[i+1] - abs(P[i] - P[i+1])
# Remove aliasing
k = np.where(P == min(P[i+1:i+search_time], key=lambda x:abs(x-deltaP)))[0]
tmp = np.arange(i+1,k[0]+1,1)
remove = np.append(remove,tmp)
deltaP = 0
subMat = np.delete(subMat,remove,axis=0)
return subMat
def pressure_sequence(df, p_col='CTDPRS', intP=2.0, startT=-1.0, startP=0.0, up='down', sample_rate=12, search_time=15):
"""pressure_sequence function
Function takes a dataframe and several arguments to return a pressure
sequenced data ndarray.
Pressure sequencing includes rollfilter.
Necessary inputs are input Matrix (inMat) and pressure interval (intP).
The other inputs have default settings. The program will figure out
specifics for those settings if left blank.
Start time (startT), start pressure (startP) and up are mutually exclusive.
    If sensors are not fully functional when ctd starts down cast
analyst can select a later start time or start pressure but not both.
There is no interpolation to the surface for other sensor values.
'up' indicates direction for pressure sequence. If up is set startT and startP
are void.
Args:
        param1 (DataFrame): DataFrame containing measurement data
param2 (str): p_col, pressure column name
param3 (float): starting pressure interval
param5 (float): start time (startT) for pressure sequence
param6 (float): start pressure (startP) for pressure sequence
param7 (str): pressure sequence direction (down/up)
param8 (int): sample_rate, sub sample rate for roll_filter. Cleans & speeds processing.
param9 (int): search_time, truncate search index for the aliasing part of ship roll.
param10 (ndarray): inMat, input data ndarray
Returns:
Narray: The return value is a matrix of pressure sequenced data
todo: deep data bin interpolation to manage empty slices
"""
# change to take dataframe with the following properties
# * in water data only (no need to find cast start/end)
# * The full down and up time series (not already split since this method will do it)
# New "algorithm" (TODO spell this right)
# * if direction is "down", use the input as is
# * if direction is "up", invert the row order of the input dataframe
# Use the "roll filter" method to get only the rows to be binned
# * the roll filter will treat the "up" part of the cast as a giant roll to be filtered out
# * the reversed dataframe will ensure we get the "up" or "down" part of the cast
# * there is no need to reverse the dataframe again as the pressure binning process will remove any "order" information (it doesn't care about the order)
# That's basically all I (barna) have so far TODO Binning, etc...
# pandas.cut() to do binning
#lenP, prvPrs not used
# Passed Time-Series, Create Pressure Series
start = 0
# Roll Filter
roll_filter_matrix = roll_filter(df, p_col, up, sample_rate, search_time, start=start)
df_roll_surface = fill_surface_data(roll_filter_matrix, bin_size=2)
#bin_size should be moved into config
binned_df = binning_df(df_roll_surface, bin_size=2)
binned_df = binned_df.reset_index(drop=True)
return binned_df
def binning_df(df, **kwargs):
'''Bins records according to bin_size, then finds the mean of each bin and returns a df.
'''
bin_size = kwargs.get("bin_size", 2)
try:
labels_in = [x for x in range(0,int(np.ceil(df['CTDPRS_DBAR'].max())),2)]
df['bins'] = pd.cut(df['CTDPRS_DBAR'], range(0,int(np.ceil(df['CTDPRS_DBAR'].max()))+bin_size,bin_size), right=False, include_lowest=True, labels=labels_in)
df['CTDPRS_DBAR'] = df['bins'].astype('float64')
df_out = df.groupby('bins').mean()
return df_out
except KeyError:
labels_in = [x for x in range(0,int(np.ceil(df['CTDPRS'].max())),2)]
df['bins'] = pd.cut(df['CTDPRS'], range(0,int(np.ceil(df['CTDPRS'].max()))+bin_size,bin_size), right=False, include_lowest=True, labels=labels_in)
df['CTDPRS'] = df['bins'].astype('float64')
df_out = df.groupby('bins').mean()
return df_out
def fill_surface_data(df, **kwargs):
    '''Copy first scan from top of cast, and propagate up to surface
'''
surface_values = []
bin_size = kwargs.get("bin_size", 2)
try:
for x in range(1, int(np.floor(df.iloc[0]['CTDPRS_DBAR'])), bin_size):
surface_values.append(x)
df_surface = pd.DataFrame({'CTDPRS_DBAR': surface_values})
df_surface['interp_bol'] = 1
df_merged = pd.merge(df_surface, df, on='CTDPRS_DBAR', how='outer')
except KeyError:
for x in range(1, int(np.floor(df.iloc[0]['CTDPRS'])), bin_size):
surface_values.append(x)
df_surface = pd.DataFrame({'CTDPRS': surface_values})
# Added by KJ to keep track of backfilled values
df_surface['interp_bol'] = 1
if len(df_surface['interp_bol']) == 1:
df_surface['interp_bol'] = 0
df_merged = pd.merge(df_surface.astype('float64'), df, on='CTDPRS', how='outer')
if 'interp_bol' not in df_merged.columns:
df_merged['interp_bol'] = np.NaN
df_merged['interp_bol'].fillna(0,inplace=True)
return df_merged.fillna(method='bfill')
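# fill_surface_data back-fills the shallowest real scan up to the surface and
# marks the synthetic rows with interp_bol == 1. Sketch (hypothetical input):
# >>> df_filled = fill_surface_data(df_binned, bin_size=2)
# >>> df_filled[df_filled['interp_bol'] == 1]  # rows created above the top scan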
def load_reft_data(reft_file,index_name = 'btl_fire_num'):
""" Loads reft_file to dataframe and reindexes to match bottle data dataframe"""
reft_data = pd.read_csv(reft_file,usecols=['btl_fire_num','T90'])
reft_data.set_index(index_name)
reft_data['SSSCC_TEMP'] = reft_file[-14:-9]
reft_data['REFTMP'] = reft_data['T90']
return reft_data
def load_salt_data(salt_file, index_name= 'SAMPNO'):
salt_data = pd.read_csv(salt_file,usecols = ['SAMPNO','SALNTY','BathTEMP','CRavg'])
salt_data.set_index(index_name)
salt_data['SSSCC_SALT'] = salt_file[-15:-10]
salt_data.rename(columns={'SAMPNO':'SAMPNO_SALT'}, inplace=True)
return salt_data
def load_btl_data(btl_file,cols=None):
"""ex. '/Users/k3jackson/p06e/data/bottle/00201_btl_mean.pkl'"""
btl_data = dataToNDarray(btl_file,float,True,',',0)
btl_data = pd.DataFrame.from_records(btl_data)
if cols != None:
btl_data = btl_data[cols]
ssscc = btl_file[-18:-13]
btl_data['SSSCC'] = ssscc
return btl_data
def load_time_data(time_file):
time_data = dataToNDarray(time_file,float,True,',',1)
time_data = pd.DataFrame.from_records(time_data)
return time_data
def calibrate_param(param,ref_param,press,calib,order,ssscc,btl_num,xRange=None,):
### NOTE: REF VALUES DEEMED QUESTIONABLE ARE STILL BEING USED FOR CALIBRATION
df_good = quality_check(param,ref_param,press,ssscc,btl_num,find='good')
df_ques = quality_check(param,ref_param,press,ssscc,btl_num,find='quest')
df_ques['Parameter'] = param.name
#report questionable data to a csv file
#constrain pressure to within limits of xRange
if xRange != None:
x0 = int(xRange.split(":")[0])
x1 = int(xRange.split(":")[1])
df_good_cons = df_good[(df_good[press.name] >= x0) & (df_good[press.name] <= x1)]
else:
#Take full range of temperature values
x0 = df_good[param.name].min()
x1 = df_good[param.name].max()
df_good_cons = df_good[(df_good[param.name] >= x0) & (df_good[param.name] <= x1)]
if 'P' in calib:
coef = get_param_coef(df_good_cons[press.name],df_good_cons['Diff'],order,calib)
    elif 'T' in calib or 'C' in calib:
coef = get_param_coef(df_good_cons[param.name],df_good_cons['Diff'],order,calib)
else:
print('calib argument not valid, use CP TP T or C')
return coef,df_ques
def quality_check(param,param_2,press,ssscc,btl_num,find,thresh=[0.002, 0.005, 0.010, 0.020]):
param = fit_ctd.array_like_to_series(param)
param_2 = fit_ctd.array_like_to_series(param_2)
press = fit_ctd.array_like_to_series(press)
ssscc = fit_ctd.array_like_to_series(ssscc)
btl_num = fit_ctd.array_like_to_series(btl_num)
diff = param_2 - param
df = pd.concat([ssscc,btl_num.rename('Bottle'),param.rename('Param_1'),param_2.rename('Param_2'),press.rename('CTDPRS'),diff.rename('Diff')],axis=1)
if find == 'good':
# Find data values for each sensor that are below the threshold (good)
df['Flag'] = 1
#df_range_comp = df_range[(df_range[diff].abs() < threshold)]# & (df_range[d_2].abs() < threshold) & (df_range[d_12].abs() < threshold)]
df.loc[(df.CTDPRS > 2000) & (df.Diff.abs() < thresh[0]), 'Flag'] = 2
df.loc[(df.CTDPRS <= 2000) & (df.CTDPRS >1000) & (df.Diff.abs() < thresh[1]), 'Flag'] = 2
df.loc[(df.CTDPRS <= 1000) & (df.CTDPRS >500) & (df.Diff.abs() < thresh[2]), 'Flag'] = 2
df.loc[(df.CTDPRS <= 500) & (df.Diff.abs() < thresh[3]), 'Flag'] = 2
#
# Filter out bad values
df = df[df['Flag'] == 2]
# Rename Columns back to what they were
if param.name != None:
df.rename(columns = {'Param_1' : param.name}, inplace=True)
if param_2.name != None:
df.rename(columns = {'Param_2' : param_2.name},inplace=True)
if press.name != None:
df.rename(columns = {'CTDPRS' : press.name}, inplace=True )
elif find == 'quest':
# Find data values for each sensor that are above the threshold (questionable)
df['Flag'] = 1
df.loc[(df.CTDPRS > 2000) & (df.Diff.abs() > thresh[0]), 'Flag'] = 3
df.loc[(df.CTDPRS <= 2000) & (df.CTDPRS >1000) & (df.Diff.abs() > thresh[1]), 'Flag'] = 3
df.loc[(df.CTDPRS <= 1000) & (df.CTDPRS >500) & (df.Diff.abs() > thresh[2]), 'Flag'] = 3
df.loc[(df.CTDPRS <= 500) & (df.Diff.abs() > thresh[3]), 'Flag'] = 3
# Filter out good values
df = df[df['Flag'] == 3]
# Remove unneeded columns
df = df.drop(['Param_1','Param_2'],axis=1)
# Re-Order Columns for better readability
df = df[[ssscc.name,'Bottle',press.name,'Flag','Diff']]
else:
print('Find argument not valid, please enter "good" or "quest" to find good or questionable values')
return df
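# Sketch of the residual-based flagging: thresholds loosen from 0.002 deeper
# than 2000 dbar to 0.020 shallower than 500 dbar, with flag 2 = good and
# flag 3 = questionable. The column choices below are assumptions:
# >>> df_good = quality_check(btl_df['CTDTMP1'], btl_df['T90'], btl_df['CTDPRS'],
# ...                         btl_df['SSSCC'], btl_df['btl_fire_num'], find='good')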
def get_param_coef(calib_param,diff,order,calib):
cf1 = np.polyfit(calib_param, diff, order)
if 'T' in calib:
coef = np.zeros(shape=5)
        if order == 0:
coef[4] = cf1[0]
        elif (order == 1) and (calib == 'TP'):
coef[1] = cf1[0]
coef[4] = cf1[1]
        elif (order == 2) and (calib == 'TP'):
coef[0] = cf1[0]
coef[1] = cf1[1]
coef[4] = cf1[2]
        elif (order == 1) and (calib == 'T'):
coef[3] = cf1[0]
coef[4] = cf1[1]
        elif (order == 2) and (calib == 'T'):
coef[2] = cf1[0]
coef[3] = cf1[1]
coef[4] = cf1[2]
if 'C' in calib:
coef = np.zeros(shape=7)
        if order == 0:
coef[6] = cf1[0]
        elif (order == 1) and (calib == 'CP'):
coef[1] = cf1[0]
coef[6] = cf1[1]
        elif (order == 2) and (calib == 'CP'):
coef[0] = cf1[0]
coef[1] = cf1[1]
coef[6] = cf1[2]
        elif (order == 1) and (calib == 'C'):
coef[5] = cf1[0]
coef[6] = cf1[1]
        elif (order == 2) and (calib == 'C'):
coef[4] = cf1[0]
coef[5] = cf1[1]
coef[6] = cf1[2]
return coef
def combine_quality_flags(df_list):
combined_df = pd.concat(df_list)
combined_df = combined_df.sort_values(['SSSCC','Bottle'])
combined_df = combined_df.round(4)
return combined_df
#Combine these three into a dataframe and write out to a csv
#Sort by sta/cast, bottle number, rev. press
def calibrate_conductivity(df,order,calib_param,sensor,xRange=None,
refc_col='BTLCOND',cond_col_1='CTDCOND1',cond_col_2='CTDCOND2',
p_col='CTDPRS'):#refc_data
### NOTE: REF VALUES DEEMED QUESTIONABLE ARE STILL BEING USED FOR CALIBRATION
if sensor == 1:
postfix = 'c1'
cond_col = 'CTDCOND1'
t_col = 'CTDTMP1'
elif sensor ==2:
postfix = 'c2'
cond_col = 'CTDCOND2'
t_col = 'CTDTMP2'
else:
print('No sensor name supplied, difference column name will be: diff')
if calib_param == 'P':
calib_col = p_col
elif calib_param == 'T':
calib_col = t_col
elif calib_param == 'C':
calib_col = cond_col
else:
print('No calib_param supplied')
diff = 'd_'+postfix #Difference between ref and prim sensor
# Calculate absolute differences between sensors and salt sample data
#df[diff] = refc_data[refc_col] - df[cond_col]
df[diff] = df[refc_col] - df[cond_col]
#df['primary_diff'] = refc_data[refc_col] - df[cond_col_1]
df['primary_diff'] = df[refc_col] - df[cond_col_1]
#df['secondary_diff'] = refc_data[refc_col] - df[cond_col_2]
df['secondary_diff'] = df[refc_col] - df[cond_col_2]
df['P-S'] = df[cond_col_1] - df[cond_col_2]
#Greater than 2000 dBar
lower_lim = 2000
upper_lim = df[p_col].max()
threshold = 0.002
df_deep_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_deep_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_deep_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#Between 2000 and 1000
lower_lim = 1000
upper_lim = 2000
threshold = 0.005
df_lmid_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_lmid_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_lmid_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#Between 1000 and 500
lower_lim = 500
upper_lim = 1000
threshold = 0.010
df_umid_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_umid_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_umid_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#Less than 500
lower_lim = df[p_col].min() - 1
upper_lim = 500
threshold = 0.020
df_shal_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_shal_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_shal_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#concat dataframes into two main dfs
df_good = pd.concat([df_deep_good,df_lmid_good,df_umid_good,df_shal_good])
df_ques = pd.concat([df_deep_ques,df_lmid_ques,df_umid_ques,df_shal_ques])
df_ref = pd.concat([df_deep_ref,df_lmid_ref,df_umid_ref,df_shal_ref])
if sensor == 1:
df_ques['Parameter'] = 'C1'
df_ques['Flag'] = 3
df_ref['Parameter'] = 'C'
df_ref['Flag'] = 3
elif sensor == 2:
df_ques['Parameter'] = 'C2'
df_ques['Flag'] = 3
df_ref['Flag'] = 3
if xRange != None:
x0 = int(xRange.split(":")[0])
x1 = int(xRange.split(":")[1])
df_good_cons = df_good[(df_good[calib_col] >= x0) & (df_good[calib_col] <= x1)]
else:
#Take full range of temperature values
# x0 = df_good[t_col].min()
# x1 = df_good[t_col].max()
df_good_cons = df_good#[(df_good[calib_col] >= x0) & (df_good[calib_col] <= x1)]
cf = np.polyfit(df_good_cons[calib_col], df_good_cons[diff], order)
sensor = '_c'+str(sensor)
coef = np.zeros(shape=7)
    if order == 0:
coef[6] = cf[0]
    elif (order == 1) and (calib_param == 'P'):
coef[1] = cf[0]
coef[6] = cf[1]
    elif (order == 2) and (calib_param == 'P'):
coef[0] = cf[0]
coef[1] = cf[1]
coef[6] = cf[2]
    elif (order == 1) and (calib_param == 'T'):
coef[3] = cf[0]
coef[6] = cf[1]
    elif (order == 2) and (calib_param == 'T'):
coef[2] = cf[0]
coef[3] = cf[1]
coef[6] = cf[2]
    elif (order == 1) and (calib_param == 'C'):
coef[5] = cf[0]
coef[6] = cf[1]
    elif (order == 2) and (calib_param == 'C'):
coef[4] = cf[0]
coef[5] = cf[1]
coef[6] = cf[2]
return coef,df_ques,df_ref
def prepare_fit_data(df,ref_col):
good_data = df.copy()
good_data = good_data[np.isfinite(good_data[ref_col])]
return good_data
def prepare_conductivity_data(ssscc,df,refc,ssscc_col = 'SSSCC',index_col = 'btl_fire_num'):
btl_concat = pd.DataFrame()
for x in ssscc:
btl_data = df[df[ssscc_col] == x]
refc_data = refc[refc[ssscc_col] == x]
btl_data_clean = prepare_fit_data(btl_data,refc_data,'C')
btl_concat = pd.concat([btl_concat,btl_data_clean])
refc = refc[refc[index_col] != 0]
refc = refc.reset_index(drop=True)
btl_concat = btl_concat.reset_index(drop=True)
return btl_concat, refc
def prepare_all_fit_data(ssscc,df,ref_data,param):
data_concat = pd.DataFrame()
for x in ssscc:
btl_data = df[df['SSSCC']==x]
ref_data_stn= ref_data[ref_data['SSSCC']==x]
btl_data_good = prepare_fit_data(btl_data,ref_data_stn,param)
data_concat = pd.concat([data_concat,btl_data_good])
return data_concat
def get_pressure_offset(start_vals,end_vals):
"""
    Finds unique values and calculates the mean pressure offset
Parameters
----------
start_vals :array_like
Array containing initial ondeck pressure values
end_vals :array_like
Array containing ending ondeck pressure values
Returns
-------
p_off :float
Average pressure offset
"""
p_start = pd.Series(np.unique(start_vals))
p_end = pd.Series(np.unique(end_vals))
p_start = p_start[p_start.notnull()]
p_end = p_end[p_end.notnull()]
p_off = p_start.mean() - p_end.mean()
# <NAME> THIS METHOD SHOULD BE USED TO KEEP START END PAIRS
# p_df = pd.DataFrame()
# p_df['p_start'] = p_start
# p_df['p_end'] = p_end
# p_df = p_df[p_df['p_end'].notnull()]
# p_df = p_df[p_df['p_start'].notnull()]
# p_off = p_df['p_start'].mean() - p_df['p_end'].mean()
##########################################################
p_off = np.around(p_off,decimals=4)
return p_off
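# Sketch: the offset is the mean on-deck start pressure minus the mean on-deck
# end pressure, using the columns produced by load_pressure_logs() below.
# >>> p_off = get_pressure_offset(log_df['ondeck_start_p'], log_df['ondeck_end_p'])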
def load_pressure_logs(file):
"""
Loads pressure offset file from logs.
Parameters
----------
file : string
Path to ondeck_pressure log
Returns
-------
df : DataFrame
Pandas DataFrame containing ondeck start and end pressure values
"""
df = pd.read_csv(file,names=['SSSCC','ondeck_start_p','ondeck_end_p'])
    # Change values in each row by removing non-number parts
df['SSSCC'] = df['SSSCC'].str[-5:]
df['ondeck_start_p'] = df['ondeck_start_p'].str[16:]
df['ondeck_end_p'] = df['ondeck_end_p'].str[14:]
df.loc[df['ondeck_start_p'].str[-5:] == 'Water','ondeck_start_p'] = np.NaN
df['ondeck_start_p'] = df['ondeck_start_p'].astype(float)
df['ondeck_end_p'] = df['ondeck_end_p'].astype(float)
return df
def write_offset_file(df,p_off,write_file='data/logs/poffset_test.csv'):
"""
"""
df_out = pd.DataFrame()
df_out['SSSCC'] = df['SSSCC']
df_out['offset'] = p_off
df_out.to_csv(write_file,index=False)
return
def pressure_calibrate(file):
pressure_log = load_pressure_logs(file)
    p_off = get_pressure_offset(pressure_log['ondeck_start_p'], pressure_log['ondeck_end_p'])
return p_off
def load_hy_file(path_to_hyfile):
df = pd.read_csv(path_to_hyfile, comment='#', skiprows=[0])
df = df[df['EXPOCODE'] != 'END_DATA']
return df
def load_all_ctd_files(ssscc,prefix,postfix,series,cols,reft_prefix='data/reft/',reft_postfix='_reft.csv',
refc_prefix='data/salt/',refc_postfix='_salts.csv',press_file='data/logs/ondeck_pressure.csv', cast_details = 'data/logs/cast_details.csv',
oxy_prefix='data/oxygen/', oxy_postfix='',index_col='btl_fire_num',t_col='CTDTMP1',
p_col='CTDPRS',ssscc_col='SSSCC'):
"""
LOAD ALL CTD FILES was changed (commented out)
Lines 1324-1328,1335,1337, 1338,345
"""
df_data_all = pd.DataFrame()
if series == 'bottle':
for x in ssscc:
print('Loading BTL data for station: ' + x + '...')
btl_file = prefix + x + postfix
btl_data = load_btl_data(btl_file,cols)
reft_file = reft_prefix + x + reft_postfix
try:
reft_data = load_reft_data(reft_file)
except FileNotFoundError:
print('Missing (or misnamed) REFT Data Station: ' + x + '...filling with NaNs')
reft_data = pd.DataFrame()
reft_data[index_col] = pd.Series(btl_data[index_col].values.astype(int))
reft_data['T90'] = pd.Series([np.nan]*len(btl_data))
ref_ssscc = ssscc_col + '_TEMP'
reft_data[ref_ssscc] = x
reft_data.index = btl_data.index
#refc_file = refc_prefix + x + refc_postfix
refc_file = refc_prefix + x + refc_postfix
try:
#refc_data = fit_ctd.salt_calc(refc_file,index_col,t_col,p_col,btl_data)
refc_data = load_salt_data(refc_file, index_name= 'SAMPNO')
except FileNotFoundError:
print('Missing (or misnamed) REFC Data Station: ' + x + '...filling with NaNs')
refc_data = pd.DataFrame()
refc_data['SAMPNO_SALT'] = pd.Series(btl_data[index_col].values.astype(int))
refc_data['CRavg'] = pd.Series([np.nan]*len(btl_data))
refc_data['BathTEMP'] = pd.Series([np.nan]*len(btl_data))
refc_data['BTLCOND'] = pd.Series([np.nan]*len(btl_data))
refc_data.index = btl_data.index
#Fix Index for each parameter to bottle number
# btl_data[index_col] = btl_data[index_col].astype(int)
# btl_data=btl_data.set_index(btl_data[index_col].values)
#
# reft_data = reft_data.set_index(reft_data[index_col].values)
oxy_file = oxy_prefix + x + oxy_postfix
try:
oxy_data,params = oxy_fitting.oxy_loader(oxy_file)
except FileNotFoundError:
print('Missing (or misnamed) REFO Data Station: ' + x + '...filling with NaNs')
                oxy_data = pd.DataFrame()
from io import BytesIO
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
def test_compression_roundtrip(compression):
df = pd.DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
tm.assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
with tm.decompress_file(path, compression) as fh:
result = fh.read().decode("utf8")
tm.assert_frame_equal(df, pd.read_json(result))
def test_read_zipped_json(datapath):
uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression="zip")
tm.assert_frame_equal(uncompressed_df, compressed_df)
@td.skip_if_not_us_locale
def test_with_s3_url(compression, s3_resource, s3so):
# Bucket "pandas-test" created in tests/io/conftest.py
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f)
roundtripped_df = pd.read_json(
"s3://pandas-test/test-1", compression=compression, storage_options=s3so
)
tm.assert_frame_equal(df, roundtripped_df)
def test_lines_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
df.to_json(path, orient="records", lines=True, compression=compression)
roundtripped_df = pd.read_json(path, lines=True, compression=compression)
tm.assert_frame_equal(df, roundtripped_df)
def test_chunksize_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
df.to_json(path, orient="records", lines=True, compression=compression)
with pd.read_json(
path, lines=True, chunksize=1, compression=compression
) as res:
roundtripped_df = pd.concat(res)
tm.assert_frame_equal(df, roundtripped_df)
def test_write_unsupported_compression_type():
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
with pytest.raises(ValueError, match=msg):
df.to_json(path, compression="unsupported")
def test_read_unsupported_compression_type():
    with tm.ensure_clean() as path:
        msg = "Unrecognized compression type: unsupported"
        with pytest.raises(ValueError, match=msg):
            pd.read_json(path, compression="unsupported")
from datetime import datetime
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from pyconsolida.budget_reader import read_full_budget
from pyconsolida.postdoc_fix_utils import (
check_consistency_of_matches,
fix_tipologie_df,
isinlist,
)
DIRECTORY = Path(r"C:\Users\lpetrucco\Desktop\Sostenibilità_biennio19_20")
dest_dir = DIRECTORY / "exported_luigi"
dest_dir.mkdir(exist_ok=True)
# Trova tutti i file analisi:
file_analisi_list = list(DIRECTORY.glob("[0-9]*[0-9]*[0-9]*[0-9]/*nalisi*.xls"))
settori_map = pd.read_excel(DIRECTORY / "settori_map.xlsx").set_index("commessa")
tipologie_map = pd.read_excel(DIRECTORY / "tipologie_map.xlsx").set_index("da")
tipologie_fix = pd.read_excel(DIRECTORY / "tipologie_fix.xlsx")
categorie_map = pd.read_excel(DIRECTORY / "categorie_map.xlsx")
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import requests
import pandas as pd
import datetime
import numpy as np
import os
import psycopg2
from matplotlib import pyplot
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import calendar
def get_extracted_data_from_restful(ms, year, parcel_id, api_user, api_pass, tstype,
ptype):
was_error = False
if ms == "be-wa":
ms = "bewa"
if ptype == "":
url = "http://cap.users.creodias.eu/query/parcelTimeSeries?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&tstype=" + tstype + "&scl=True&ref=True"
else:
url = "http://cap.users.creodias.eu/query/parcelTimeSeries?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&ptype=" + ptype + "&tstype=" + tstype + "&scl=True&ref=True"
print(url)
# response = requests.get(url, auth=(api_user, api_pass))
try:
response = requests.get(url, auth=(api_user, api_pass))
print(response)
if response.status_code == 404 or response.status_code == 500:
was_error = True
df = pd.DataFrame()
else:
df = pd.read_json(response.text)
if not df.empty:
if tstype == "c6":
df['date_part']=df['date_part'].map(lambda e: datetime.datetime.fromtimestamp(e))
df['orbit'] = df['date_part'].apply(lambda s: 'D' if s.hour < 12 else 'A')
df['date'] = df['date_part'].apply(lambda s: s.date())
if tstype == "bs":
df['date_part']=df['date_part'].map(lambda e: datetime.datetime.fromtimestamp(e))
df['orbit'] = df['date_part'].apply(lambda s: 'D' if s.hour < 12 else 'A')
df['date'] = df['date_part'].apply(lambda s: s.date())
# convert backscatters to decibels
# df['mean'] = df['mean'].map(lambda s: 10.0*np.log10(s))
else:
# create an Empty DataFrame object
df = pd.DataFrame()
except requests.exceptions.HTTPError as errh:
was_error = True
print ("Http Error:",errh)
except requests.exceptions.ConnectionError as errc:
was_error = True
print ("Error Connecting:",errc)
except requests.exceptions.Timeout as errt:
was_error = True
print ("Timeout Error:",errt)
except requests.exceptions.RequestException as err:
was_error = True
print ("OOps: Something Else",err)
if was_error:
df = pd.DataFrame()
return url, df, was_error
def get_extracted_data_from_db(host : str, port : str, dbname : str, user : str, password : str, sql_select : str) -> pd.DataFrame :
"""
Summary :
        This function connects with the database and runs the SQL query that is passed as a parameter.
The query extracts time series of sentinel data formatted according to the marker detection tool requirements.
A single, a subset or the whole set of parcels is retrieved according to the SQL based on the parameters set by the user.
The result is stored in a dataframe and an index is set using the db id and the timestamp of the image.
        Database credentials are needed (unlike the RESTful access).
Arguments :
host - IP address of the database server
port - port of the database (usually, 5432)
dbname - name of the database where data is stored
user - database user (with access privilege to the parcel, hist, sigs and metadata tables)
password - <PASSWORD>
        sql_select - sql query that retrieves the desired data. It is passed as a parameter by the function that calls get_extracted_data_from_db.
Returns :
A data frame with all sentinel data ready to be used by the preprocessing and marker detection modules.
"""
# I connect with the db and check id the connection works fine
conn = None
try:
conn = psycopg2.connect(host=host, port=port, dbname= dbname, user= user, password= password)
print("Connection to DB established")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
# I execute the query and copy the data it in a panda dataframe that
# I create with the same colums returned by the SQL statement
cur = conn.cursor()
cur.execute(sql_select)
data_ts = cur.fetchall()
col = []
for x in cur.description:
col.append(x[0])
ts_db = pd.DataFrame(data=data_ts, columns = col)
# I close the connection
cur.close()
conn.close()
# I set the index (parcel id as in the db + datetime of the images)
ts_db.set_index(['db_id', 'obstime'], inplace=True, verify_integrity=True)
return ts_db
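# Sketch of a call with placeholder credentials and a hypothetical SQL string;
# the table and column names in sql are assumptions and must match the actual
# extraction schema (the query must return db_id and obstime for the index):
# >>> sql = ("SELECT p.ogc_fid AS db_id, h.obstime, h.band, h.mean "
# ...        "FROM parcels p JOIN sigs h ON h.pid = p.ogc_fid")
# >>> ts = get_extracted_data_from_db('localhost', '5432', 'cbm_db', 'user', 'pwd', sql)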
def get_parcel_data_from_restful(ms, year, parcel_id, api_user, api_pass, ptype):
was_error = False
if ms == "be-wa":
ms = "bewa"
if ptype == "":
url = "http://cap.users.creodias.eu/query/parcelById?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&withGeometry=True"
else:
url = "http://cap.users.creodias.eu/query/parcelById?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&ptype=" + ptype + "&withGeometry=True"
print(url)
try:
response = requests.get(url, auth=(api_user, api_pass))
print(response)
if response.status_code == 404 or response.status_code == 500:
was_error = True
df = pd.DataFrame()
else:
df = pd.read_json(response.text)
except requests.exceptions.HTTPError as errh:
was_error = True
print ("Http Error:",errh)
except requests.exceptions.ConnectionError as errc:
was_error = True
print ("Error Connecting:",errc)
except requests.exceptions.Timeout as errt:
was_error = True
print ("Timeout Error:",errt)
except requests.exceptions.RequestException as err:
was_error = True
print ("OOps: Something Else",err)
if was_error:
df = pd.DataFrame()
return url, df, was_error
def get_cloudyness(cloud_stats, cloud_categories):
is_cloudy = False
cloudy_pixels = 0
total_pixels = 0
keys = [*cloud_stats]
for key in keys:
if int(key) in cloud_categories:
is_cloudy = True
cloudy_pixels += cloud_stats[key]
total_pixels += cloud_stats[key]
else:
total_pixels += cloud_stats[key]
if total_pixels > 0:
# cloud_percent = int(round(cloudy_pixels / total_pixels *100,0))
cloud_percent = round(cloudy_pixels / total_pixels *100,4)
else:
cloud_percent = None
return is_cloudy, cloud_percent
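# Sketch: the histogram keys are scene-classification codes and the values are
# pixel counts; which codes count as cloudy is set by cloud_categories
# (the codes below are assumptions for illustration):
# >>> get_cloudyness({'4': 120, '8': 30}, cloud_categories=[3, 8, 9, 10])
# (True, 20.0)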
def get_utm_number_from_reference(ref):
return int(ref.split('_')[5][1:3])
def calculate_ndvi_std_from_band_mean_and_std(red_mean,nir_mean,red_std,nir_std):
ndvi_std = 2 * np.sqrt((nir_mean*red_std)**2 +
(red_mean*nir_std)**2) / (nir_mean + red_mean)**2
return ndvi_std
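# The expression above is first-order (Gaussian) error propagation for
# NDVI = (NIR - RED) / (NIR + RED) with independent band errors:
#   sigma_NDVI ~= 2 * sqrt((NIR*sigma_RED)**2 + (RED*sigma_NIR)**2) / (NIR + RED)**2
# >>> calculate_ndvi_std_from_band_mean_and_std(1000.0, 3000.0, 50.0, 80.0)  # -> 0.02125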
def calculate_ndvi_and_cloud_percent_for_the_parcel(df_ext, cloud_categories):
# we make a copy first of the dataframe passed to this function to avoid changing the original
# dataframe
df = df_ext.copy()
# Convert the epoch timestamp to a datetime
df['date_part']=df['date_part'].map(lambda e: datetime.datetime.fromtimestamp(e))
df['cloud_pct'] = df['hist'].apply(lambda s: get_cloudyness(s, cloud_categories)[1])
bands = ['B04', 'B08']
# Check if extraction exists for these bands 4 and 8 for NDVI calculation, otherwise quit
length_of_band0 = len(df[df['band']==bands[0]])
length_of_band1 = len(df[df['band']==bands[1]])
if length_of_band0>0 and length_of_band1>0:
# Treat each band separately.
df0 = df[df['band']==bands[0]][['date_part', 'mean', 'count', 'std', 'cloud_pct', 'reference']]
df1 = df[df['band']==bands[1]][['date_part', 'mean', 'count', 'std', 'cloud_pct', 'reference']]
# Merge back into one DataFrame based on reference that should be unique
dff = pd.merge(df0, df1, on = 'reference', suffixes = (bands[0], bands[1]))
dff['ndvi'] = (dff[f"mean{bands[1]}"]-dff[f"mean{bands[0]}"])/(dff[f"mean{bands[1]}"]+dff[f"mean{bands[0]}"])
dff['utm_number'] = dff['reference'].apply(lambda s: get_utm_number_from_reference(s))
dff['ndvi_std'] = dff.apply(lambda x: calculate_ndvi_std_from_band_mean_and_std(x.meanB04,x.meanB08,x.stdB04,x.stdB08), axis=1)
        pd.set_option('precision', 3)
import numpy as np
from scipy.stats import poisson
#lr1,lr2 = [int(x) for x in input().strip().split()]
#lrr1,lrr2 = [int(x) for x in input().strip().split()]
#reward = [10,-2]
gamma = 0.9
V = np.zeros([20+1,20+1])
pie = np.zeros([20+1,20+1])
class samples:
def __init__(self,l1,l2,ep = 0.01):
self.ep = ep
self.probs1 = self.sample(l1)
self.probs2 = self.sample(l2)
def sample(self,lam):
d = []
i = 0
flag=False
s = 0
while True:
prob = poisson.pmf(i,lam)
if prob>=self.ep:
flag=True
s+=prob
d.append([i,prob])
i+=1
if prob<self.ep and flag:
break
s = (1-s)/len(d)
d = [(i,j+s) for i,j in d]
return d
locA = samples(3,3)
locB = samples(4,2)
def max_cars():
return 20
def state_action_value(state1,state2,action,V,gamma):
new_state = [max(0,min(state1-action,max_cars())),max(0,min(state2+action,max_cars()))]
val = -2*abs(action)
for kA1,vA1 in locA.probs1:
for kB1,vB1 in locB.probs1:
for kA2,vA2 in locA.probs2:
for kB2,vB2 in locB.probs2:
val_req = [min(new_state[0],kA1),min(new_state[1],kB1)]
probs = vA1*vB1*vA2*vB2
reward = sum(val_req)*10
new_s = [max(0,min(new_state[0]-val_req[0]+kA2,max_cars())),max(0,min(new_state[1]+val_req[1]+kB2,max_cars()))]#;print(new_state)
val+= probs*(reward+gamma*V[int(new_s[0]),int(new_s[1])])
return val
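# state_action_value computes the one-step expected return for Jack's car
# rental: q(s, a) = -2*|a| + E[ 10 * rentals + gamma * V(s') ], where the
# expectation runs over the truncated Poisson request/return samples above.
# Sketch (assumes V, locA and locB defined above):
# >>> q = state_action_value(10, 10, action=2, V=V, gamma=0.9)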
# def transition(state1,state2,action,r):
# s_ = state1-action
# s2_= state2+action
# s_,s2_ = state(s_,s2_)
# r += -2*abs(action)
# return s_,r,s2_
def policy_evaluation(V,pie,theta):
while True:
delta=0
for s1 in range(0,21):
for s2 in range(0,21):
v = V[s1,s2]
action = pie[s1,s2]
V[s1,s2] = state_action_value(s1,s2,action,V,0.9)
delta = max(delta,np.abs(v-V[s1,s2]))
print("one",delta)
if delta<theta:
break
def policy_iteration(V,pie):
stable = True
for i in range(21):
for j in range(21):
old = pie[i,j]
val = []
t1 = min(i,5)
t2 = -min(j,5)
for action in range(t2,t1+1):
v = state_action_value(i,j,action,V,0.9)
val.append([v,action])
action = sorted(val,key=lambda x: x[0])[-1][1]
pie[i,j] = action
if old!=pie[i,j] and abs(V[i,j]-val[-1][0])>0.1: stable = False
return stable
import seaborn as sns
def policy_improvement(V,pie,theta):
while True:
policy_evaluation(V,pie,theta)
print("ok")
theta = theta/2
stable = policy_iteration(V,pie)
if stable:
break
policy_improvement(V,pie,50)
import cv2
import numpy as np
img =np.clip(np.load(r"C:\Users\Akshay\Desktop\img.npy"),0,1)
imgcorr =np.clip(np.load(r"C:\Users\Akshay\Desktop\imgcorr.npy"),0,1)
cv2.imshow("ff",2*img)
ax = sns.heatmap(imgcorr)
import matplotlib.pyplot as plt
plt.savefig("av.png")
track=np.zeros(shape=[1000,1000])
track[500:,500:] = 1
plt.imshow(track)
import cv2
class Track_simulator:
def __init__(self,size,s_len,f_len,max_speed):
self.size = size
self.s_len = s_len
self.f_len = f_len
self.max_speed = max_speed
self.real_track = self.make_track()
def make_track(self):
track = np.zeros(self.size)
#track[self.s_len:,self.f_len:] = 1
real_track = np.ones([self.size[0]+4*self.max_speed,self.size[1]+4*self.max_speed])
real_track[2*self.max_speed:-2*self.max_speed,2*self.max_speed:-2*self.max_speed] = track
return real_track
def reset_speed(self):
self.Vx = 0
self.Vy = 0
def is_boundry(self,i,j):
return self.real_track[i][j]==1
def show_track(self):
plt.imshow(self.real_track)
def is_finish(self,i,j):
f_i = self.size[0]+2*self.max_speed-1
f_j_min = 2*self.max_speed
f_j_max = f_j_min+self.f_len-1
return i>=f_i and f_j_min<=j<=f_j_max
# def rand_start(self):
# start = np.random.randint(low=0,high=self.s_len)
# start_index = start+2*self.max_speed
# return start_index,self.size[0]+2*self.max_speed-2
def is_start(self,i,j):
s_j = self.size[1]+2*self.max_speed-1
s_i_min = 2*self.max_speed
s_i_max = s_i_min+self.s_len-1
return j==s_j and s_i_min<=i<=s_i_max
def take_action(self,action,i,j):
vx = self.Vx+action[0]
vy = self.Vy+action[1]
if (vx<=0 or vy<=0) and not self.is_start(i,j):
return self.Vx,self.Vy
self.Vx = max(min(vx,self.max_speed-1),0)
self.Vy = max(min(vy,self.max_speed-1),0)
return self.Vx,self.Vy
def s_point_loc(self,x):
i = 2*self.max_speed+x
j = self.size[1]+2*self.max_speed-1
return i,j
def rand_start(self):
x = np.random.randint(low=0,high=self.s_len)
return self.s_point_loc(x)
class MonteCarlo:
def __init__(self,num_sim,gamma=1):
self.track = Track_simulator([50]*2,10,20,5)
self.policy = np.zeros([*self.track.real_track.shape],dtype=int)
self.action_value = np.random.uniform(size=[*self.track.real_track.shape,9])
self.index_to_action = [[i,j] for i in range(-1,2) for j in range(-1,2)]
self.action_to_index = {str(self.index_to_action[i]):i for i in range(0,len(self.index_to_action))}
self.policy = np.argmax(self.action_value,axis=2)
self.num_sim = num_sim
self.b = 1/9
self.gamma = gamma
def rand_action(self):
x = np.random.choice([-1,0,1])
y = np.random.choice([-1,0,1])
return x,y
def create_chain(self,start):
self.track.reset_speed()
chain = []
mini = []
i,j=start
while True:
x,y = self.rand_action()
mini.append([i,j])
mini.append([x,y])
mini.append(-1)
Vx,Vy = self.track.take_action([x,y],i,j)
i+=Vx
j-=Vy
chain.append(mini)
mini = []
if self.track.is_finish(i,j):
break
if self.track.is_boundry(i,j):
self.track.reset_speed()
i,j = self.track.rand_start()
return chain
def arg_max(self,arr,i,j):
index = []
val = np.max(arr[i,j])
for k in range(len(arr[i,j])):
if arr[i,j,k]==val:
index.append(k)
return index
def Off_policy(self):
# This off policy Monte Carlo sampling uses per decision importance sampling
s = 0
count = np.zeros_like(self.action_value)
for episode in range(self.num_sim):
G = 0
s = s%self.track.s_len
i,j = self.track.s_point_loc(s)
chain = self.create_chain([i,j])
for stateT,actionT,rewardT in chain[::-1]:
G = self.gamma*G+rewardT
actionT = self.action_to_index[str(actionT)]
count[stateT[0],stateT[1],actionT]+=1
t=count[stateT[0],stateT[1],actionT]
self.action_value[stateT[0],stateT[1],actionT]+=(G-self.action_value[stateT[0],stateT[1],actionT])/t
index = self.arg_max(self.action_value,stateT[0],stateT[1])
max_action = actionT if actionT in index else index[0]
self.policy[stateT[0],stateT[1]] = max_action
W = 1/self.b if max_action==actionT else 0
G = W*G
s+=1
print("Episode:",episode)
def show(self,i,j):
temp = self.track.real_track.copy()
temp[i,j] = 10
cv2.imshow("playing with the policy",temp)
return cv2.waitKey(10) & 0xFF == ord('q')
def play(self,rand_start=True,pos=0):
self.track.reset_speed()
if rand_start:
loc = self.track.rand_start()
else:
loc = self.track.s_point_loc(pos)
while True:
close = self.show(*loc)
if close:
break
action = self.policy[loc[0],loc[1]]
action = self.index_to_action[action]
Vx,Vy = self.track.take_action(action,*loc)
#print(action,Vx,Vy)
loc = loc[0]+Vx,loc[1]-Vy
if self.track.is_finish(*loc):
print("FINISHED")
break
if self.track.is_boundry(*loc):
self.track.reset_speed()
loc = self.track.rand_start()
#print("Boundry Hit. New location is",loc,self.track.real_track[loc[0],loc[1]])
# def MCES_Every_Visit(self,):
#
#t = Track_simulator([10**3]*2,100,500,5)
#t.show_track()
d = MonteCarlo(100000)
ipolicy = d.policy.copy()
#f = d.create_chain(d.track.rand_start())
d.Off_policy()
d.play(False,pos=11)
#import gc
#gc.collect()
def pl(chain):
for i in chain:
temp = d.track.real_track.copy()
temp[i[0],i[1]]=10
cv2.imshow("track",temp)
cv2.waitKey(0)
yield 1
k = pl(f)
while True:
next(k)
# =============================================================================
#
# =============================================================================
import pygame
import sys
import time
class Grid_WorldSim:
def __init__(self,height,width,start_loc,finish_loc,actions,reward=-1,shift=None,kings_move=False):
self.shift = [0]*width if shift==None else shift
self.height = height
self.width = width
self.start_loc = start_loc
self.finish_loc = finish_loc
self.grid = self.make_grid()
self.r = reward
self.actions = actions
self.num_actions = len(self.actions)
self.reset_loc()
self.kings_move=kings_move
def reset_loc(self):
self.x_loc,self.y_loc = self.start_loc[0]+1,self.start_loc[1]+1
def ActionVal_init(self):
action_val = 0*np.random.uniform(low = 0,high = 1,size = [self.height+2,self.width+2,self.num_actions])
action_val[self.finish_loc[0]+1,self.finish_loc[1]+1] = 0
return action_val
def make_grid(self):
grid = np.zeros([self.height,self.width])
grid[self.finish_loc[0],self.finish_loc[1]]=-1
sudo_grid = np.ones([self.height+2,self.width+2])
sudo_grid[1:self.height+1,1:self.width+1] = grid
return sudo_grid
def is_finished(self,i,j):
return self.grid[i,j]==-1
def is_boundry(self,i,j):
return self.grid[i,j]==1
def apply_wind(self,x,y):
stoc_move = 0
if self.kings_move:
stoc_move = np.random.choice([-1,0,1])
x_ = x
x_ -= self.shift[y-1]+stoc_move
if 0<x_<=self.height and 0<y<self.width:
x = x_
return x,y
def starting_state(self):
return self.start_loc[0]+1,self.start_loc[1]+1
def simulate(self,action):
action = self.actions[action]
x_temp,y_temp = self.apply_wind(self.x_loc,self.y_loc)
if not self.is_boundry(x_temp,y_temp):
self.x_loc,self.y_loc = x_temp,y_temp
x_temp,y_temp=self.x_loc+action[0],self.y_loc+action[1]
if not self.is_boundry(x_temp,y_temp):
self.x_loc,self.y_loc = x_temp,y_temp
return self.r,[self.x_loc,self.y_loc]
class TDZero:
def __init__(self,simulation,num_episodes,epsilon=0.1,alpha=0.5,gamma=1):
self.simulation = simulation
self.epsilon = epsilon
self.alpha = alpha
self.gamma = gamma
self.num_episodes = num_episodes
self.action_val = self.simulation.ActionVal_init()
self.policy = np.argmax(self.action_val,axis=2)
self.num_action = self.simulation.num_actions
def action(self,state):
if self.epsilon>0:
probs = self.epsilon/self.num_action
rand = np.random.uniform()
if rand<=self.epsilon:
action = np.random.choice(range(self.num_action))
else:
action = self.policy[state[0],state[1]]
if action==self.policy[state[0],state[1]]:
return action,1-self.epsilon+probs
else:
return action,probs
else:
return self.policy[state[0],state[1]],1
def Learn(self):
t = 0
for episode in range(self.num_episodes):
self.simulation.reset_loc()
state = self.simulation.starting_state()
action = self.action(state)[0]
while True:
r,new_state = self.simulation.simulate(action)
new_action = self.action(new_state)[0]
Q = self.action_val[state[0],state[1],action]
Q_next = self.action_val[new_state[0],new_state[1],new_action]
self.action_val[state[0],state[1],action]+=self.alpha*(r+self.gamma*Q_next-Q)
self.policy[state[0],state[1]] = np.argmax(self.action_val[state[0],state[1]])
state = new_state
action = new_action
t+=1
if self.simulation.is_finished(*state):
break
print("Episode:",episode,"Time Steps Taken",t)
def play(self,rand_start=True,pos=0):
global SCREEN, CLOCK, GRID, HEIGHT, WIDTH, blockSize, BLACK, WHITE, GREEN, RED
BLACK = (0, 0, 0)
WHITE = (200, 200, 200)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
GRID = self.simulation.grid.copy()
blockSize = 20
WINDOW_HEIGHT, WINDOW_WIDTH = GRID.shape[0]*blockSize, GRID.shape[1]*blockSize
SCREEN = pygame.display.set_mode((WINDOW_WIDTH,WINDOW_HEIGHT))
CLOCK = pygame.time.Clock()
SCREEN.fill(BLACK)
HEIGHT,WIDTH = GRID.shape[0], GRID.shape[1]
self.simulation.reset_loc()
state = self.simulation.starting_state()
count=0
while True:
GRID = self.simulation.grid.copy()
GRID[state[0],state[1]] = 10
self.main()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
SCREEN.fill(BLACK)
action = self.action(state)[0]
print(state,action)
_,state = self.simulation.simulate(action)
count+=1
if self.simulation.is_finished(*state):
print("FINISHED")
print("Steps Taken",count)
pygame.quit()
sys.exit()
break
def main(self):
time.sleep(.5)
for x in range(WIDTH):
for y in range(HEIGHT):
color=WHITE
rect = pygame.Rect(x*(blockSize), y*(blockSize),
blockSize, blockSize)
if GRID[y][x]==1:
color=GREEN
SCREEN.fill(color,rect)
if GRID[y][x]==10:
color=RED
SCREEN.fill(color,rect)
if GRID[y][x]==-1:
color = WHITE
SCREEN.fill(color,rect)
pygame.draw.rect(SCREEN, color, rect, 1)
s = "0 0 0 1 1 1 2 2 1 0"
shift = [int(x) for x in s.strip().split()]
action = [[i,j] for i in range(-1,2) for j in range(-1,2) if not abs(i)==abs(j)==0]
grid = Grid_WorldSim(height=7,width=10,start_loc=[3,0],finish_loc=[3,7],shift = shift,actions = action,kings_move=True)
TD = TDZero(grid,1000,alpha=0.5)
TD.epsilon=0
import cv2
TD.Learn()
TD.play()
# =============================================================================
# EXPERIMENTS
# =============================================================================
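# Random-walk chain experiments: ChainSim is a 1-D chain with terminal states at both
# ends and a fixed per-step reward; actual_val() solves the Bellman equations for the
# 50/50 random policy so the learned estimates can be scored by RMS error.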
class ChainSim:
def __init__(self,chain_len=5,reward=1):
self.chain_len = chain_len
self.pos = 1
self.reward = reward
def actual_val(self):
trans_mat = np.zeros(shape=(self.chain_len+2,self.chain_len+2))
for i in range(1,trans_mat.shape[0]-1):
for j in range(1,trans_mat.shape[1]-1):
if i==j:
trans_mat[i][j-1]=0.5
trans_mat[i][j+1]=0.5
ones = self.reward*np.ones(self.chain_len)
const= np.zeros(self.chain_len+2)
const[1:-1]=ones
val = np.linalg.solve(np.identity(self.chain_len+2)-trans_mat,const)
return val[1:-1]
def is_finished(self,state):
return state<=0 or state>=self.chain_len+1
def take_step(self,e):
return 1 if np.random.uniform()>e else -1
def set_pos(self,pos):
self.pos = pos
def StateVal_init(self):
return np.zeros(self.chain_len+2)
def num_state(self):
return self.chain_len
def simulate(self,e):
a = self.take_step(e)
if self.is_finished(self.pos):
return 0,self.pos,a
self.pos += a
return self.reward,self.pos,a
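# n-step TD prediction on the chain above: Learn() uses plain n-step returns,
# TDerrorLearn() rebuilds the return from a sum of one-step TD errors, and the
# off_policy* variants weight the updates by importance-sampling ratios of the 50/50
# target policy against the behaviour policy parameterised by b.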
class TDnStep:
def __init__(self,sim,num_episodes,average_over,alpha,gamma,n_steps):
self.sim = sim
self.num_states = self.sim.num_state()
self.num_episodes = num_episodes
self.average_over = average_over
self.alpha = alpha
self.n_steps = n_steps
self.state_val = self.sim.StateVal_init()
self.gamma = gamma
self.actual_val = self.sim.actual_val()
def Learn(self):
c = 0
store=[]
b = 0.5
for episode in range(self.num_episodes):
state = c%self.num_states +1
self.sim.set_pos(state)
t = 0
chain=[]
#print('New_episode')
while True:
#print(state)
if not self.sim.is_finished(state):
r,s,a = self.sim.simulate(b)
chain.append([r,s,a])
else:
break
#print(chain)
tou = t-self.n_steps+1
if tou>=0:
G = 0
for r,s,a in chain[::-1]:
G=self.gamma*G+r
G+=(self.gamma**(self.n_steps))*self.state_val[chain[-1][-2]]
self.state_val[state] += self.alpha*(G-self.state_val[state])
state = chain.pop(0)[-2]
t+=1
c+=1
store.append(self.rms(self.state_val[1:-1],self.actual_val))
return sum(store)/len(store)
def rms(self,V,v):
rms = V-v
rms = rms**2
rms = np.sum(rms)
rms = rms**0.5
return rms
def TDerrorLearn(self):
c = 0
store = []
b=0.5
for episode in range(self.num_episodes):
state = c%self.num_states +1
self.sim.set_pos(state)
t = 0
chain=[]
#print('New_episode')
while True:
#print(state)
if not self.sim.is_finished(state):
r,s,a = self.sim.simulate(b)
chain.append([r,s,a])
else:
break
#print(chain)
tou = t-self.n_steps+1
if tou>=0:
G = 0
for i in range(0,self.n_steps-1):
_,s_,_ = chain[-2-i]
r,s,_ = chain[-1-i]
TDerror= r+self.gamma*self.state_val[s]-self.state_val[s_]
G=self.gamma*G+TDerror
r,s,a = chain[0]
TDerror = r+self.gamma*self.state_val[s]-self.state_val[state]
G = self.state_val[state]+self.gamma*G+TDerror
#G+=(self.gamma**(self.n_steps))*self.state_val[chain[-1][-1]]
self.state_val[state] += self.alpha*(G-self.state_val[state])
state = chain.pop(0)[-2]
t+=1
c+=1
store.append(self.rms(self.state_val[1:-1],self.actual_val))
return sum(store)/len(store)
def off_policyLearn(self):
c = 0
store=[]
b=0.4
for episode in range(self.num_episodes):
state = c%self.num_states +1
self.sim.set_pos(state)
t = 0
chain=[]
sigma = 1
#print('New_episode')
while True:
#print(state)
if not self.sim.is_finished(state):
r,s,a = self.sim.simulate(b)
chain.append([r,s,a])
else:
break
#print(chain)
tou = t-self.n_steps+1
if tou>=0:
                    # importance-sampling ratio of the 50/50 target policy over the
                    # behaviour policy (parameterised by b) for the current n-step window
                    sigma = 1
                    for r_i, s_i, a_i in chain:
                        if a_i == 1:
                            sigma = sigma*(0.5/(1-b))
                        else:
                            sigma = sigma*(0.5/b)
                    G = 0
                    for r_i, s_i, a_i in chain[::-1]:
                        G = self.gamma*G + r_i
                    G += (self.gamma**(self.n_steps))*self.state_val[chain[-1][-2]]
self.state_val[state] += self.alpha*sigma*(G-self.state_val[state])
state = chain.pop(0)[-2]
t+=1
c+=1
store.append(self.rms(self.state_val[1:-1],self.actual_val))
def off_policyLearn_contVariate(self):
c = 0
store=[]
b=0.4
for episode in range(self.num_episodes):
state = c%self.num_states +1
self.sim.set_pos(state)
t = 0
chain=[]
sigma = 1
#print('New_episode')
while True:
#print(state)
if not self.sim.is_finished(state):
r,s,a = self.sim.simulate(b)
chain.append([r,s,a])
else:
break
#print(chain)
tou = t-self.n_steps+1
if tou>=0:
                    # per-decision importance sampling with control variates: the
                    # per-step ratio sigma is recomputed inside the loop below, so no
                    # cumulative product over the whole window is needed here
                    G = self.state_val[chain[-1][-2]]
                    for i in range(0,self.n_steps-1):
r,s,a = chain[-i-1]
_,s_,_ = chain[-i-2]
if a==1:
sigma = (0.5/(1-b))
else:
sigma = (0.5/b)
G=sigma*(self.gamma*G+r) + (1-sigma)*self.state_val[s_]
r,s,a = chain[0]
if a==1:
sigma = (0.5/(1-b))
else:
sigma = (0.5/b)
                    G = sigma*(r+self.gamma*G)+(1-sigma)*self.state_val[state]
self.state_val[state] += self.alpha*(G-self.state_val[state])
state = chain.pop(0)[-2]
t+=1
c+=1
store.append(self.rms(self.state_val[1:-1],self.actual_val))
import pickle
d = {}
alpha=0
inc = 0.05
while alpha<1:
alpha+=inc
n = 1
while n<256:
n = n*2
d[alpha,n] = []
store1=store2=0
for j in range(0,100):
sim = ChainSim(chain_len=10,reward=0.01)
td = TDnStep(sim,num_episodes=100,average_over=1,alpha=alpha,n_steps=n,gamma=1)
store1 += td.Learn()
td = TDnStep(sim,num_episodes=100,average_over=1,alpha=alpha,n_steps=n,gamma=1)
store2 += td.TDerrorLearn()
d[alpha,n].append(store1/100)
d[alpha,n].append(store2/100)
with open("data.plk",'wb') as a:
pickle.dump(d,a)
import seaborn as sns
from collections import Counter
alpha = [x[0] for x in d]
alpha = [x for x in Counter(sorted(alpha))]
n=1
ratings = np.unique(list(range(20)))
palette = iter(sns.husl_palette(len(ratings)))
while n<256:
n*=2
rms = []
for i in range(0,len(alpha)):
rms.append(d[alpha[i],n][0])
sns.pointplot(alpha,rms,color=next(palette))
rms = []
for i in range(0,len(alpha)):
rms.append(d[alpha[i],n][1])
sns.pointplot(alpha,rms)
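# Gridworld whose wall layout switches after `phase_change` time steps (a
# blocking/shortcut-maze style setup), used below to exercise Dyna-Q with an
# exploration bonus.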
class Grid_WorldSim2:
def __init__(self,height,width,start_loc,finish_loc,actions,phase_change=3000,reward=1):
self.height = height
self.width = width
self.start_loc = start_loc
self.finish_loc = finish_loc
self.grid = self.make_grid()
self.r = reward
self.actions = actions
self.num_actions = len(self.actions)
self.reset_loc()
self.phase_change=phase_change
self.t = 0
def reset_loc(self):
self.x_loc,self.y_loc = self.start_loc[0]+1,self.start_loc[1]+1
def ActionVal_init(self):
action_val = 0*np.random.uniform(low = 0,high = 1,size = [self.height+2,self.width+2,self.num_actions])
action_val[self.finish_loc[0]+1,self.finish_loc[1]+1] = 0
return action_val
def make_grid(self):
grid = np.zeros([self.height,self.width])
grid[self.finish_loc[0],self.finish_loc[1]]=-1
grid[-2,0:-1]=1
sudo_grid1 = np.ones([self.height+2,self.width+2])
sudo_grid1[1:self.height+1,1:self.width+1] = grid
grid = np.zeros([self.height,self.width])
grid[self.finish_loc[0],self.finish_loc[1]]=-1
grid[-2,1:-1]=1
sudo_grid2 = np.ones([self.height+2,self.width+2])
sudo_grid2[1:self.height+1,1:self.width+1] = grid
return sudo_grid1,sudo_grid2
def is_finished(self,i,j):
phase = 0 if self.t<self.phase_change else 1
return self.grid[phase][i,j]==-1
def is_boundry(self,i,j):
phase = 0 if self.t<self.phase_change else 1
return self.grid[phase][i,j]==1
def starting_state(self):
return self.start_loc[0]+1,self.start_loc[1]+1
def simulate(self,action):
self.t +=1
action = self.actions[action]
x_temp,y_temp=self.x_loc+action[0],self.y_loc+action[1]
if not self.is_boundry(x_temp,y_temp):
self.x_loc,self.y_loc = x_temp,y_temp
        reward = self.r if self.is_finished(self.x_loc,self.y_loc) else 0
        return reward,[self.x_loc,self.y_loc]
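# Dyna-Q planning agent ("DyanQ" appears to be a typo for Dyna-Q): after each real
# step it performs `model_learn_steps` simulated updates drawn from a learned model,
# and with k > 0 it adds an exploration bonus of k*sqrt(time since last visit),
# i.e. the Dyna-Q+ variant.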
class DyanQ:
def __init__(self,sim,alpha,num_loops,epsilon,model_learn_steps,gamma=1,k=0):
self.sim = sim
self.alpha =alpha
self.num_loops = num_loops
self.gamma = gamma
self.epsilon = epsilon
self.alpha = alpha
self.action_val = self.sim.ActionVal_init()
self.policy = np.argmax(self.action_val,axis=2)
self.num_action = self.sim.num_actions
self.model = {}
self.model_learn_steps = model_learn_steps
self.T = {}
self.k = k
def action(self,state):
if self.epsilon>0:
probs = self.epsilon/self.num_action
rand = np.random.uniform()
if rand<=self.epsilon:
action = np.random.choice(range(self.num_action))
else:
action = self.policy[state[0],state[1]]
if action==self.policy[state[0],state[1]]:
return action,1-self.epsilon+probs
else:
return action,probs
else:
return self.policy[state[0],state[1]],1
def Learn(self):
t = 0
self.sim.reset_loc()
state = self.sim.starting_state()
cumm_r = [0]
for episode in range(self.num_loops):
t+=1
action = self.action(state)[0]
self.set_time(state,action,t)
r,new_state = self.sim.simulate(action)
Q = self.action_val[state[0],state[1],action]
Q_next = np.max(self.action_val[new_state[0],new_state[1]])
self.action_val[state[0],state[1],action]+=self.alpha*(r+self.gamma*Q_next-Q)
self.model_input(state,action,r,new_state)
self.Model_Learn(t)
self.policy[state[0],state[1]] = np.argmax(self.action_val[state[0],state[1]])
state = new_state
            if self.sim.is_finished(*state):
                self.sim.reset_loc()
                state = self.sim.starting_state()  # restart the episode from the start cell
cumm_r.append(cumm_r[-1]+r)
# print("Episode:",episode,"Time Steps Taken",t)
return cumm_r
def set_time(self,state,action,t):
state = tuple(state)
key = (state,action)
self.T[key]=t
def get_time(self,state,action):
state = tuple(state)
key = (state,action)
return 0 if key not in self.T else self.T[key]
def model_input(self,state,action,reward,state_):
state = tuple(state)
key = (state,action)
self.model[key]=[reward,state_]
def model_output(self,state,action):
state = tuple(state)
key = (state,action)
return self.model[key]
def sample_state_action(self):
state_actions = self.model.keys()
if len(state_actions)==1:
return list(state_actions)[0]
state,action = list(state_actions)[np.random.choice(range(len(list(state_actions))))]
return state,action
def Model_Learn(self,t):
for i in range(self.model_learn_steps):
state,action = self.sample_state_action()
r,new_state = self.model_output(state,action)
Q = self.action_val[state[0],state[1],action]
Q_next = np.max(self.action_val[new_state[0],new_state[1]])
T = t-self.get_time(state,action)
extra_r = self.k*(T**0.5)
self.action_val[state[0],state[1],action]+=self.alpha*(r+extra_r+self.gamma*Q_next-Q)
def play(self,rand_start=True,pos=0):
global SCREEN, CLOCK, GRID, HEIGHT, WIDTH, blockSize, BLACK, WHITE, GREEN, RED
BLACK = (0, 0, 0)
WHITE = (200, 200, 200)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
GRID = self.sim.grid[1].copy()
blockSize = 20
WINDOW_HEIGHT, WINDOW_WIDTH = GRID.shape[0]*blockSize, GRID.shape[1]*blockSize
SCREEN = pygame.display.set_mode((WINDOW_WIDTH,WINDOW_HEIGHT))
CLOCK = pygame.time.Clock()
SCREEN.fill(BLACK)
HEIGHT,WIDTH = GRID.shape[0], GRID.shape[1]
self.sim.reset_loc()
state = self.sim.starting_state()
count=0
while True:
            phase = 0 if self.sim.t < self.sim.phase_change else 1  # pick the current maze layout
            GRID = self.sim.grid[phase].copy()
GRID[state[0],state[1]] = 10
self.main()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
SCREEN.fill(BLACK)
action = self.action(state)[0]
print(state,action)
            _,state = self.sim.simulate(action)
count+=1
if self.sim.is_finished(*state):
print("FINISHED")
print("Steps Taken",count)
pygame.quit()
sys.exit()
break
def main(self):
time.sleep(.5)
for x in range(WIDTH):
for y in range(HEIGHT):
color=WHITE
rect = pygame.Rect(x*(blockSize), y*(blockSize),
blockSize, blockSize)
if GRID[y][x]==1:
color=GREEN
SCREEN.fill(color,rect)
if GRID[y][x]==10:
color=RED
SCREEN.fill(color,rect)
if GRID[y][x]==-1:
color = WHITE
SCREEN.fill(color,rect)
pygame.draw.rect(SCREEN, color, rect, 1)
action = [[i,j] for i in range(-1,2) for j in range(-1,2) if not abs(i)==abs(j)]
grid = Grid_WorldSim2(height=6,width=9,start_loc=[-1,3],finish_loc=[0,-1],actions = action)
TD = DyanQ(grid,0.1,num_loops=10000,model_learn_steps=100,epsilon=0.1,k=0.2)
#TD.epsilon=0
import cv2
cumm = TD.Learn()
#TD.play()
import seaborn as sns
sns.pointplot(x=list(range(len(cumm))),y=cumm)
import pandas
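# Randomly generated MDP with `num_states` states, two actions and branching factor b:
# every state-action pair leads to b successor states with Gaussian rewards and a fixed
# probability of termination. The Agent below compares expected updates distributed
# along on-policy (epsilon-greedy) trajectories against uniformly sampled state-action
# pairs, tracking the estimated value of the start state after each update.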
class Model:
def __init__(self,num_states,num_actions,b,terminal_prob=0.1):
self.num_states = num_states
self.b = b
self.start_state = 0
self.num_actions = num_actions
self.rewards = self.set_reward()
self.transitions = self.set_transitions()
self.terminal_prob = terminal_prob
self.terminal_r = np.random.normal(0,1)
def ActionVal_init(self):
return np.zeros([self.num_states,self.num_actions])
def reset_state(self):
self.state = self.start_state
    def starting_state(self):
return self.start_state
def set_transitions(self):
T = np.random.randint(low=0,high=self.num_states,size=[self.num_states,self.num_actions,self.b])
return T
def set_reward(self):
reward = np.random.normal(0,1,size=[self.num_states,self.num_actions,self.b])
return reward
def sample_index(self):
probT = np.random.uniform()
if probT<self.terminal_prob:
return -1
index = np.random.randint(low=0,high=self.b)
return index
def on_policy_sim(self,action):
index = self.sample_index()
if index>=0:
s,r = self.transitions[self.state,action,index],self.rewards[self.state,action,index]
self.state = s
return r,s
self.state = -1
return self.terminal_r,-1
def uniform_sim(self):
state = np.random.randint(low=0,high=self.num_states)
action = np.random.randint(low=0,high=self.num_actions)
return state,action
class Agent:
def __init__(self,model,num_updates,epsilon=0.1):
self.model = model
self.num_updates = num_updates
self.epsilon = epsilon
def ValPolicy_init(self):
self.q = self.model.ActionVal_init()
self.policy = np.argmax(self.q,axis=-1)
def action(self,state):
if self.epsilon>0:
probs = self.epsilon/self.model.num_actions
rand = np.random.uniform()
if rand<=self.epsilon:
action = np.random.choice(range(self.model.num_actions))
else:
action = self.policy[state]
if action==self.policy[state]:
return action,1-self.epsilon+probs
else:
return action,probs
else:
return self.policy[state],1
def calc_v_naught(self):
max_action = self.policy[self.model.start_state]
exp = (1-self.epsilon+self.epsilon/self.model.num_actions)*self.q[self.model.start_state,max_action]
for i in range(self.model.num_actions):
if i!= max_action:
exp+=self.epsilon*self.q[self.model.start_state,i]/self.model.num_actions
return exp
def exp_update(self,state,action):
exp = 0
T = self.model.transitions
for i in range(self.model.b):
s = T[state,action,i]
r = self.model.rewards[state,action,i]
exp+=(1-self.model.terminal_prob)*(r+np.max(self.q[s]))
exp /= self.model.b
exp += self.model.terminal_prob*(self.model.terminal_r)
return exp
def on_policy_updates(self):
self.ValPolicy_init()
self.model.reset_state()
v_naught =[]
for i in range(self.num_updates):
state = self.model.state
action,_ = self.action(state)
r,s = self.model.on_policy_sim(action)
            self.q[state,action] = self.exp_update(state,action)
            self.policy[state] = np.argmax(self.q[state])  # keep the greedy policy in sync
if s==-1:
self.model.reset_state()
v_naught.append(self.calc_v_naught())
return v_naught
def uniform_updates(self):
self.ValPolicy_init()
self.model.reset_state()
v_naught =[]
for i in range(self.num_updates):
state,action = self.model.uniform_sim()
            self.q[state,action] = self.exp_update(state,action)
            self.policy[state] = np.argmax(self.q[state])  # keep the greedy policy in sync
v_naught.append(self.calc_v_naught())
return v_naught
df = pandas.DataFrame()
l = []
lk = []
for i in range(200):
model = Model(num_states=1000,num_actions=2,b=1)
agent = Agent(model,num_updates=20000,epsilon=0.1)
l.append(agent.on_policy_updates())
lk.append(agent.uniform_updates())
df["on_policy,b=1"]=np.sum(np.array(l),axis=0)
df["uniform,b=1"] =np.sum(np.array(lk),axis=0)
l = []
lk = []
for i in range(200):
model = Model(num_states=1000,num_actions=2,b=3)
agent = Agent(model,num_updates=20000,epsilon=0.1)
l.append(agent.on_policy_updates())
lk.append(agent.uniform_updates())
df["on_policy,b=3"]=np.sum(np.array(l),axis=0)
df["uniform,b=3"] =np.sum(np.array(lk),axis=0)
l = []
lk = []
for i in range(200):
model = Model(num_states=1000,num_actions=2,b=10)
agent = Agent(model,num_updates=20000,epsilon=0.1)
l.append(agent.on_policy_updates())
lk.append(agent.uniform_updates())
df["on_policy,b=10"]=np.sum(np.array(l),axis=0)
df["uniform,b=10"] =np.sum(np.array(lk),axis=0)
df.plot()
df.to_csv("1000states:ex8.8.csv")
df = pandas.DataFrame()
l = []
lk = []
for i in range(200):
model = Model(num_states=10000,num_actions=2,b=1)
agent = Agent(model,num_updates=200000,epsilon=0.1)
l.append(agent.on_policy_updates())
lk.append(agent.uniform_updates())
df["on_policy,b=3"]=np.sum(np.array(l),axis=0)
df["uniform,b=3"] =np.sum(np.array(lk),axis=0)
l = []
lk = []
for i in range(200):
model = Model(num_states=10000,num_actions=2,b=3)
agent = Agent(model,num_updates=200000,epsilon=0.1)
l.append(agent.on_policy_updates())
lk.append(agent.uniform_updates())
df["on_policy,b=10"]=np.sum(np.array(l),axis=0)
df["uniform,b=10"] =np.sum(np.array(lk),axis=0)
df.plot()
df.to_csv("1000states:ex8.8.csv")
# =============================================================================
# Exercise 11.3
# =============================================================================
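# "Biard" appears to be a typo for Baird: this sets up Baird's counterexample, with
# 'solid'/'dashed' actions and over-parameterised linear features, to show that
# semi-gradient Q-learning can diverge when the behaviour policy (solid with
# probability 1/num_states) differs from the target policy (always solid).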
class Biard:
def __init__(self,num_states):
self.num_states = num_states
self.action = {'solid':0,"dashed":1}
def starting_state(self):
self.state = np.random.randint(low=0,high=self.num_states)
return self.state
def simulate(self,action):
action = self.action[action]
if action==0:
self.state = self.num_states-1
else:
self.state = np.random.randint(low=0,high=self.num_states-1)
return 0,self.state
class SemiGrad_QLearning:
def __init__(self,sim,num_steps,alpha,gamma):
self.sim = sim
self.num_steps = num_steps
self.alpha = alpha
self.gamma = gamma
self.features = self.create_features()
self.params = np.ones(self.sim.num_states+1+1)
#self.params[6]=10
#probs for solid action
self.b = 1/self.sim.num_states
self.pie = 1
def create_features(self):
solid = [1]
dashed = [0]
from collections import defaultdict
features_dict = defaultdict(dict)
for i in range(self.sim.num_states):
zero_model = [0]*(self.sim.num_states+1)
if i ==self.sim.num_states-1:
zero_model[i] = 1
zero_model[self.sim.num_states] = 2
else:
zero_model[i] = 2
zero_model[self.sim.num_states] = 1
features_dict[i]['solid'] = np.array(zero_model+solid)
features_dict[i]['dashed'] =np.array(zero_model+dashed)
return features_dict
def action(self):
rand = np.random.uniform()
if rand<=self.b:
return "solid",self.b,self.pie
return "dashed",1-self.b,1-self.pie
def action_val(self,state,action):
params = self.params
xs = self.features[state][action]
return np.sum(params*xs)
def max_action_val(self,state):
params = self.params
xs1,xs2 = self.features[state].values()
return max(np.sum(params*xs1),np.sum(params*xs2))
def updates(self):
state = self.sim.starting_state()
p= [self.params]
for iteration in range(self.num_steps):
action,b_probs,pie_probs = self.action()
r,next_state = self.sim.simulate(action)
self.params = self.params+self.alpha*(r+self.gamma*self.max_action_val(next_state)-self.action_val(state,action))*self.features[state][action]
state=next_state
p.append(self.params)
return p
k=5
sim = Biard(k)
agent = SemiGrad_QLearning(sim,10,0.01,0.99)
p = []
for i in range(1000):
p += agent.updates()
import pandas
df = | pandas.DataFrame() | pandas.DataFrame |
import json, os, sys
from pprint import pprint as print
from datetime import datetime
from datetime import date, timedelta
from collections import Counter
from collections import OrderedDict
import openpyxl
from openpyxl.worksheet.dimensions import ColumnDimension, DimensionHolder
from openpyxl.utils import get_column_letter
from openpyxl.styles import Color, PatternFill, Font, Border
from openpyxl.styles import Font
import pandas as pd
import lh3.api as lh3
try:
from home.models import UnansweredChat
from home.models import ReportMonthly
except:
pass
client = lh3.Client()
chats = client.chats()
FRENCH_QUEUES = ['algoma-fr', 'clavardez', 'laurentian-fr', 'ottawa-fr',
'saintpaul-fr', 'western-fr', 'york-glendon-fr']
SMS_QUEUES = ['carleton-txt', 'clavardez-txt', 'guelph-humber-txt',
'mcmaster-txt', 'ottawa-fr-txt', 'ottawa-txt',
'scholars-portal-txt', 'western-txt', 'york-txt']
PRACTICE_QUEUES = ['practice-webinars', 'practice-webinars-fr', 'practice-webinars-txt']
LIST_OF_HOURS = dict()
UNANSWERED_CHATS = list()
UNANSWERED_CHATS_HTML = ['<h1 align="center">UNANSWERED CHATS</h1><hr/><br/>']
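# The helpers below partition a day's LibraryH3lp chats by queue type (French, SMS,
# practice) and aggregate daily answered/unanswered counts plus per-hour chat volumes.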
def french_queues(chats):
french = list()
for chat in chats:
if chat.get('queue') in FRENCH_QUEUES:
french.append(chat)
return french
def sms_queues(chats):
sms = list()
for chat in chats:
if chat.get('queue') in SMS_QUEUES:
sms.append(chat)
return sms
def remove_practice_queues(chats_this_day):
res = [chat for chat in chats_this_day if not "practice" in chat.get("queue")]
return res
def select_specific_queues(chats_this_day, specific_queues):
res = [chat for chat in chats_this_day if specific_queues in chat.get("queue")]
return res
def get_chat_for_this_day(this_day):
day = this_day.day
year = this_day.year
month = this_day.month
all_chats = chats.list_day(year,month,day)
return all_chats
def get_daily_stats(chats_this_day, chat_not_none, today):
unanswered_chats = [chat for chat in chats_this_day if chat.get("accepted") is None]
answered_chats_nbr = len(chats_this_day)- len(unanswered_chats)
french_chats = french_queues(chat_not_none)
sms_chats = sms_queues(chat_not_none)
data = []
data.append({
'Date': today.strftime("%A, %b %d, %Y"),
'Day': today.strftime("%A"),
'Month': today.strftime("%B"),
'Year': today.year,
'Total chats': len(chats_this_day),
'Total Answered Chats': answered_chats_nbr,
'Total UnAnswered Chats': len(unanswered_chats),
'Total French Answered': len(french_chats),
'Total SMS Answered': len(sms_chats)
})
return data
def get_chat_per_hour(chat_not_none):
chat_per_hour_not_none = list()
for chat in chat_not_none:
d = datetime.strptime(chat.get('started'), "%Y-%m-%d %H:%M:%S")
chat["hour"] = d.hour
chat_per_hour_not_none.append(d.hour)
nb_chat_per_hours = dict(Counter(chat_per_hour_not_none))
sort_dic_hourly = {}
for i in sorted(nb_chat_per_hours):
sort_dic_hourly.update({i:nb_chat_per_hours[i]})
return sort_dic_hourly
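# Collect the day's unanswered chats, build dashboard transcript URLs for them and
# append an HTML report; chats whose queue or transcript cannot be resolved are
# silently skipped by the bare except below.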
def list_of_un_answered_chats(all_chats, this_day, queues):
chats_this_day = remove_practice_queues(all_chats)
    chat_is_none = [chat for chat in chats_this_day if chat.get("accepted") is None]
for chat in chat_is_none:
# breakpoint()
try:
queue = [q for q in queues if q['name'] == chat.get('queue')]
url = "https://ca.libraryh3lp.com/dashboard/queues/" +str(queue[0].get('id')) +"/calls/"+ str(chat.get('guest')) + "/"+ str(chat.get('id'))
chat.update({'transcript_url':url})
UNANSWERED_CHATS.append(chat)
UNANSWERED_CHATS_HTML.append("<p>"+"<a target='_blank' href='"+ url +"'>"+chat.get('started') + "--> " + chat.get('profile') + " --> " + chat.get('protocol') + "</a>"+ "'</p>")
transcript = client.one('chats', chat.get('id')).get()['transcript'] or '<h3>No transcript found</h3>'
UNANSWERED_CHATS_HTML.append(transcript+"<hr/>")
except:
pass
return chat_is_none
def main(all_chats, this_day):
chats_this_day = remove_practice_queues(all_chats)
    chat_not_none = [chat for chat in chats_this_day if chat.get("accepted") is not None]
data = get_daily_stats(chats_this_day, chat_not_none, this_day)
data = data[-1]
sort_dic_hourly = get_chat_per_hour(chat_not_none)
print(data)
    data.update(sort_dic_hourly)  # dict.update() mutates in place and returns None
LIST_OF_HOURS.update(sort_dic_hourly)
return data
#update_excel_file(data, sort_dic_hourly)
def unanswered_chats():
#print(UNANSWERED_CHATS)
df = | pd.DataFrame(UNANSWERED_CHATS) | pandas.DataFrame |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
                pd.testing.assert_series_equal(result, expected)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 15 12:49:10 2017
@author: fubao
"""
#main function for creation graph data
import os
import numpy as np
import pandas as pd
from blist import blist
from readCityState import readcitySatesExecute
from extractweatherData import readUSAStationIdToNameMap
class nodeType:
placeType = 1
timeType = 2 #time type, year, month or day time
tempType = 3 #temperature range
    prcpType = 4 #precipitation
snowType = 5 #snow depth
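# Note on the node naming scheme used below: graph nodes are keyed as "<value>+<type code>",
# e.g. a place node "boston+1" or a temperature-range node "[12.0,25.0]+3" (hypothetical
# example values; the type codes are the nodeType constants above).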
class graphCreationClass:
startNodeId = 1 #graph node Id starting from 1
graphNodeNameToIdMap = {} #store node name+type -> ID map
gNodeIdToNameMap = {} #store node id -> name map
graNodeTypeMap = {} #node id to type
edgeList = blist() #graph edge list "nodeId, nodeId, edge"
def __init__(self):
pass
def createNodeIdPlaces(self):
stateCityMap, stateToCountyMap, countyToCityMap = readcitySatesExecute()
#get state and county edge list
for state, counties in stateToCountyMap.items():
nodeInfoState = state + "+" + str(nodeType.placeType)
#store state and id mapping
if nodeInfoState not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoState] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoState
#node type map
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.placeType
graphCreationClass.startNodeId += 1
#store county and id mapping
for county in set(counties):
nodeInfoCounty = county + "+" + str(nodeType.placeType)
if nodeInfoCounty not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoCounty] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoCounty
#node type map
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.placeType
graphCreationClass.startNodeId += 1
#get edge list for each pair
edgeProp = 'lower' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoState], graphCreationClass.graphNodeNameToIdMap[nodeInfoCounty], edgeProp])
edgeProp = 'higher'
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoCounty], graphCreationClass.graphNodeNameToIdMap[nodeInfoState], edgeProp])
#get county and city edge list
for county, cities in countyToCityMap.items():
#store state and id mapping
nodeInfoCounty = county + "+" + str(nodeType.placeType)
if nodeInfoCounty not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoCounty] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoCounty
#node type map
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.placeType
graphCreationClass.startNodeId += 1
#store city and id mapping
for city in set(cities):
nodeInfoCity = city + "+" + str(nodeType.placeType)
if nodeInfoCity not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoCity] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoCity
#node type map
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.placeType
graphCreationClass.startNodeId += 1
#get edge list for each pair
edgeProp = 'lower' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoCounty], graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], edgeProp])
edgeProp = 'higher'
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], graphCreationClass.graphNodeNameToIdMap[nodeInfoCounty], edgeProp])
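    # After createNodeIdPlaces, edgeList holds bidirectional place-hierarchy rows, e.g.
    # [<state node id>, <county node id>, 'lower'] plus the reverse edge tagged 'higher'
    # (illustrative shape; ids are assigned in encounter order).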
    #read the output of extracted daily weather (getDailyWeather) into edge list
#'stationID','year','month','day','tmax','tmin','snwd','acmm', 'acss','prcp','snow'])
def readstationWeatherOutput(self, inUSAStationFile, inFileStationWeather):
stationIDCodesUSAToNameMap = readUSAStationIdToNameMap(inUSAStationFile)
df = pd.read_csv(inFileStationWeather, delimiter = "\t")
#create edge list between city/town and weather
df['stationTemp'] = list(zip(df["stationID"], df["tmax"], df["tmin"])) #station temperature
#print ("stationTemp: ", df['stationTemp'])
#get temperature
for tple in df['stationTemp'].unique():
nodeInfoCity = stationIDCodesUSAToNameMap[tple[0]].split(',')[1].lower().strip() + "+" + str(nodeType.placeType) #state,city to split
#print ("stationCity: ", stationCity, type(tple), type(tple[1]))
if not np.isnan(tple[0]) and not np.isnan(tple[1]):
nodeInfoTmperature = "[" + str(tple[2]) + "," + str(tple[1]) + "]" + "+" + str(nodeType.tempType)
if nodeInfoTmperature not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoTmperature] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoTmperature
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.tempType
graphCreationClass.startNodeId += 1
#edge for town/city to temperature
#cityNodeId = graphCreationClass.graphNodeNameToIdMap[stationCity]
if nodeInfoCity in graphCreationClass.graphNodeNameToIdMap:
edgeProp = 'same' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], graphCreationClass.graphNodeNameToIdMap[nodeInfoTmperature], edgeProp])
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoTmperature], graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], edgeProp])
#get precipitation
        df['stationPrcp'] = list(zip(df["stationID"], df["prcp"])) #station precipitation
for tple in df['stationPrcp']:
nodeInfoCity = stationIDCodesUSAToNameMap[tple[0]].split(',')[1].lower().strip() + "+" + str(nodeType.placeType) #state,city to split
if not np.isnan(tple[0]) and not np.isnan(tple[1]):
nodeInfoprcp = str(tple[1]) + "+" + str(nodeType.prcpType)
if nodeInfoprcp not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoprcp] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoprcp
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.prcpType
#print("ddddddddddddddddddd: ", nodeType.prcpType)
graphCreationClass.startNodeId += 1
#edge for town/city to prcp
#cityNodeId = graphCreationClass.graphNodeNameToIdMap[stationCity]
if nodeInfoCity in graphCreationClass.graphNodeNameToIdMap:
edgeProp = 'same' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], graphCreationClass.graphNodeNameToIdMap[nodeInfoprcp], edgeProp])
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoprcp], graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], edgeProp])
#get snow type
        df['stationsnwd'] = list(zip(df["stationID"], df["snwd"])) #station snow depth
print ("stationsnwd describe", df['stationsnwd'].describe())
for tple in df['stationsnwd'].unique():
nodeInfoCity = stationIDCodesUSAToNameMap[tple[0]].split(',')[1].lower().strip() + "+" + str(nodeType.placeType) #state,city to split
if not np.isnan(tple[0]) and not np.isnan(tple[1]): # tple[1] is not None:
nodeinfoSnwd = str(tple[1]) + "+" + str(nodeType.snowType)
#print("previous eeeeeeeeeeeeeeeeee: ", nodeType.snowType, nodeinfoSnwd)
if nodeinfoSnwd not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeinfoSnwd] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeinfoSnwd
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.snowType
#print("eeeeeeeeeeeeeeeee: ", nodeType.snowType)
graphCreationClass.startNodeId += 1
#edge for town/city to snwd
#cityNodeId = graphCreationClass.graphNodeNameToIdMap[stationCity]
if nodeInfoCity in graphCreationClass.graphNodeNameToIdMap:
edgeProp = 'same' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], graphCreationClass.graphNodeNameToIdMap[nodeinfoSnwd], edgeProp])
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeinfoSnwd], graphCreationClass.graphNodeNameToIdMap[nodeInfoCity], edgeProp])
#get time month/day/year
        df['tempTime'] = list(zip(df["tmax"], df["tmin"],df["month"], df["day"], df["year"])) #temperature and date
for tple in df['tempTime'].unique():
if not np.isnan(tple[1]) and not np.isnan(tple[3]): #temp tmin and time day is not None
nodeInfoTmperature = "[" + str(tple[1]) + "," + str(tple[0]) + "]" + "+" + str(nodeType.tempType)
nodeInfoTime = str(tple[2]) + "/" + str(tple[3]) + "/" + str(tple[4]) + "+" + str(nodeType.timeType)
if nodeInfoTime not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoTime] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoTime
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.timeType
#print("fffffffffffffffffff: ", nodeType.timeType)
graphCreationClass.startNodeId += 1
if nodeInfoTmperature not in graphCreationClass.graphNodeNameToIdMap:
                    graphCreationClass.graphNodeNameToIdMap[nodeInfoTmperature] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoTmperature
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.tempType
#print("fffffffffffffffffff: ", nodeType.timeType)
graphCreationClass.startNodeId += 1
#edge for temp to time
if nodeInfoTmperature in graphCreationClass.graphNodeNameToIdMap:
edgeProp = 'same' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoTmperature], graphCreationClass.graphNodeNameToIdMap[nodeInfoTime], edgeProp])
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoTime], graphCreationClass.graphNodeNameToIdMap[nodeInfoTmperature], edgeProp])
#get time month/day/year and prcp relations
        df['prcpTime'] = list(zip(df["prcp"], df["month"], df["day"], df["year"])) #precipitation and date
for tple in df['prcpTime'].unique():
if not np.isnan(tple[0]) and not np.isnan(tple[3]): #temp tmin and time day is not None
nodeInfoPrcp = str(tple[0]) + "+" + str(nodeType.prcpType)
nodeInfoTime = str(tple[1]) + "/" + str(tple[2]) + "/" + str(tple[3]) + "+" + str(nodeType.timeType)
if nodeInfoTime not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoTime] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoTime
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.timeType
#print("fffffffffffffffffff: ", nodeType.timeType)
graphCreationClass.startNodeId += 1
if nodeInfoPrcp not in graphCreationClass.graphNodeNameToIdMap:
                    graphCreationClass.graphNodeNameToIdMap[nodeInfoPrcp] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoPrcp
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.prcpType
#print("fffffffffffffffffff: ", nodeType.timeType)
graphCreationClass.startNodeId += 1
#edge for temp to time
if nodeInfoPrcp in graphCreationClass.graphNodeNameToIdMap:
edgeProp = 'same' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoPrcp], graphCreationClass.graphNodeNameToIdMap[nodeInfoTime], edgeProp])
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoTime], graphCreationClass.graphNodeNameToIdMap[nodeInfoPrcp], edgeProp])
#get time month/day/year and snwd relations
        df['snwdTime'] = list(zip(df["snwd"], df["month"], df["day"], df["year"])) #snow depth and date
for tple in df['snwdTime'].unique():
if not np.isnan(tple[0]) and not np.isnan(tple[3]): #tple[0] is not None and tple[3] is not None: #temp tmin and time day is not None
nodeInfoSnwd = str(tple[0]) + "+" + str(nodeType.snowType)
nodeInfoTime = str(tple[1]) + "/" + str(tple[2]) + "/" + str(tple[3]) + "+" + str(nodeType.timeType)
if nodeInfoTime not in graphCreationClass.graphNodeNameToIdMap:
graphCreationClass.graphNodeNameToIdMap[nodeInfoTime] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoTime
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.timeType
#print("fffffffffffffffffff: ", nodeType.timeType)
graphCreationClass.startNodeId += 1
if nodeInfoSnwd not in graphCreationClass.graphNodeNameToIdMap:
                    graphCreationClass.graphNodeNameToIdMap[nodeInfoSnwd] = graphCreationClass.startNodeId
graphCreationClass.gNodeIdToNameMap[graphCreationClass.startNodeId] = nodeInfoSnwd
if graphCreationClass.startNodeId not in graphCreationClass.graNodeTypeMap:
graphCreationClass.graNodeTypeMap[graphCreationClass.startNodeId] = nodeType.snowType
#print("fffffffffffffffffff: ", nodeType.timeType)
graphCreationClass.startNodeId += 1
#edge for temp to time
if nodeInfoSnwd in graphCreationClass.graphNodeNameToIdMap:
edgeProp = 'same' #lower hierarchical relation
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoSnwd], graphCreationClass.graphNodeNameToIdMap[nodeInfoTime], edgeProp])
graphCreationClass.edgeList.append([graphCreationClass.graphNodeNameToIdMap[nodeInfoTime], graphCreationClass.graphNodeNameToIdMap[nodeInfoSnwd], edgeProp])
#write graphNodeNameToIdMap, graNodeTypeMap, and edgeList
def writeIntoFile(self, outNodeTypeFile, outNodeNameToIdFile, outEdgeListFile):
#write node type file
os.remove(outNodeTypeFile) if os.path.exists(outNodeTypeFile) else None
df = pd.DataFrame.from_dict(graphCreationClass.graNodeTypeMap, orient='index')
df.to_csv(outNodeTypeFile, header = ["node Type"], sep='\t', index=True)
#write into outNodeNameToIdFile
os.remove(outNodeNameToIdFile) if os.path.exists(outNodeNameToIdFile) else None
        df = pd.DataFrame.from_dict(graphCreationClass.graphNodeNameToIdMap, orient='index')
import pandas as pd
import logging
import os
from collections import defaultdict
from annotation.utility import Utility
_logger = logging.getLogger(__name__)
TYPE_MAP_DICT = {"string": "String", "number": "Quantity", "year": "Time", "month": "Time", "day": "Time",
"date": "Time", "entity": 'WikibaseItem'}
# kyao
# Only add one location qualifier until datamart-api can handle multiple locations. 31 July 2020.
ADDITIONAL_QUALIFIER_MAP = {
# ("lat", "lon", "latitude", "longitude"): {"Attribute": "location", "Property": "P276"},
# ("country",): {"Attribute": "country", "Property": "P17"},
# ("admin1",): {"Attribute": "located in the first-level administrative country subdivision",
# "Property": "P2006190001"},
# ("admin2",): {"Attribute": "located in the second-level administrative country subdivision",
# "Property": "P2006190002"},
# ("admin3",): {"Attribute": "located in the third-level administrative country subdivision",
# "Property": "P2006190003"},
("country", "admin1", "admin2", "admin3"): {"Attribute": "located in the administrative territorial entity",
"Property": "P131"},
}
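# TYPE_MAP_DICT above translates the annotation "type" row into the value types written to the
# template (String, Quantity, Time, WikibaseItem); all date-like annotation types collapse to Time.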
def generate_template_from_df(input_df: pd.DataFrame, dataset_qnode: str, dataset_id: str) -> dict:
"""
Function used for datamart annotation batch mode, return a dict of dataFrame instead of output a xlsx file.
"""
# Assumes cell [0,0] is the start of the annotation
if not input_df.iloc[0,0] == 'dataset':
        raise Exception('The first column of the dataframe must be the annotations (not the index of the dataframe)')
utility = Utility()
# updated 2020.7.22: it is possible that header is not at row 7, so we need to search header row if exist
header_row, data_row = utility.find_data_start_row(input_df)
input_df = input_df.set_index(0)
if 'tag' in input_df.iloc[:7, 0]:
annotation_rows = list(range(1, 7)) + [header_row]
else:
annotation_rows = list(range(1, 6)) + [header_row]
content_rows = list(range(data_row, len(input_df)))
annotation_part = input_df.iloc[annotation_rows].fillna("")
content_part = input_df.iloc[content_rows]
# start generate dataframe for templates
dataset_df = _generate_dataset_tab(input_df, dataset_qnode, dataset_id)
attribute_df = _generate_attributes_tab(dataset_qnode, annotation_part)
unit_df = _generate_unit_tab(dataset_qnode, content_part, annotation_part)
extra_df, wikifier_df1 = _process_main_subject(dataset_qnode, content_part, annotation_part, data_row)
wikifier_df2 = _generate_wikifier_part(content_part, annotation_part, data_row)
wikifier_df = pd.concat([wikifier_df1, wikifier_df2])
output_df_dict = {
'dataset_file': dataset_df,
'attributes_file': attribute_df,
'units_file': unit_df,
"extra_edges": extra_df,
"Wikifier_t2wml": wikifier_df,
"wikifier": None,
"qualifiers": None,
}
return output_df_dict
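# Illustrative driver for the function above (hypothetical file names; the annotation block
# must sit in the first column, as checked at the top of the function):
#   raw = pd.read_excel("annotated_input.xlsx", header=None)
#   sheets = generate_template_from_df(raw, dataset_qnode="Qexample", dataset_id="example")
#   save_template_file(sheets, "/tmp/template.xlsx")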
def generate_template(input_path: str, output_path: str, dataset_qnode: str = None) -> None:
"""
generate the template xlsx file from the input xlsx file
:param dataset_qnode:
:param input_path:
:param output_path:
:return:
"""
input_df = pd.read_excel(input_path, index_col=0, header=None)
output_df_dict = generate_template_from_df(input_df, dataset_qnode=dataset_qnode)
output_folder = output_path[:output_path.rfind("/")]
os.makedirs(output_folder, exist_ok=True)
save_template_file(output_df_dict, output_path)
def save_template_file(output_df_dict: dict, output_path: str) -> None:
with pd.ExcelWriter(output_path) as writer:
output_df_dict["dataset_file"].to_excel(writer, sheet_name='Dataset', index=False)
output_df_dict["attributes_file"].to_excel(writer, sheet_name='Attributes', index=False)
output_df_dict["units_file"].to_excel(writer, sheet_name='Units', index=False)
output_df_dict["extra_edges"].to_excel(writer, sheet_name="Extra Edges", index=False)
output_df_dict["Wikifier_t2wml"].to_excel(writer, sheet_name="Wikifier_t2wml", index=False)
def _generate_dataset_tab(input_df: pd.DataFrame, dataset_qnode: str, dataset_id: str) -> pd.DataFrame:
"""
A sample dataset file looks like: here {dataset_qnode} = "aid-security"
node1 label node2 id
Qaid-security P31 Q1172284 aid-security-P31
Qaid-security label aid-security dataset aid-security-label
Qaid-security P1476 aid-security dataset aid-security-P1476
Qaid-security description aid-security dataset aid-security-description
Qaid-security P2699 aid-security aid-security-P2699
Qaid-security P1813 aid-security aid-security-P1813
:param dataset_qnode: input dataset id
:return:
"""
dataset_qnode_df_list = []
name = input_df.iloc[0, 1] if input_df.shape[1] > 1 and input_df.iloc[0, 1] else '{} dataset'.format(dataset_id)
description = input_df.iloc[0, 2] if input_df.shape[1] > 2 and input_df.iloc[0, 2] else '{} dataset'.format(dataset_id)
url = input_df.iloc[0, 3] if input_df.shape[1] > 3 and input_df.iloc[0, 3] else 'http://not/defined/{}'.format(dataset_id)
dataset_labels = ["P31", "label", "P1476", "description", "P2699", "P1813"]
dataset_node2s = ["Q1172284", '"{}"'.format(name), '"{}"'.format(name),
'"{}"'.format(description), '"{}"'.format(url), dataset_qnode]
for label, node2 in zip(dataset_labels, dataset_node2s):
dataset_qnode_df_list.append({"dataset": dataset_qnode, "label": label, "node2": node2})
dataset_df = pd.DataFrame(dataset_qnode_df_list)
return dataset_df
def _generate_attributes_tab(dataset_qnode: str, annotation_part: pd.DataFrame) -> pd.DataFrame:
"""
codes used to generate the template attribute tab
1. add for columns with role = variable or role = qualifier.
"""
attributes_df_list = []
seen_attributes = {}
# for causx country as main subject
# # update 2020.11.11, always check if P131 is needed
# all_column_types = set(annotation_part.T['type'].unique())
# for types, edge_info in ADDITIONAL_QUALIFIER_MAP.items():
# if len(set(types).intersection(all_column_types)) > 0:
# attributes_df_list.append({"Attribute": edge_info["Attribute"],
# "Property": edge_info["Property"], "Role": "qualifier",
# "Relationship": "", "type": "WikibaseItem",
# "label": edge_info["Attribute"],
# "description": edge_info["Attribute"]})
for i in range(annotation_part.shape[1]):
each_col_info = annotation_part.iloc[:, i]
role_info = each_col_info["role"].split(";")
role_lower = role_info[0].lower()
if role_lower in {"variable", "qualifier"}:
# if ";" exists, we need to use those details on variables
if len(role_info) > 1:
relationship = role_info[1]
# otherwise apply this variable / qualifier for all by give empty cell
else:
relationship = ""
attribute = each_col_info["header"]
role_type = each_col_info["type"].lower()
if role_type == "":
continue
if role_type not in TYPE_MAP_DICT:
raise ValueError("Column type {} for column {} is not valid!".format(role_type, i))
data_type = TYPE_MAP_DICT[each_col_info["type"]]
label = "{}".format(attribute) if not each_col_info['name'] else each_col_info['name']
description = "{} column in {}".format(role_lower, dataset_qnode) if not each_col_info['description'] \
else each_col_info['description']
tag = each_col_info['tag'] if 'tag' in each_col_info else ""
# qualifier and variables have been deduplicated already in validation. Now if anything is repeating,
# it is meant to be same.
if attribute not in seen_attributes:
attributes_df_list.append({"Attribute": attribute, "Property": "", "Role": role_lower,
"Relationship": relationship, "type": data_type,
"label": label, "description": description, "tag": tag})
seen_attributes[attribute] = 1
if len(attributes_df_list) == 0:
        attributes_df = pd.DataFrame(columns=['Attribute', 'Property', 'label', 'description'])
import numpy as np
import pandas as pd
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_non_participants,
)
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_one_strict_assort_by_group,
)
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_participants,
)
from src.create_initial_states.make_educ_group_columns import _determine_group_sizes
from src.create_initial_states.make_educ_group_columns import _get_id_to_weak_group
from src.create_initial_states.make_educ_group_columns import (
_get_key_with_longest_value,
)
from src.create_initial_states.make_educ_group_columns import (
_get_key_with_shortest_value,
)
from src.create_initial_states.make_educ_group_columns import _split_data_by_query
def test_get_id_to_weak_group():
raw_id = pd.Series([2, 2, 3, 3, 4, 4, 5, 5]) # dtype int is right.
participants = pd.DataFrame(index=[2, 3, 4, 5])
participants["__weak_group_id"] = [0, 1] + [1, 0]
expected = pd.Series([0, 1], index=[3, 4])
res = _get_id_to_weak_group(participants, raw_id)
pd.testing.assert_series_equal(res, expected, check_names=False)
def test_split_data_by_query():
df = pd.DataFrame(index=list("abcde"))
df["to_select"] = [True, True, False, True, False]
query = "to_select"
res_selected, res_others = _split_data_by_query(df, query)
expected_selected = df.loc[["a", "b", "d"]]
expected_other = df.loc[["c", "e"]]
pd.testing.assert_frame_equal(res_selected, expected_selected)
pd.testing.assert_frame_equal(res_others, expected_other)
def test_create_group_id_for_participants():
df = pd.DataFrame()
df["state"] = ["BY"] * 4 + ["NRW"] * 8
df["county"] = ["N", "N", "M", "M"] + ["K"] * 5 + ["D"] * 3
group_size = 2
strict_assort_by = "state"
weak_assort_by = "county"
res = _create_group_id_for_participants(
df=df,
group_size=group_size,
strict_assort_by=strict_assort_by,
weak_assort_by=weak_assort_by,
)
expected = pd.Series(
[2, 2, 1, 1, 4, 4, 6, 6, 7, 5, 5, 7], dtype=float, name="group_id"
)
pd.testing.assert_series_equal(res, expected)
def test_create_group_id_for_one_strict_assort_by_group_one_county_size_one():
df = pd.DataFrame()
df["weak_assort_by"] = ["a", "a", "a", "a"]
group_size = 1
weak_assort_by = "weak_assort_by"
start_id = 20
res, end_id = _create_group_id_for_one_strict_assort_by_group(
df=df, group_size=group_size, weak_assort_by=weak_assort_by, start_id=start_id
)
expected = pd.Series([20.0, 21.0, 22.0, 23.0], index=df.index, name="group_id")
pd.testing.assert_series_equal(expected, res)
assert end_id == 24
def test_create_group_id_for_one_strict_assort_by_group_no_remainder():
df = pd.DataFrame()
df["weak_assort_by"] = ["a", "b", "a", "b"]
group_size = 2
weak_assort_by = "weak_assort_by"
start_id = 20
res, end_id = _create_group_id_for_one_strict_assort_by_group(
df=df, group_size=group_size, weak_assort_by=weak_assort_by, start_id=start_id
)
expected = pd.Series([21.0, 20.0, 21.0, 20.0], index=df.index, name="group_id")
pd.testing.assert_series_equal(expected, res)
assert end_id == 22
def test_create_group_id_for_one_strict_assort_by_group_with_remainder():
    df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 16:43:25 2018
@author: nce3xin
"""
from scipy.io import arff
import pandas as pd
# .xlsx data file path
root="../data/"
origin_pt=root+"origin.xlsx"
train_pt=root+"train.xlsx"
test_pt=root+"test.xlsx"
# .arff data file path
train_arff_pt="../data/train.arff"
test_arff_pt="../data/test.arff"
# read .xlsx file
usecols=[0,2]
train_df=pd.read_excel(train_pt,usecols=usecols)
test_df=pd.read_excel(test_pt,usecols=usecols)
origin_weibo_df = pd.read_excel(origin_pt, sheetname=0)
# -*- coding: utf-8 -*-
import fitz
import logging
import os
import pandas as pd
class PdfMerge:
def __init__(self):
formatter = logging.Formatter('%(asctime)s [%(threadName)s] %(levelname)s: %(message)s')
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.DEBUG)
self.logger = logging.getLogger(__file__)
self.logger.addHandler(sh)
self.logger.setLevel(logging.DEBUG)
def save_file_name(self, dir_path, save_file_path, encoding='gbk'):
"""
        Collect the names of the PDF files under dir_path and save them to a CSV file.
        :param dir_path: folder to scan for PDF files
        :param encoding: file encoding
        :param save_file_path: path of the CSV file that receives the file names
        :return:
"""
        index = ['学号']  # CSV column header (Chinese for "student ID")
        student_number = []
        all_files_name = os.listdir(dir_path)  # list every file name in the folder
        for file_name in all_files_name:
            if file_name.endswith('.pdf'):  # keep only PDF files
                file_name = file_name[:-6]  # 1800271038-1.pdf -> 1800271038
                if file_name not in student_number:  # deduplicate cases like 1800271038-1.pdf, 1800271038-2.pdf
                    student_number.append(file_name)
        csv_file = pd.DataFrame(columns=index, data=student_number)  # save the list as a CSV file
csv_file.to_csv(save_file_path, encoding=encoding)
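    # Illustrative call (hypothetical paths):
    #   PdfMerge().save_file_name(dir_path='./pdfs', save_file_path='/tmp/names.csv')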
def get_file_name(self, file_dir_path, encoding='gbk'):
"""
        Read the PDF file names recorded in the CSV file and collect them in a list
        :param file_dir_path: path of the CSV file
        :param encoding: file encoding
        :return: list of PDF file names
"""
all_files_name = []
        csv_data = pd.read_csv(file_dir_path, encoding=encoding)
""" Creates the index of files from the specific parser objects """
from pathlib import Path
from collections import Counter
from functools import wraps
import datetime
from typing import Tuple, List, Dict, Union, Collection
# import re
# import copy
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# from elchempy.indexer.filepath_parser import FilePathParser
# from elchempy.indexer.extra_EC_info import loading_ref, WE_surface_area_cm2
from elchempy.dataloaders.files_func_collector import run_func_on_files
from elchempy.dataloaders.fetcher import ElChemData
from elchempy.indexer.EC_path_parser import ElChemPathParser
from elchempy.indexer.helpers import (
find_path_of_common_folder,
relative_parent_paths_to_common_folder,
find_relevant_files_in_folder,
)
### 3rd Party imports
import pandas as pd
#%%
def create_index(files, include_metadata=False, multi_run=False):
"""creates the index from list of files, by crawling over each file"""
ecpps = None
ecpps = run_func_on_files(ElChemPathParser, files, multi_run=multi_run)
ecds = None
if include_metadata:
ecds = run_func_on_files(
ElChemData,
files,
multi_run=multi_run,
metadata_only=include_metadata,
)
    index = pd.DataFrame()
import numpy as np
import pandas as pd
import scipy
from sklearn import metrics
from FPMax import FPMax
from Apriori import Apriori
from MASPC import MASPC
import csv
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import linkage
from optbinning import ContinuousOptimalBinning
# pd.set_option('display.max_colwidth', -1)
# pd.options.display.max_columns = None
pd.options.display.width = 0
class MASPC_Engine():
def __init__(self, inputFileName, myMinAc=None, myMinOv=None,
myMinSup=None, myK=None,myContainsTemporal=False,
myAutoDiscretize=False,myDiscretizeStrategy=None,myQ=4):
print("inside maspc_engine constructor with "+inputFileName)
self.inputFileName = inputFileName
self.outputFileFolder = '/tmp/'
self.sortedInputFile = self.outputFileFolder+'sortedInputFile.csv'
self.myMinAc = myMinAc
self.myMinOv = myMinOv
self.myMinSup = myMinSup
self.myK = myK
self.myContainsTemporal = myContainsTemporal
self.myAutoDiscretize = myAutoDiscretize
self.myDiscretizeStrategy = myDiscretizeStrategy
self.myQ = myQ
# First thing we do is sort input file
self.__sortInputFile(self.inputFileName)
self.rtDataFrame = pd.read_csv(self.sortedInputFile, dtype=str)
# remove any rows that may have empty diagnosis, sex, or age
self.rtDataFrame.dropna(subset=['DX1', 'age', 'sex'], inplace=True)
self.rtDataFrame.reset_index(drop=True, inplace=True)
# Extract diagnoses for processing (Diagnosis DataFrame)
rtDataFrameDiags = self.rtDataFrame.drop(['age', 'sex'], axis=1).copy()
diagColumns = [str(i) for i in list(rtDataFrameDiags.columns.values) if i.startswith('D')]
uvalues = list(pd.unique(self.rtDataFrame[diagColumns].values.ravel('K')))
uniqueDiags = [x for x in uvalues if str(x) != 'nan']
# process the list of unique diagnoses to update diagnosis dataframe and write file for fpmax and apriori
self.__writeInputFile(uniqueDiags, rtDataFrameDiags)
# # To do one hot encoding of sex:
        self.demographic = pd.get_dummies(self.rtDataFrame['sex'])
import pandas as pd
import numpy as np
from scipy import integrate, stats
from numpy import absolute, mean
from itertools import islice
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.stats.multicomp
import seaborn as sns
import matplotlib.pyplot as plt
headers = [
'participant_id',
'type',
'block',
'occurence',
'response_time',
'switch_type',
]
df = pd.read_csv(r'C:\Users\danie\Documents\SURREY\Project_1\task_switching_paradigm\pilot4_withoccurence.csv', usecols = headers)
df_behavstats1 = pd.DataFrame()
df_behavstats2 = pd.DataFrame()
df_behavstats = pd.DataFrame()
df_switch_type = pd.DataFrame()
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# LOOP WHICH CALCULATES AND CONCATS MAD, SD, MRT, MED
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
df.set_index(['participant_id', 'block', 'type', 'occurence'], inplace = True)
df_switch_type = df
df_rt = df.drop(columns = [
'switch_type'
])
for group_i, group_v in df_rt.groupby(level=[0, 1, 2, 3]):
group_v = group_v.apply(pd.to_numeric, errors = 'coerce').dropna(how = 'all')
mask = group_v.index.get_level_values(3)
n = 0
for index, row in group_v.iterrows():
n = n + 1
mrt = group_v.mean()
SD = group_v.std()
MAD = mean(absolute(group_v - mean(group_v)))
med = group_v.median()
switchtrial0 = group_v['response_time'].iloc[0]
switchtrial1 = group_v['response_time'].iloc[1]
if n > 2:
switchtrial2 = group_v['response_time'].iloc[2]
for index, row in group_v.iterrows():
group_v.at[index, 'mean_rt'] = mrt
group_v.at[index, 'SD_rt'] = SD
group_v.at[index, 'MAD_rt'] = MAD
group_v.at[index, 'median_rt'] = med
group_v.at[index, 'rt_trial_1'] = switchtrial0
group_v.at[index, 'rt_trial_2'] = switchtrial1
if n < 3:
group_v.at[index, 'rt_trial_3'] = np.nan
else:
group_v.at[index, 'rt_trial_3'] = switchtrial2
group_v.reset_index(drop = False, inplace = True)
df_behavstats1 = pd.concat([df_behavstats1, group_v], sort=False)
df_behavstats1.set_index(['participant_id', 'block', 'type', 'occurence'], inplace = True)
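# At this point df_behavstats1 holds, for every (participant_id, block, type, occurence) group,
# the per-group mean/SD/MAD/median response time plus the raw RTs of the group's first trials.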
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# LOOP WHICH CALCULATES AND CONCATS SWITCH RT
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
for group_i, group_v in df_behavstats1.groupby(level=[0, 1, 2, 3]):
n = 0
for index, row in group_v.iterrows():
n = n + 1
    # here we decide over how many trials the RT is averaged (m), depending on how many
    # trials are in the overall group (n).
##
# eg, when the number of overall trials in the group is less than 3 (if n < 3), then
# the number of trials to average over is 0 (m = 0), and the rows are left empty (np.nan).
if n < 3:
m = 0
for index, row in group_v.iterrows():
group_v.at[index, 'switch_rt'] = np.nan
elif n >= 3 and n < 5:
m = 2
elif n >= 5:
m = 3
number_of_trials = 0
overall_rt = 0
# the 'islice' tells pandas to iterate with iterrows over the first 'm' rows, where 'm' is
# dictated above and depends on the overall number of trials, 'n', in the group.
for index, row in islice(group_v.iterrows(), m):
number_of_trials = number_of_trials + 1
overall_rt = overall_rt + row['response_time']
j = (overall_rt/number_of_trials)
group_v.at[index, 'switch_rt'] = j
group_v.reset_index(drop = True, inplace = False)
df_behavstats = pd.concat([df_behavstats, group_v], sort=True)
df_behavstats = pd.concat([df_behavstats, df_switch_type.reindex(columns=df.columns)], axis=1)
df_behavstats = df_behavstats.drop(columns=['response_time'])
df_behavstats.drop_duplicates(subset="MAD_rt", keep='first', inplace=True)
# when a group has less than 3 trials in it, the switch_rt is not calculated (m = 0).
# if there are NaN values in any of the rows of a column, that column returns NaN as a t-test
# value for any t-test calculations it is involved in. therefore i have excluded those rows below:
print("")
print("")
print('BELOW DISPLAYS THE GROUP(S) WHICH HAVE BEEN EXCLUDED AS THERE WERE LESS THAN')
print('3 TRIALS IN THE GROUP, CAUSING A NaN VALUE FOR THE T-TEST CALCULATIONS:')
print("")
print(df_behavstats[df_behavstats.isna().any(axis=1)].index)
df_behavstats = df_behavstats[pd.notnull(df_behavstats['switch_rt'])]
print("")
print("")
df_behavstats.reset_index(drop=False, inplace=True)
df_behavstats.to_csv('pilot4_RT_stats.csv')
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ANOVAs
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
model = ols(
'switch_rt ~ C(block) + C(type) + C(participant_id) + C(switch_type) + C(block):C(switch_type) + C(type):C(switch_type) + C(participant_id):C(switch_type) + C(block):C(type) + C(block):C(participant_id) + C(type):C(participant_id)',
data=df_behavstats
).fit()
anova_table = sm.stats.anova_lm(model, typ=2)
print(anova_table)
model1 = ols(
'switch_rt ~ C(block) + C(type) + C(participant_id) + C(switch_type) + C(block):C(switch_type) + C(type):C(switch_type) + C(participant_id):C(switch_type) + C(block):C(type) + C(block):C(participant_id) + C(type):C(participant_id)',
data=df_behavstats
).fit()
anova_table1 = sm.stats.anova_lm(model1, typ=2)
print(anova_table1)
model2 = ols(
'mean_rt ~ C(block) + C(type) + C(participant_id) + C(switch_type) + C(block):C(switch_type) + C(type):C(switch_type) + C(participant_id):C(switch_type) + C(block):C(type) + C(block):C(participant_id) + C(type):C(participant_id)',
data=df_behavstats
).fit()
anova_table2 = sm.stats.anova_lm(model2, typ=2)
print(anova_table2)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# T-TESTS AND WRITING TO .TXT
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
mean = df_behavstats['mean_rt']
SD = df_behavstats['SD_rt']
MAD = df_behavstats['MAD_rt']
median = df_behavstats['median_rt']
# Check here if the mean and median differ from one another; if so, decide which to use. If not, move ahead with either one.
g1 = stats.ttest_ind(median, mean, equal_var = False)
print(g1)
rt1 = df_behavstats['rt_trial_1']
rt2 = df_behavstats['rt_trial_2']
rt3 = df_behavstats['rt_trial_3']
rt123 = df_behavstats['switch_rt']
a = stats.ttest_ind(mean, rt1)
b = stats.ttest_ind(mean, rt2)
c = stats.ttest_ind(mean, rt3)
MEANvsAVRT = stats.ttest_ind(mean, rt123)
d = stats.ttest_ind(median, rt1)
e = stats.ttest_ind(median, rt2)
f = stats.ttest_ind(median, rt3)
MEDvsAVRT = stats.ttest_ind(median, rt123)
standard_t_tests = [a,b,c,MEANvsAVRT,d,e,f,MEDvsAVRT]
a1 = stats.ttest_ind(mean, rt1, equal_var = False)
b1 = stats.ttest_ind(mean, rt2, equal_var = False)
c1 = stats.ttest_ind(mean, rt3, equal_var = False)
MEANvsAVRT1 = stats.ttest_ind(mean, rt123, equal_var = False)
d1 = stats.ttest_ind(median, rt1, equal_var = False)
e1 = stats.ttest_ind(median, rt2, equal_var = False)
f1 = stats.ttest_ind(median, rt3, equal_var = False)
MEDvsAVRT1 = stats.ttest_ind(median, rt123, equal_var = False)
welchs_t_tests = [a1,b1,c1,MEANvsAVRT1,d1,e1,f1,MEDvsAVRT1]
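# equal_var=False requests Welch's t-test, which does not assume equal variances between the
# two samples, unlike the standard t-tests collected above.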
t_data = {'standard':standard_t_tests, 'welchs':welchs_t_tests}
t_rows = ['mean_vs_rt1', 'mean_vs_rt2', 'mean_vs_rt3', 'mean_vs_rt123', 'med_vs_rt1', 'med_vs_rt2', 'med_vs_rt3', 'med_vs_rt123']
df_t_tests = pd.DataFrame(data=t_data, index=t_rows)
# -*- coding: utf-8 -*-
import subprocess
import json
import os
import io
from multiprocessing import Pool
import multiprocessing
import multiprocessing.pool
from operator import itemgetter
import random
import string
import pickle
import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import pysam
import mip_classes as mod
import pandas as pd
from pandas.errors import MergeError
import gzip
from primer3 import calcHeterodimerTm
import primer3
import traceback
from msa_to_vcf import msa_to_vcf as msa_to_vcf
import itertools
import sys
import allel
from Bio import SeqIO
print("functions reloading")
# backbone dictionary
mip_backbones = {
"hybrid_bb": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNNNN",
"hybrid_split": "NNNNAGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
"hybrid_split_hp": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
"gc_bb": "GCAGATCGGAAGAGCACACCTCGCCAAGCTTTCGGCNNNNNNNNNNNN",
"slx_bb": "CTTCAGCTTCCCGATCCGACGGTAGTGTNNNNNNNNNNNN"
}
"""
# Below class allows processors from a pool from multiprocessing module to
create processor pools of their own.
# http://mindcache.io/2015/08/09/python-multiprocessing-module-daemonic-processes-are-not-allowed-to-have-children.html
class NoDaemonProcess(multiprocessing.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
Process = NoDaemonProcess
"""
# above code was broken when switching to python 3. Below is taken from:
# https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic/8963618#8963618
class NoDaemonProcess(multiprocessing.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NoDaemonContext(type(multiprocessing.get_context())):
Process = NoDaemonProcess
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
def __init__(self, *args, **kwargs):
kwargs['context'] = NoDaemonContext()
super(NoDaemonProcessPool, self).__init__(*args, **kwargs)
# Exception wrapper for multiprocessing taken from
# https://stackoverflow.com/questions/6126007/python-getting-a-traceback-from-a-multiprocessing-process/26096355#26096355
class ExceptionWrapper(object):
def __init__(self, ee, exc):
self.ee = ee
self.exc = exc
__, __, self.tb = sys.exc_info()
def re_raise(self):
print(self.exc)
raise self.ee.with_traceback(self.tb)
###############################################################
# Region prep related functions
###############################################################
def coordinate_to_target(coordinates, snp_locations, capture_size):
""" Create MIP targets starting from a snp file that is produced offline,
usually from Annovar. This is a tab separated file with the following
chr1 2595307 2595307 A G rs3748816.
This can be generalized to any target with coordinates.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
snp_chroms = {}
reference_snp_locations = rsl = coordinates
for r in rsl:
chrom = rsl[r]["chrom"]
try:
snp_chroms[chrom].append([rsl[r]["begin"],
rsl[r]["end"]])
except KeyError:
snp_chroms[chrom] = [[rsl[r]["begin"],
rsl[r]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_snp_chroms = {}
for c in snp_chroms:
merged_snp_chroms[c] = merge_overlap(snp_chroms[c], 2 * capture_size)
# create regions for alignment
for c in merged_snp_chroms:
regions = merged_snp_chroms[c]
for r in regions:
snps_in_region = []
for s in reference_snp_locations:
if ((reference_snp_locations[s]["chrom"] == c)
and (r[0] <= reference_snp_locations[s]["begin"]
<= reference_snp_locations[s]["end"] <= r[1])):
snps_in_region.append(s)
r.append(snps_in_region)
for reg in regions:
snps = reg[2]
reg_begin = reg[0]
reg_end = reg[1]
reg_locations = []
for s in snps:
s_locations = []
locations = snp_locations[s]
ref_location = reference_snp_locations[s]
ref_begin = ref_location["begin"]
ref_end = ref_location["end"]
left_flank_buffer = ref_begin - reg_begin + capture_size
right_flank_buffer = reg_end - ref_end + capture_size
for l in locations:
snp_chrom = l["chrom"]
snp_begin = l["begin"]
snp_end = l["end"]
tar_begin = snp_begin - left_flank_buffer
tar_end = snp_end + right_flank_buffer
s_locations.append([snp_chrom, tar_begin, tar_end])
reg_locations.append(s_locations)
reg.append(reg_locations)
# create target coordinate for each region
target_coordinates = {}
for c in merged_snp_chroms:
regions = merged_snp_chroms[c]
for reg in regions:
region_name = "-".join(reg[2])
region_targets = reg[3][0]
for i in range(len(region_targets)):
reg_name = region_name + "-" + str(i)
if reg_name in target_coordinates:
print((reg_name, " is already in targets!"))
else:
target_coordinates[reg_name] = region_targets[i]
return target_coordinates
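# Illustrative input shape for coordinate_to_target (hypothetical values):
#   coordinates = {"rs3748816": {"chrom": "chr1", "begin": 2595307, "end": 2595307}}
#   snp_locations = {"rs3748816": [{"chrom": "chr1", "begin": 2595307, "end": 2595307}]}
#   targets = coordinate_to_target(coordinates, snp_locations, capture_size=100)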
def rsid_to_target(resource_dir, snp_file):
""" Create MIP targets starting from a snp file that is produced offline,
usually from Annovar. This is a tab separated file with the following
content: chr1 2595307 2595307 A G rs3748816.
This can be generalized to any target with coordinates.
"""
# one snp can have multiple locations on the reference genome,
# this can happen with snps in regions where there are multiple different
# assemblies (HLA locus, for example). So first step is to get each of
# these locations in the genome.
snp_locations = {}
capture_types = {}
with io.open(os.path.join(resource_dir, snp_file),
encoding="utf-8") as infile:
for line in infile:
newline = line.strip().split("\t")
rsid = newline[5]
try:
# update the location dictionary if the rsid is already present
temp_dic = {"chrom": newline[0],
"begin": int(newline[1]),
"end": int(newline[2]),
"ref_base": newline[3],
"alt_bases": [newline[4]]}
# check if this location is already in the dict
# append the new alternative base to the dict
for snp in snp_locations[rsid]:
if ((snp["begin"] == temp_dic["begin"])
and (snp["end"] == temp_dic["end"])
and (snp["chrom"] == temp_dic["chrom"])
and (snp["ref_base"] == temp_dic["ref_base"])):
snp["alt_bases"].append(temp_dic["alt_bases"][0])
break
else:
# add the snp dict if the location is different than what
# is present in the location dict.
snp_locations[rsid].append(temp_dic)
except KeyError:
# add the new rsid to location dict if it is not already there
snp_locations[rsid] = [temp_dic]
capture_types[rsid] = newline[6]
# one reference location for each snp is required
    # alternative assembly chromosomes have an underscore in their names,
    # so that will be utilized to get the location in the original assembly,
# i.e. the chromosome that does not have the underscore
# (chr7 and not chr7_alt08)
reference_snp_locations = {}
problem_snps = []
for s in snp_locations:
if len(snp_locations[s]) == 1:
reference_snp_locations[s] = snp_locations[s][0]
else:
for i in range(len(snp_locations[s])):
if len(snp_locations[s][i]["chrom"].split("_")) == 1:
reference_snp_locations[s] = snp_locations[s][i]
break
else:
print(("Short chromosome name not found! "
"Please check the output list."))
problem_snps.append(s)
reference_snp_locations[s]["capture_type"] = capture_types[s]
return reference_snp_locations, snp_locations
def gene_to_target(gene_list, species):
target_coordinates = {}
for gene in gene_list:
e = get_exons(get_gene(gene,
get_file_locations()[species]["refgene"],
alternative_chr=1))
try:
target_coordinates[gene] = {"chrom": e["chrom"],
"begin": e["begin"],
"end": e["end"]}
except KeyError:
target_coordinates[gene] = {"chrom": np.nan,
"begin": np.nan,
"end": np.nan}
return target_coordinates
def gene_to_target_exons(gene_list, species, exon_list):
target_coordinates = {}
for i in range(len(gene_list)):
gene = gene_list[i]
exons_wanted = exon_list[i]
gene_exons = get_exons(get_gene(gene,
get_file_locations()[species]["refgene"],
alternative_chr=1))
exons = gene_exons["exons"]
if gene_exons["orientation"] == "-":
exons.reverse()
if exons_wanted == "all":
for j in range(len(exons)):
e = exons[j]
tar_name = "-".join([gene, "exon", str(j)])
target_coordinates[tar_name] = {"chrom": gene_exons["chrom"],
"begin": e[0],
"end": e[1]}
else:
for j in exons_wanted:
try:
e = exons[j]
tar_name = "-".join(gene, "exon", str(j))
target_coordinates[tar_name] = {
"chrom": gene_exons["chrom"],
"begin": e[0],
"end": e[1]}
except IndexError:
print(("Exon ", j, " does not exist for gene ", gene))
return target_coordinates
def parse_alignment(reg_file):
""" Create a rinfo dictionary from a rinfo file."""
reg_dic = {}
with open(reg_file, "r") as infile:
for line in infile:
if line.startswith("REGION"):
newline = line.strip().split("\t")
key1 = newline[1].split(":")[0]
key2 = newline[1].split(":")[1]
if key1 not in reg_dic:
reg_dic[key1] = {key2: {"copyname": newline[2],
"chr": int(newline[3][3:]),
"begin": int(newline[4]),
"end": int(newline[5]),
"ori": (newline[6] == "F")}}
else:
reg_dic[key1][key2] = {"copyname": newline[2],
"chr": int(newline[3][3:]),
"begin": int(newline[4]),
"end": int(newline[5]),
"ori": (newline[6] == "F")}
return reg_dic
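# A REGION line in the alignment file is expected to look roughly like (illustrative, tab-separated):
#   REGION    target1:copy0    C0    chr7    1000    2000    F
# where the last field equals "F" for forward orientation.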
def update_rinfo_file(rinfo_file, update_file, output_file):
"""Update a rinfo file with the lines provided in the update_file.
This function will read all lines from a rinfo file and an update file.
First two columns of rinfo files describe the parameters while the
rest assign values. All the lines in the update file which share the
first column with a line in the original file will replace that line
in the original file. All other lines in the original file will remain.
"""
# read the update file
update_dict = {}
with open(update_file) as infile:
for line in infile:
if not line.startswith("#"):
newline = line.strip().split("\t")
update_dict[(newline[0], newline[1])] = line
# read the rinfo file and update as appropriate
with open(rinfo_file) as infile, open(output_file, "w") as outfile:
for line in infile:
if not line.startswith("#"):
newline = line.strip().split("\t")
line_key = (newline[0], newline[1])
try:
outfile.write(update_dict[line_key])
except KeyError:
outfile.write(line)
else:
outfile.write(line)
def get_target_coordinates(res_dir, species, capture_size,
coordinates_file=None, snps_file=None,
genes_file=None):
"""Extract MIP target coordinates from provided files."""
capture_types = {}
# Get target coordinates specified as genomic coordinates
if coordinates_file is None:
region_coordinates = {}
coord_names = []
else:
coordinates_file = os.path.join(res_dir, coordinates_file)
try:
            coord_df = pd.read_table(coordinates_file, index_col=False)
"""
.. module:: projectdirectory
:platform: Unix, Windows
:synopsis: A module for examining collections of git repositories as a whole
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import math
import sys
import os
import numpy as np
import pandas as pd
from git import GitCommandError
from gitpandas.repository import Repository
__author__ = 'willmcginnis'
class ProjectDirectory(object):
"""
An object that refers to a directory full of git repositories, for bulk analysis. It contains a collection of
git-pandas repository objects, created by os.walk-ing a directory to file all child .git subdirectories.
:param working_dir: (optional, default=None), the working directory to search for repositories in, None for cwd, or an explicit list of directories containing git repositories
:param ignore: (optional, default=None), a list of directories to ignore when searching for git repos.
:param verbose: (default=True), if True, will print out verbose logging to terminal
:return:
"""
def __init__(self, working_dir=None, ignore=None, verbose=True):
if working_dir is None:
self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(os.getcwd()) if '.git' in x[0]])
elif isinstance(working_dir, list):
self.repo_dirs = working_dir
else:
self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(working_dir) if '.git' in x[0]])
self.repos = [Repository(r, verbose=verbose) for r in self.repo_dirs]
if ignore is not None:
self.repos = [x for x in self.repos if x._repo_name not in ignore]
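    # Illustrative construction (hypothetical paths):
    #   ProjectDirectory(working_dir='/data/repos', ignore=['sandbox'])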
def _repo_name(self):
"""
Returns a DataFrame of the repo names present in this project directory
:return: DataFrame
"""
ds = [[x._repo_name()] for x in self.repos]
df = pd.DataFrame(ds, columns=['repository'])
return df
def is_bare(self):
"""
Returns a dataframe of repo names and whether or not they are bare.
:return: DataFrame
"""
ds = [[x._repo_name(), x.is_bare()] for x in self.repos]
df = pd.DataFrame(ds, columns=['repository', 'is_bare'])
return df
def has_coverage(self):
"""
Returns a DataFrame of repo names and whether or not they have a .coverage file that can be parsed
:return: DataFrame
"""
ds = [[x._repo_name(), x.has_coverage()] for x in self.repos]
df = pd.DataFrame(ds, columns=['repository', 'has_coverage'])
return df
def coverage(self):
"""
Will return a DataFrame with coverage information (if available) for each repo in the project).
If there is a .coverage file available, this will attempt to form a DataFrame with that information in it, which
will contain the columns:
* repository
* filename
* lines_covered
* total_lines
* coverage
If it can't be found or parsed, an empty DataFrame of that form will be returned.
:return: DataFrame
"""
df = pd.DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage', 'repository'])
for repo in self.repos:
try:
cov = repo.coverage()
cov['repository'] = repo._repo_name()
df = df.append(cov)
except GitCommandError as err:
print('Warning! Repo: %s seems to not have coverage' % (repo, ))
pass
df.reset_index()
return df
def file_change_rates(self, branch='master', limit=None, extensions=None, ignore_dir=None, coverage=False):
"""
This function will return a DataFrame containing some basic aggregations of the file change history data, and
optionally test coverage data from a coverage.py .coverage file. The aim here is to identify files in the
project which have abnormal edit rates, or the rate of changes without growing the files size. If a file has
a high change rate and poor test coverage, then it is a great candidate for writing more tests.
:param branch: (optional, default=master) the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
:return: DataFrame
"""
columns = ['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change', 'edit_rate', 'repository']
if coverage:
columns += ['lines_covered', 'total_lines', 'coverage']
df = pd.DataFrame(columns=columns)
for repo in self.repos:
try:
fcr = repo.file_change_rates(branch=branch, limit=limit, extensions=extensions, ignore_dir=ignore_dir, coverage=coverage)
fcr['repository'] = repo._repo_name()
df = df.append(fcr)
except GitCommandError as err:
print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))
pass
df.reset_index()
return df
def commit_history(self, branch, limit=None, extensions=None, ignore_dir=None, days=None):
"""
Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories
are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is
divided by the number of repositories in the project directory to find out how many commits to pull from each
project. Future implementations will use date ordering across all projects to get the true most recent N commits
across the project.
Included in that DataFrame will be the columns:
* repository
* date (index)
* author
* committer
* message
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:param days: (optional, default=None) number of days to return if limit is None
:return: DataFrame
"""
if limit is not None:
limit = int(limit / len(self.repo_dirs))
df = pd.DataFrame(columns=['author', 'committer', 'date', 'message', 'lines', 'insertions', 'deletions', 'net'])
for repo in self.repos:
try:
ch = repo.commit_history(branch, limit=limit, extensions=extensions, ignore_dir=ignore_dir, days=days)
ch['repository'] = repo._repo_name()
df = df.append(ch)
except GitCommandError as err:
print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))
pass
df.reset_index()
return df
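    # Illustrative call: ProjectDirectory().commit_history('master', limit=500) returns the
    # concatenated commit log across all discovered repositories.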
def file_change_history(self, branch='master', limit=None, extensions=None, ignore_dir=None):
"""
Returns a DataFrame of all file changes (via the commit history) for the specified branch. This is similar to
the commit history DataFrame, but is one row per file edit rather than one row per commit (which may encapsulate
many file changes). Included in the DataFrame will be the columns:
* repository
* date (index)
* author
* committer
* message
* filename
* insertions
* deletions
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:return: DataFrame
"""
if limit is not None:
limit = int(limit / len(self.repo_dirs))
df = pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])
for repo in self.repos:
try:
ch = repo.file_change_history(branch, limit=limit, extensions=extensions, ignore_dir=ignore_dir)
ch['repository'] = repo._repo_name()
df = df.append(ch)
except GitCommandError as err:
print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))
pass
df.reset_index()
return df
def blame(self, extensions=None, ignore_dir=None, committer=True, by='repository'):
"""
Returns the blame from the current HEAD of the repositories as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to all repositories by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:return: DataFrame
"""
df = pd.DataFrame(columns=['loc'])
for repo in self.repos:
try:
df = df.append(repo.blame(extensions=extensions, ignore_dir=ignore_dir, committer=committer, by=by))
except GitCommandError as err:
                print('Warning! Repo: %s couldn\'t be blamed' % (repo, ))
pass
df = df.groupby(df.index).agg({'loc': np.sum})
df = df.sort_values(by=['loc'], ascending=False)
return df
def branches(self):
"""
Returns a data frame of all branches in origin. The DataFrame will have the columns:
* repository
* local
* branch
:returns: DataFrame
"""
df = pd.DataFrame(columns=['repository', 'local', 'branch'])
for repo in self.repos:
try:
df = df.append(repo.branches())
except GitCommandError as err:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
pass
df.reset_index()
return df
def revs(self, branch='master', limit=None, skip=None, num_datapoints=None):
"""
Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:
* date
* repository
* rev
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:return: DataFrame
"""
if limit is not None:
limit = math.floor(float(limit) / len(self.repos))
if num_datapoints is not None:
num_datapoints = math.floor(float(num_datapoints) / len(self.repos))
df = pd.DataFrame(columns=['repository', 'rev'])
for repo in self.repos:
try:
revs = repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)
revs['repository'] = repo._repo_name()
df = df.append(revs)
except GitCommandError as err:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
pass
df.reset_index()
return df
def cumulative_blame(self, branch='master', extensions=None, ignore_dir=None, by='committer', limit=None, skip=None, num_datapoints=None, committer=True):
"""
Returns a time series of cumulative blame for a collection of projects. The goal is to return a dataframe for a
collection of projects with the LOC attached to an entity at each point in time. The returned dataframe can be
returned in 3 forms (switched with the by parameter, default 'committer'):
* committer: one column per committer
* project: one column per project
        * raw: one column per committer per project
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default='committer') whether to arrange the output by committer or project
:return: DataFrame
"""
blames = []
for repo in self.repos:
try:
blame = repo.cumulative_blame(branch=branch, extensions=extensions, ignore_dir=ignore_dir, limit=limit, skip=skip, num_datapoints=num_datapoints, committer=committer)
blames.append((repo._repo_name(), blame))
except GitCommandError as err:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
pass
global_blame = blames[0][1]
global_blame.columns = [x + '__' + str(blames[0][0]) for x in global_blame.columns.values]
blames = blames[1:]
for reponame, blame in blames:
blame.columns = [x + '__' + reponame for x in blame.columns.values]
global_blame = pd.merge(global_blame, blame, left_index=True, right_index=True, how='outer')
global_blame.fillna(method='pad', inplace=True)
global_blame.fillna(0.0, inplace=True)
if by == 'committer':
committers = [(str(x).split('__')[0].lower().strip(), x) for x in global_blame.columns.values]
if sys.version_info.major == 2:
committer_mapping = dict([(c, [x[1] for x in committers if x[0] == c]) for c in set([x[0] for x in committers])])
else:
committer_mapping = {c: [x[1] for x in committers if x[0] == c] for c in {x[0] for x in committers}}
for committer in committer_mapping.keys():
global_blame[committer] = 0
for col in committer_mapping.get(committer, []):
global_blame[committer] += global_blame[col]
global_blame = global_blame.reindex(columns=list(committer_mapping.keys()))
elif by == 'project':
projects = [(str(x).split('__')[1].lower().strip(), x) for x in global_blame.columns.values]
if sys.version_info.major == 2:
project_mapping = dict([(c, [x[1] for x in projects if x[0] == c]) for c in set([x[0] for x in projects])])
else:
project_mapping = {c: [x[1] for x in projects if x[0] == c] for c in {x[0] for x in projects}}
for project in project_mapping.keys():
global_blame[project] = 0
for col in project_mapping.get(project, []):
global_blame[project] += global_blame[col]
global_blame = global_blame.reindex(columns=list(project_mapping.keys()))
global_blame = global_blame[~global_blame.index.duplicated()]
return global_blame
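    # Sketch of the cumulative blame output (a hedged illustration, not project
    # data; 'project' stands for an instance of this class):
    #
    #     blame = project.cumulative_blame(branch='master', by='committer', num_datapoints=50)
    #     blame.plot.area()   # stacked LOC ownership over time
    #
    # With by='committer' the result is a time-indexed DataFrame with one LOC
    # column per committer; by='project' gives one column per repository; and
    # by='raw' keeps the '<committer>__<repository>' columns produced before the
    # grouping step above.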
def tags(self):
"""
Returns a data frame of all tags in origin. The DataFrame will have the columns:
* repository
* tag
:returns: DataFrame
"""
df = pd.DataFrame(columns=['repository', 'tag'])
for repo in self.repos:
try:
df = df.append(repo.tags())
except GitCommandError as err:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
pass
df.reset_index()
return df
def repo_information(self):
"""
Returns a DataFrame with the properties of all repositories in the project directory. The returned DataFrame
will have the columns:
* local_directory
* branches
* bare
* remotes
* description
* references
* heads
* submodules
* tags
* active_branch
:return: DataFrame
"""
data = [[repo.git_dir,
repo.repo.branches,
repo.repo.bare,
repo.repo.remotes,
repo.repo.description,
repo.repo.references,
repo.repo.heads,
repo.repo.submodules,
repo.repo.tags,
repo.repo.active_branch] for repo in self.repos]
df = pd.DataFrame(data, columns=[
'local_directory',
'branches',
'bare',
'remotes',
'description',
'references',
'heads',
'submodules',
'tags',
'active_branch'
])
return df
def bus_factor(self, extensions=None, ignore_dir=None, by='projectd'):
"""
An experimental heuristic for truck factor of a repository calculated by the current distribution of blame in
the repository's primary branch. The factor is the fewest number of contributors whose contributions make up at
least 50% of the codebase's LOC
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:return:
"""
if by == 'file':
raise NotImplementedError('File-wise bus factor')
elif by == 'projectd':
blame = self.blame(extensions=extensions, ignore_dir=ignore_dir, by='repository')
blame = blame.sort_values(by=['loc'], ascending=False)
total = blame['loc'].sum()
cumulative = 0
tc = 0
for idx in range(blame.shape[0]):
                cumulative += blame['loc'].iloc[idx]
tc += 1
if cumulative >= total / 2:
break
            return pd.DataFrame([['projectd', tc]], columns=['projectd', 'bus factor'])
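# The methods above follow one aggregation pattern: run the per-repository
# method, tag the rows with the repository name, and append everything into a
# single DataFrame. A minimal usage sketch (hedged: it assumes the methods
# belong to a project-directory style class, here hypothetically named
# ProjectDirectory and built from a directory of git checkouts):
#
#     project = ProjectDirectory(working_dir='/path/to/checkouts')
#     commits = project.commit_history('master', limit=1000)           # one row per commit
#     edits = project.file_change_history('master', extensions=['py'])
#     blame = project.blame(committer=True, by='repository')           # LOC per committer
#     bus = project.bus_factor(by='projectd')                          # smallest group owning >= 50% of LOC
#
# Each call returns a pandas DataFrame, so results from many repositories can
# be analysed together with the usual pandas tooling.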
from tl4sm.prepare_data import split_dataset
from numpy import array, stack
from pandas import read_csv, DataFrame
from pathlib import Path
from keras.models import load_model, clone_model
import time
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM, BatchNormalization, RepeatVector, ConvLSTM2D
from keras.layers import Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import optimizers
from tl4sm.prepare_data import to_supervised
from keras.utils import to_categorical
from numpy import argmax
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, classification_report
#function to bin data and return dataframe
def bin_data(dataset, med, high):
dataset['bin'] = 0
    dataset.loc[(dataset['intSpeed'] > med) & (dataset['intSpeed'] < high), 'bin'] = 1
    dataset.loc[dataset['intSpeed'] >= high, 'bin'] = 2
return dataset
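# Minimal sketch of the binning above (hedged: the column name 'intSpeed' and
# the three-level scheme come from this file; the threshold and sample values
# are made up for illustration):
#
#     from pandas import DataFrame
#     sample = DataFrame({'intSpeed': [10, 45, 90]})
#     sample = bin_data(sample, med=30, high=70)
#     # sample['bin'] -> [0, 1, 2]: 0 for <= med, 1 for strictly between, 2 for >= high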
#function to view the training history of each model
def view_loss(history, exp_num):
plt.plot(history.history['loss'],label='Train')
plt.plot(history.history['val_loss'],label='Val')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig('../Plots/loss-history'+str(exp_num)+'.png')
plt.show()
#function to view the training history of each model
def view_acc(history, exp_num):
plt.close()
plt.plot(history.history['acc'],label='Accuracy')
plt.plot(history.history['loss'],label='Loss')
    plt.title('Model accuracy and loss')
    plt.ylabel('Value')
plt.xlabel('Epoch')
plt.legend()
plt.savefig('../Plots/loss-history'+str(exp_num)+'.png')
plt.show()
# function to build a ConvLSTM model
def build_model(train, epochs, batch_size, lr, n_steps, n_length, n_input, source, exp_num, data_percent, verbose, n_out=10, batch_norm=True, plot=False):
#prepare data
train_x, train_y = to_supervised(train, n_input, step_size=1, n_out=1, is_y=True)
#data percentage
train_ind = int(round(len(train_x)*(data_percent)))
train_x = train_x[-train_ind:, :]
train_y = train_y[-train_ind:, :]
# define parameters
n_features, n_outputs = train_x.shape[2], train_y.shape[1]
# reshape into subsequences [samples, timesteps, rows, cols, channels]
train_x = train_x.reshape((train_x.shape[0], n_steps, 1, n_length, n_features))
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    train_y = to_categorical(train_y)
# define model
model = Sequential()
if batch_norm:
model.add(BatchNormalization(input_shape=(n_steps, 1, n_length, n_features)))
model.add(ConvLSTM2D(filters=128, kernel_size=(1,4), activation='relu', return_sequences=True, input_shape=(n_steps, 1, n_length, n_features)))
model.add(ConvLSTM2D(filters=128, kernel_size=(1,4), activation='relu', input_shape=(n_steps, 1, n_length, n_features)))
model.add(Flatten())
model.add(RepeatVector(n_outputs))
if batch_norm:
model.add(BatchNormalization(input_shape=(n_steps, 1, n_length, n_features)))
model.add((LSTM(100, activation='relu', dropout=0.05, return_sequences=True)))
model.add((LSTM(100, activation='relu', dropout=0.1, return_sequences=True)))
model.add((Dense(500, activation='relu')))
model.add((Dense(3, activation='softmax')))
opt = optimizers.Adam(lr=lr)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics = ['acc'])
model.build()
#use epoch checkpoints to ensure the best epoch is used
es = EarlyStopping(monitor='val_acc', mode='max', verbose=1, patience=epochs)
checkpointer = ModelCheckpoint(filepath="../Models/best_weights.hdf5",
monitor = 'val_acc',
verbose=2,
save_best_only=True)
callbacks_list = [checkpointer, es] #early
#record time
tic = time.time()
# fit network
history = model.fit(train_x, train_y, epochs=epochs, shuffle=False, callbacks=callbacks_list, batch_size=batch_size, verbose=verbose, validation_split=0.1)
#record time
toc = time.time()
totalTime = toc-tic
if plot:
view_loss(history, str(exp_num))
model.load_weights('../Models/best_weights.hdf5')
model.save('../Models/model_'+str(source)+'.h5')
return model, totalTime
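# Shape sketch for the ConvLSTM input prepared above (hedged: the numbers are
# illustrative only). A window of n_input timesteps is split into n_steps
# subsequences of n_length steps each, so n_input must equal n_steps * n_length:
#
#     n_input, n_length = 24, 6        # 24-step history, subsequences of 6 steps
#     n_steps = n_input // n_length    # -> 4
#     # train_x: (samples, 4, 1, 6, n_features)  == (samples, timesteps, rows, cols, channels)
#     # train_y: (samples, n_outputs, 3)         == one-hot encoding of the 3 speed bins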
# make a forecast
def forecast(model, history, n_steps, n_length, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_ = data[-n_input:, :]
# reshape into [samples, timesteps, rows, cols, channels]
input_x = input_.reshape((1, n_steps, 1, n_length, 2))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
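# Sketch of a single forecast step (hedged: the shapes follow the reshape above,
# which assumes 2 input features; 'model' and 'history' come from build_model()
# and the walk-forward loop below):
#
#     yhat = forecast(model, history, n_steps=4, n_length=6, n_input=24)
#     # yhat has shape (n_outputs, 3): one softmax distribution over the 3 speed
#     # bins for each predicted timestep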
#function to evaluate the model
def evaluate_model(train, test, n_input, n_length, batch_size, lr, source, exp_num, epochs, n_out, data_percent, verbose, batch_norm=True, plot=False):
#define number of subsequence time steps
n_steps = int(n_input/n_length)
#build model
model, tr_time = build_model(train, epochs, batch_size, lr, n_steps, n_length, n_input, source, exp_num, data_percent, verbose, n_out=10, batch_norm=batch_norm, plot=plot)
# history is a list of training data
history = [x for x in train]
# walk-forward validation over each timestep
predictions = list()
for i in range(len(test)):
# predict the timestep
yhat_sequence = forecast(model, history, n_steps, n_length, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next timestep
history.append(test[i, :])
# evaluate predictions days for each timestep
predictions = array(predictions)
test1 = test[:, :, -1]
YPred = argmax(predictions.reshape(predictions.shape[0], (predictions.shape[1]*predictions.shape[2])), out=None, axis=1)
YPred = YPred.reshape(YPred.shape[0], 1)
df = stack((YPred, test1))
df = df.transpose()
df = df.reshape(df.shape[1], 2)
DataFrame(df).to_csv('../Results/Files/2DConvLSTMAE_TL_'+str(exp_num)+'.csv')
cm = confusion_matrix(test1, YPred)
print(cm)
f1 = f1_score(test1, YPred, average='weighted')
acc = accuracy_score(test1, YPred)
print(classification_report(test1, YPred))
return f1, acc, tr_time, f1, acc, tr_time
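# Usage sketch for the baseline (no-transfer) evaluation above (hedged: the
# hyper-parameter values are placeholders, not values used in this project):
#
#     # train/test are 3-D arrays of shape (windows, timesteps, features),
#     # e.g. as produced by split_dataset() from tl4sm.prepare_data
#     f1, acc, tr_time, *_ = evaluate_model(train, test, n_input=24, n_length=6,
#                                           batch_size=32, lr=1e-3, source='A',
#                                           exp_num=1, epochs=50, n_out=10,
#                                           data_percent=1.0, verbose=2)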
#function to evaluate the model
def evaluate_model_reuse(train, test, n_input, n_length, batch_size, lr, source, exp_num, epochs, model_name, data_percent, n_out, batch_norm=True, plot=False):
#define number of subsequence time steps
n_steps = int(n_input/n_length)
#prepare data
train_x, train_y = to_supervised(train, n_input, step_size=1, n_out=1, is_y=True)
# define parameters
n_features = train_x.shape[2]
# define model
model = load_model(model_name)
model = clone_model(model)
model.build()
opt = optimizers.Adam()
model.compile(loss='categorical_crossentropy', metrics = ['acc'], optimizer=opt)
#model.summary()
model.load_weights(model_name)
#data percentage
train_ind = int(round(len(train_x)*(data_percent)))
train_x = train_x[-train_ind:, :]
train_y = train_y[-train_ind:, :]
# define parameters
n_features = train_x.shape[2]
# reshape into subsequences [samples, timesteps, rows, cols, channels]
train_x = train_x.reshape((train_x.shape[0], n_steps, 1, n_length, n_features))
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    train_y = to_categorical(train_y)
#record time
tic = time.time()
# fit network
history = model.fit(train_x, train_y, epochs=epochs, shuffle=False, batch_size=batch_size, verbose=2)
#record time
toc = time.time()
totTime = toc-tic
#model.load_weights('../Models/best_weights_TL.hdf5')
model.save('../Models/model_TL_'+str(exp_num)+'.h5')
if plot:
view_loss(history, str(exp_num))
# history is a list of training data
history = [x for x in train]
# walk-forward validation over each timestep
predictions = list()
for i in range(len(test)):
# predict the timestep
yhat_sequence = forecast(model, history, n_steps, n_length, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next timestep
history.append(test[i, :])
# evaluate predictions days for each timestep
predictions = array(predictions)
test1 = test[:, :, -1]
YPred = argmax(predictions.reshape(predictions.shape[0], (predictions.shape[1]*predictions.shape[2])), out=None, axis=1)
YPred = YPred.reshape(YPred.shape[0], 1)
df = stack((YPred, test1))
df = df.transpose()
df = df.reshape(df.shape[1], 2)
DataFrame(df).to_csv('../Results/Files/2DConvLSTMAE_TL_'+str(exp_num)+'.csv')
cm = confusion_matrix(test1, YPred)
print(cm)
f1 = f1_score(test1, YPred, average='weighted')
acc = accuracy_score(test1, YPred)
print(classification_report(test1, YPred))
return f1, acc, totTime
#function to evaluate the model
def evaluate_model_tl(train, test, n_input, n_length, batch_size, lr, source, exp_num, epochs, model_name, data_percent, n_layers, n_out, batch_norm=False, plot=False):
#define number of subsequence time steps
n_steps = int(n_input/n_length)
# load pretrained model
model = load_model(model_name)
#prepare data
train_x, train_y = to_supervised(train, n_input, step_size=1, n_out=1, is_y=True)
#data percentage
train_ind = int(round(len(train_x)*(data_percent)))
train_x = train_x[-train_ind:, :]
train_y = train_y[-train_ind:, :]
# define parameters
n_features = train_x.shape[2]
# reshape into subsequences [samples, timesteps, rows, cols, channels]
train_x = train_x.reshape((train_x.shape[0], n_steps, 1, n_length, n_features))
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    train_y = to_categorical(train_y)
    # freeze all but the last n_layers layers, as indicated by the parameter 'n_layers'
for layer in model.layers[:-n_layers]:
layer.trainable = False
#check trainable status of individual layers
for layer in model.layers:
print(layer, layer.trainable)
opt = optimizers.Adam(lr=lr)
model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer=opt)
#model.summary()
#record time
tic = time.time()
# fit network
history = model.fit(train_x, train_y, epochs=epochs, shuffle=False, batch_size=batch_size, verbose=2)
#record time
toc = time.time()
totTime = toc-tic
#model.load_weights('../Models/best_weights_TL.hdf5')
model.save('../Models/model_TL_'+str(exp_num)+'.h5')
if plot:
view_acc(history, str(exp_num))
history = [x for x in train]
# walk-forward validation over each timestep
predictions = list()
for i in range(len(test)):
# predict the timestep
yhat_sequence = forecast(model, history, n_steps, n_length, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next timestep
history.append(test[i, :])
# evaluate predictions days for each timestep
predictions = array(predictions)
test1 = test[:, :, -1]
YPred = argmax(predictions.reshape(predictions.shape[0], (predictions.shape[1]*predictions.shape[2])), out=None, axis=1)
YPred = YPred.reshape(YPred.shape[0], 1)
df = stack((YPred, test1))
df = df.transpose()
df = df.reshape(df.shape[1], 2)
DataFrame(df).to_csv('../Results/Files/2DConvLSTMAE_TL_'+str(exp_num)+'.csv')
cm = confusion_matrix(test1, YPred)
print(cm)
f1 = f1_score(test1, YPred, average='weighted')
acc = accuracy_score(test1, YPred)
print(classification_report(test1, YPred))
return f1, acc, totTime
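# The three evaluate_model* variants above correspond to three training set-ups
# (a hedged summary of this file, not additional project code):
#   * evaluate_model       - train a fresh ConvLSTM on the target data only
#   * evaluate_model_reuse - reload the source model's weights and retrain every layer
#   * evaluate_model_tl    - reload the source model and freeze all but the last
#                            n_layers layers before fine-tuning on the target data
# For example, n_layers=2 would fine-tune only the two final Dense layers of the
# architecture defined in build_model().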
def perform_experiment(resFile, file_name, n_test, model_, n_out, verbose, med, high):
#load experimental config from csv file
    df_exp = read_csv(resFile, header=0)