import numpy as np
import pandas as pd
from pymbar import BAR as BAR_
from pymbar import MBAR as MBAR_
from alchemlyb.estimators import BAR, MBAR
from sklearn.base import BaseEstimator
import copy
import re
import itertools
import logging
logger = logging.getLogger(__name__)
class Estimators():
"""
Estimate the binding free energy (dG).
Returns the dG between state A and state B using 3 different energy estimators:
Zwanzig, Thermodynamic Integration (TI), or Bennett Acceptance Ratio (BAR).
"""
def Zwanzig(dEs,steps):
"""
Return the estimated binding free energy using the Zwanzig estimator.
Computes the binding free energy (dG) from a molecular dynamics simulation
between state A and state B using the Zwanzig estimator.
Parameters
----------
dEs : Pandas DataFrame
contains the reduced potential (dE) between the states.
steps : integer
the number of steps to be included in the calculation; set to "None" if all steps are needed.
Returns
---------
Zwanzig_df : Pandas Dataframe
contains the binding free energy (dG) between the states.
Examples
--------
>>> Zwanzig(dEs,None)
>>> Zwanzig(dEs,1000)
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:steps]/0.592))))
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))-pd.DataFrame(dGR[1:][::-1]))/2
for i in range(len(list(dG_average_raw.values))):
dG_Average.append(np.sum(dG_average_raw.values[:i+1]))
Zwanzig_df=pd.DataFrame.from_dict({"Lambda":Lambdas,"dG_Forward":dGF,"SUM_dG_Forward":dGF_sum,"dG_Reverse":dGR[::-1],"SUM_dG_Reverse":dGR_sum[::-1],"dG_Average":dG_Average})
Zwanzig_Final_dG = Zwanzig_df['dG_Average'].iloc[-1]
logger.info('Final DG computed from Zwanzig estimator: ' +str(Zwanzig_Final_dG))
return Zwanzig_df, Zwanzig_Final_dG
def Create_df_TI(State_A_df, State_B_df):
"""
Create the input dataframe needed for the Thermodynamic Integration (TI) function.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
dU_dH_df : Pandas DataFrame
"""
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
return dU_dH_df
def TI(State_A_df,State_B_df,steps):
"""
Return the estimated binding free energy using the Thermodynamic Integration (TI) estimator.
Compute free energy differences between each state by integrating
dHdl across lambda values.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
steps : integer
the number of steps to include; set to None to use all steps.
----------
Returns
----------
delta_f_ : DataFrame
The estimated free energy difference between each pair of lambda states.
TI : float
The free energy difference between state 0 and state 1.
"""
if steps is not None:
Energies_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('lambda',sort=False)['fep'].apply(list)),orient='index')
Energies_df=Energies_df.transpose()
Energies_df=Energies_df.iloc[:steps]
dfl=pd.DataFrame(columns=['lambda','fep'])
dU_dH_df=pd.DataFrame(columns=['lambda','fep'])
for state in range (len(Energies_df.columns)):
dfl=pd.DataFrame(columns=['lambda','fep'])
dfl['fep']=Energies_df.iloc[:,state]
dfl['lambda']=Energies_df.columns.values[state]
dU_dH_df=dU_dH_df.append(dfl)
else:
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
# dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"][:steps],"fep":State_B_df["Q_sum"][:steps] - State_A_df["Q_sum"][:steps] })).sort_values('lambda')
# dU_dH_df.reset_index(drop=True,inplace=True)
# dU_dH_df.index.names = ['time']
# dU_dH_df.set_index(['lambda'], append=True,inplace=True)
dHdl=dU_dH_df
# sort by state so that rows from same state are in contiguous blocks,
# and adjacent states are next to each other
dHdl = dHdl.sort_index(level=dHdl.index.names[1:])
# obtain the mean and variance of the mean for each state
# variance calculation assumes no correlation between points
# used to calculate mean
means = dHdl.mean(level=dHdl.index.names[1:])
variances = np.square(dHdl.sem(level=dHdl.index.names[1:]))
# get the lambda names
l_types = dHdl.index.names[1:]
# obtain vector of delta lambdas between each state
dl = means.reset_index()[means.index.names[:]].diff().iloc[1:].values
# apply trapezoid rule to obtain DF between each adjacent state
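# (added note) for adjacent states this is the standard trapezoid estimate:
#   dG_{i->i+1} ~= (lambda_{i+1} - lambda_i) * (<dH/dl>_i + <dH/dl>_{i+1}) / 2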
deltas = (dl * (means.iloc[:-1].values + means.iloc[1:].values)/2).sum(axis=1)
# build matrix of deltas between each state
adelta = np.zeros((len(deltas)+1, len(deltas)+1))
ad_delta = np.zeros_like(adelta)
for j in range(len(deltas)):
out = []
dout = []
for i in range(len(deltas) - j):
out.append(deltas[i] + deltas[i+1:i+j+1].sum())
# Define additional zero lambda
a = [0.0] * len(l_types)
# Define dl series' with additional zero lambda on the left and right
dll = np.insert(dl[i:i + j + 1], 0, [a], axis=0)
dlr = np.append(dl[i:i + j + 1], [a], axis=0)
# Get a series of the form: x1, x1 + x2, ..., x(n-1) + x(n), x(n)
dllr = dll + dlr
# Append deviation of free energy difference between state i and i+j+1
dout.append((dllr ** 2 * variances.iloc[i:i + j + 2].values / 4).sum(axis=1).sum())
adelta += np.diagflat(np.array(out), k=j+1)
ad_delta += np.diagflat(np.array(dout), k=j+1)
# yield standard delta_f_ free energies between each state
delta_f_ = pd.DataFrame(adelta - adelta.T,
columns=means.index.values,
index=means.index.values)
# yield standard deviation d_delta_f_ between each state
d_delta_f_ = pd.DataFrame(np.sqrt(ad_delta + ad_delta.T),
columns=variances.index.values,
index=variances.index.values)
states_ = means.index.values.tolist()
TI=( delta_f_.loc[0.00, 1.00])
return delta_f_ , TI
def Create_df_BAR_MBAR(State_A_df, State_B_df):
"""
Create the input dataframe needed for the Bennett Acceptance Ratio (BAR) and multistate Bennett Acceptance Ratio (MBAR) estimators.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
u_nk_df : Pandas DataFrame
"""
Energies_df=(pd.DataFrame({"State_A_Lambda":State_A_df["Lambda"],"State_A_G":State_A_df["Q_sum"] ,"State_B_Lambda":State_B_df["Lambda"],"State_B_G":State_B_df["Q_sum"],"E":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('State_A_Lambda')
State_A_Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('State_A_Lambda',sort=False)['State_A_G'].apply(list)),orient='index')
State_A_Energies_df=State_A_Energies_df.transpose()
State_B_Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('State_B_Lambda',sort=False)['State_B_G'].apply(list)),orient="index")
State_B_Energies_df=State_B_Energies_df.transpose()
lambdas_list_A=list(State_A_Energies_df.columns)
lambdas_list_B=list(State_B_Energies_df.columns)
time= [i for i in range(len(State_A_Energies_df))]
lambdas_df=[i for i in State_A_Energies_df.columns]
States={i:[] for i in range(len(lambdas_list_A))}
States_dicts={i:[] for i in range(len(lambdas_list_A))}
for i in range(len(State_A_Energies_df.columns)):
State_A_Energies=State_A_Energies_df.iloc[:,[i]]
State_A_Energies.columns=["0"]
State_A_Lambda_float=State_A_Energies_df.columns[i]
State_B_Energies=State_B_Energies_df.iloc[:,[i]]
State_B_Energies.columns=["0"]
State_B_Lambda_float=State_B_Energies_df.columns[i]
E0=State_A_Energies*State_A_Lambda_float+State_B_Energies*State_B_Lambda_float
for x in range(len(lambdas_list_A)):
E1=State_A_Energies*lambdas_list_A[x]+State_B_Energies*lambdas_list_B[x]
dE=E1-E0
dE=dE.values.tolist()
dE=list(itertools.chain(*dE))
States_dicts[i].append(dE)
for i in range(len(States_dicts)):
States[i]=list(itertools.chain(*States_dicts[i]))
u_nk_df=pd.DataFrame.from_dict(States)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df)
lambdas_df.sort()
u_nk_df['time']=time*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
return u_nk_df,States_dicts,State_A_Energies_df
def Create_df_dG_BAR(States_dicts,State_A_Energies_df,steps):
"""
Create the input dataframe needed for the Bennett Acceptance Ratio (BAR) estimator and calculate the free energy.
Parameters
----------
States_dicts : dict
per-lambda lists of energy differences, as returned by Create_df_BAR_MBAR
State_A_Energies_df : Pandas DataFrame for state A energies
steps : Integer
maximum number of steps to use
----------
Returns
----------
BAR_dG : float
"""
States_dicts2=copy.deepcopy(States_dicts)
States_dicts3={}
lambdas_list_A=list(State_A_Energies_df.columns)
time = [i for i in range(len(State_A_Energies_df))]
lambdas_df=lambdas_list_A
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts2[x][i]=States_dicts[x][i][:steps]
for i in range(len(States_dicts2)):
States_dicts3[i]=list(itertools.chain(*States_dicts2[i]))
u_nk_df=pd.DataFrame.from_dict(States_dicts3)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df.iloc[:steps])
lambdas_df.sort()
u_nk_df['time']=time[:steps]*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
BAR_df=BAR().fit(u_nk_df)
BAR_dG = BAR_df.delta_f_.loc[0.00, 1.00]
return BAR_dG
def Create_df_dG_MBAR(States_dicts,State_A_Energies_df,steps):
"""
Create the input dataframe needed for the multistate Bennett Acceptance Ratio (MBAR) estimator and calculate the free energy.
Parameters
----------
States_dicts : dict
per-lambda lists of energy differences, as returned by Create_df_BAR_MBAR
State_A_Energies_df : Pandas DataFrame for state A energies
steps : Integer
maximum number of steps to use
----------
Returns
----------
MBAR_dG : float
"""
States_length=[]
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_length.append(len(States_dicts[x][i]))
if min(States_length)==max(States_length):
States_dicts2=copy.deepcopy(States_dicts)
else:
print("energy files dosen't have the same length",'min',min(States_length),'max',max(States_length))
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts[x][i]=States_dicts[x][i][:min(States_length)]
States_dicts2=copy.deepcopy(States_dicts)
States_dicts3={}
lambdas_list_A=list(State_A_Energies_df.columns)
time = [i for i in range(len(State_A_Energies_df))]
lambdas_df=lambdas_list_A
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts2[x][i]=States_dicts[x][i][:steps]
for i in range(len(States_dicts2)):
States_dicts3[i]=list(itertools.chain(*States_dicts2[i]))
u_nk_df=pd.DataFrame.from_dict(States_dicts3)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df.iloc[:steps])
lambdas_df.sort()
u_nk_df['time']=time[:steps]*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
MBAR_df= MBAR().fit(u_nk_df)
MBAR_dG = MBAR_df.delta_f_.loc[0.00, 1.00]
return MBAR_dG
def Convergence(df1,df2,Estimator,StepsChunk_Int,ReplicatiesCount_Int,EnergyOutputInterval_Int):
# the last and first steps are not included in the reading
"""
Convergence analysis
Return a dataframe containing the computed free energy dG at different step intervals using 3 different energy estimators:
Zwanzig, Thermodynamic Integration (TI), or Bennett Acceptance Ratio (BAR).
Parameters
----------
df1, df2 : Pandas DataFrame or dict
The estimator inputs: the dEs dataframe for Zwanzig; the state A and state B
dataframes for TI; States_dicts and State_A_Energies_df for BAR/MBAR.
Estimator : function
The free energy estimating method (Zwanzig, TI, or BAR)
StepsChunk_Int: integer
The number of steps (fs) to be used.
ReplicatiesCount_Int: integer
The number of replicates used.
EnergyOutputInterval_Int: integer
The interval at which the molecular dynamics simulation software
writes the energies.
----------
Returns
----------
Convergence_df : Pandas DataFrame
Contains the computed dG at each interval.
Examples
--------
>>> Convergence(dEs,Zwanzig,1000,1,10)
>>> Convergence(dEs,TI,10000,3,10)
"""
if isinstance(df1, pd.DataFrame) and isinstance(df2, pd.DataFrame) :
dGs_Lst=[Estimator(df1,df2,steps_limit)[1] for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,int((len(df1)/len(df1['Lambda'].unique())))+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,int((len(df1)/len(df1['Lambda'].unique())))+1,StepsChunk_Int*ReplicatiesCount_Int)]
elif isinstance(df2, pd.DataFrame) and not isinstance(df1, pd.DataFrame):
dGs_Lst=[Estimator(df1,df2,steps_limit) for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df2)+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df2)+1,StepsChunk_Int*ReplicatiesCount_Int)]
else:
dGs_Lst=[Estimator(df1,steps_limit)[1] for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
#StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
Convergence_df=pd.DataFrame({'Number of Steps':StepsChunk_Lst, 'dG':dGs_Lst })
return Convergence_df
def Zwanzig_matrix_AI(dEs,steps):
"""
Development in Progress.
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:None]/0.592))))
Lambdas_F=[]
Lambdas_R=[]
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
Lambdas_F.append((re.split('_|-',dEs_df.index[-1])[0])+'_'+(re.split('_|-',dEs_df.index[-1])[0]))
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
Lambdas_R.append((re.split('_|-',dEs_df.index[i])[1])+'_'+(re.split('_|-',dEs_df.index[i])[3]))
Lambdas_F.append((re.split('_|-',dEs_df.index[i-1])[1])+'_'+(re.split('_|-',dEs_df.index[i-1])[3]))
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas_R.append((re.split('_|-',dEs_df.index[-1])[1])+'_'+(re.split('_|-',dEs_df.index[-1])[1]))
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))-pd.DataFrame(dGR[1:][::-1]))/2
import string
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import tikzplotlib
import utils
import networkx as nx
import extensionanalysis as extanalysis
from plotfig import PlotFig
class EvaluationWar:
output_dir = "results/war/plots"
plot_color_dark = "#003f5c"
plot_color_less_dark = "#346888"
@staticmethod
def analyse_extensions(requested_extensions: dict):
chrome_extensions = requested_extensions["chrome"]
# General info
extensions = pd.DataFrame(chrome_extensions)
print(f"Number of distinct requested extensions {len(chrome_extensions)}")
chromecast_requests = extensions[extensions["id"].isin(extanalysis.chromecast_extension_ids)]
print(f"Requests for Chromecast:\n {chromecast_requests.to_string()}")
# By request count
by_request_count = extanalysis.analyse_extensions_by_request_count_clean(chrome_extensions, min_request_count_clean=14)
print("Extensions by request count\n", by_request_count[["title", "request_count_clean", "request_count"]].to_latex())
print("Extensions by request count (with IDs)\n", by_request_count[["title", "id"]].to_latex(index=False))
# Remove chromecast extensions from the list for plotting the diagram
by_request_count = by_request_count[~by_request_count["id"].isin(extanalysis.chromecast_extension_ids)]
with PlotFig(EvaluationWar.output_dir, "requested_extensions_by_request_count") as fig:
ax = by_request_count.plot(kind="barh", x="title", y=["request_count_clean", "request_count"], color=["#004c6d", "#7aa6c2"])
ax.update({"xlabel": "Number of Requests", "ylabel": "Extension Name"})
# By extension category
by_category = extanalysis.analyse_extensions_by_category(chrome_extensions)
print("Requested extensions grouped by category. Number of requests per category and Number of extensions")
print(by_category.to_string())
with PlotFig(EvaluationWar.output_dir, "requested_extensions_by_category") as fig:
ax = by_category.plot(kind="barh", y=["extension_count"], label=["Extensions"], color=EvaluationWar.plot_color_less_dark, edgecolor=EvaluationWar.plot_color_dark)
ax.update({"xlabel": "Number of extensions", "ylabel": "Extension category"})
ax.grid(axis="x", which="major", alpha=0.6, linestyle="--", color="#808080"),
# By category with requests
with PlotFig(EvaluationWar.output_dir, "requested_extensions_by_category_requests") as fig:
ax = by_category.plot(kind="barh", y=["request_count_clean"], label=["Requests (max. 1 per website)"], color=EvaluationWar.plot_color_less_dark, edgecolor=EvaluationWar.plot_color_dark)
ax.update({"xlabel": "Number of requests", "ylabel": "Extension category"})
ax.grid(axis="x", which="major", alpha=0.6, linestyle="--", color="#808080")
# By user count (ranges)
by_user_count = extanalysis.analyse_extensions_by_user_count(chrome_extensions)
print("Requested extensions grouped by user count:", by_user_count.to_string())
utils.draw_donut_chart(
df=by_user_count,
output_path=f"{EvaluationWar.output_dir}/requested_extensions_by_user_count.pdf",
legend_labels=["No data available", "0 - 100", "101 - 1000", "1001 - 10,000", "10,001 - 100,000", "100,001 - 1,000,000", "1,000,001+"],
descriptions={"ylabel": ""},
)
# By frequent keywords
frequent_keywords = extanalysis.analyse_extensions_by_keyword(chrome_extensions)
print("Most frequent keywords in extension titles \n", frequent_keywords)
with PlotFig(EvaluationWar.output_dir, "requested_extensions_by_frequent_keywords", tikz_axis_width=310) as fig:
ax = frequent_keywords.plot(kind="bar", x="Word", y=["Frequency"], color=EvaluationWar.plot_color_less_dark, edgecolor=EvaluationWar.plot_color_dark)
ax.update({"xlabel": "Keyword", "ylabel": "Number of extensions"})
ax.legend(labels=["Extensions"])
ax.grid(axis="y", which="major"),
plt.xticks(rotation=50, ha="right", fontsize="small")
# TODO: add "xticklabel style = {rotate=50,anchor=east,yshift=-2mm, font=\\small}")
@staticmethod
def analyse_extension_groups(requested_extension_groups: dict, war_websites: list):
results = extanalysis.analyse_extension_groups(requested_extension_groups, war_websites)
dfa = pd.DataFrame()
# Print the results
for i, result in enumerate(results):
df = pd.DataFrame(result["extensions"])
df = df[["title", "category", "user_count", "id"]]
df["count"] = result["count"]
df.insert(loc=0, column="group", value=string.ascii_uppercase[i])
df.fillna("-", inplace=True)
df.replace("?", "-", inplace=True)
df.replace(-1.0, "-", inplace=True)
df.replace(-1, "-", inplace=True)
df.replace("", "-", inplace=True)
dfa = pd.concat([dfa, df])
"""
Written by <NAME>, 22-10-2018
This script contains functions for data formatting and accuracy assessment of keras models
"""
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import keras.backend as K
from math import sqrt
import numpy as np
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
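# Example (added, illustrative): with a 2-column input and n_in=1, n_out=1,
# series_to_supervised returns columns
#   var1(t-1), var2(t-1), var1(t), var2(t)
# i.e. lagged copies of every variable followed by the current (and any future) steps,
# with rows containing NaNs from the shifting dropped when dropnan=True.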
# model cost function
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
# scale and format observed data as train/test inputs/labels
def format_obs_data(full_data, n_lags, n_ahead, n_train):
# split datetime column into train and test for plots
train_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
values = full_data[['GWL', 'Tide', 'Precip.']].values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_fit = gwl_scaler.fit(gwl)
gwl_scaled = gwl_fit.transform(gwl)
tide_fit = tide_scaler.fit(tide)
tide_scaled = tide_fit.transform(tide)
rain_fit = rain_scaler.fit(rain)
rain_scaled = rain_fit.transform(rain)
# frame as supervised learning
gwl_super = series_to_supervised(gwl_scaled, n_lags, n_ahead)
gwl_super_values = gwl_super.values
tide_super = series_to_supervised(tide_scaled, n_lags, n_ahead)
tide_super_values = tide_super.values
rain_super = series_to_supervised(rain_scaled, n_lags, n_ahead)
rain_super_values = rain_super.values
# split groundwater into inputs and labels
gwl_input, gwl_labels = gwl_super_values[:, 0:n_lags+1], gwl_super_values[:, n_lags+1:]
# split into train and test sets
train_X = np.concatenate((gwl_input[:n_train, :], tide_super_values[:n_train, :], rain_super_values[:n_train, :]),
axis=1)
test_X = np.concatenate((gwl_input[n_train:, :], tide_super_values[n_train:, :], rain_super_values[n_train:, :]),
axis=1)
train_y, test_y = gwl_labels[:n_train, :], gwl_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_dates, test_dates, tide_fit, rain_fit, gwl_fit, train_X, test_X, train_y, test_y
# scale and format storm data as train/test inputs/labels
def format_storm_data(storm_data, n_train, tide_fit, rain_fit, gwl_fit):
# separate storm data into gwl, tide, and rain
storm_scaled = pd.DataFrame(storm_data["Datetime"])
for col in storm_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
storm_scaled[col] = col_scaled
# split storm data into inputs and labels
storm_values = storm_scaled[storm_scaled.columns[1:]].values
storm_input, storm_labels = storm_values[:, :-18], storm_values[:, -18:]
# split into train and test sets
train_X, test_X = storm_input[:n_train, :], storm_input[n_train:, :]
train_y, test_y = storm_labels[:n_train, :], storm_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_X, test_X, train_y, test_y
# scale and format forecast data as train/test inputs/labels
def format_fcst_data(fcst_data, tide_fit, rain_fit, gwl_fit):
# separate forecast data into gwl, tide, and rain
fcst_scaled = pd.DataFrame(fcst_data["Datetime"])
for col in fcst_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
fcst_scaled[col] = col_scaled
# split fcst data into inputs and labels
fcst_values = fcst_scaled[fcst_scaled.columns[1:]].values
fcst_input, fcst_labels = fcst_values[:, :-18], fcst_values[:, -18:]
# reshape fcst input to be 3D [samples, timesteps, features]
fcst_test_X = fcst_input.reshape((fcst_input.shape[0], 1, fcst_input.shape[1]))
print("forecast input data shape:", fcst_test_X.shape, "forecast label data shape:", fcst_labels.shape)
return fcst_test_X, fcst_labels
# create df of full observed data and predictions and extract storm data
def full_pred_df(test_dates, storm_data, n_lags, n_ahead, inv_y, inv_yhat):
dates_t1 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 1:-n_ahead + 2])
dates_t1 = dates_t1.reset_index(inplace=False, drop=True)
dates_9 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 9:-n_ahead + 10])
dates_9 = dates_9.reset_index(inplace=False, drop=True)
dates_18 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 18:])
dates_18 = dates_18.reset_index(inplace=False, drop=True)
obs_t1 = np.reshape(inv_y[:, 0], (inv_y.shape[0], 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (inv_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["Obs. GWL t+1", "Pred. GWL t+1"])
df_t1 = pd.concat([df_t1, dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
obs_t9 = np.reshape(inv_y[:, 8], (inv_y.shape[0], 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (inv_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["Obs. GWL t+9", "Pred. GWL t+9"])
df_t9 = pd.concat([df_t9, dates_9], axis=1)
df_t9 = df_t9.set_index("Datetime")
obs_t18 = np.reshape(inv_y[:, 17], (inv_y.shape[0], 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (inv_y.shape[0], 1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = pd.DataFrame(df_t18, index=None, columns=["Obs. GWL t+18", "Pred. GWL t+18"])
df_t18 = pd.concat([df_t18, dates_18], axis=1)
df_t18 = df_t18.set_index("Datetime")
storm_dates_t1 = storm_data[['gwl(t+1)']]
storm_dates_t1.index = storm_dates_t1.index + pd.DateOffset(hours=1)
storm_dates_t9 = storm_data[['gwl(t+9)']]
storm_dates_t9.index = storm_dates_t9.index + pd.DateOffset(hours=9)
storm_dates_t18 = storm_data[['gwl(t+18)']]
storm_dates_t18.index = storm_dates_t18.index + pd.DateOffset(hours=18)
df_t1_storms = np.asarray(df_t1[df_t1.index.isin(storm_dates_t1.index)])
df_t9_storms = np.asarray(df_t9[df_t9.index.isin(storm_dates_t9.index)])
df_t18_storms = np.asarray(df_t18[df_t18.index.isin(storm_dates_t18.index)])
storms_list = [df_t1_storms, df_t9_storms, df_t18_storms]
return df_t1, df_t9, df_t18, storms_list
# create df of storm observed data and predictions
def storm_pred_df(storm_data, n_train, inv_y, inv_yhat):
test_dates_t1 = storm_data[['Datetime', 'tide(t+1)', 'rain(t+1)']].iloc[n_train:]
test_dates_t1 = test_dates_t1.reset_index(drop=True)
test_dates_t1['Datetime'] = pd.to_datetime(test_dates_t1['Datetime'])
test_dates_t1['Datetime'] = test_dates_t1['Datetime'] + pd.DateOffset(hours=1)
from tqdm import tqdm
import pandas as pd
import numpy as np
from pathlib import Path
from hashlib import md5
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy import sparse as sp
import argparse
def break_text(raw):
return np.array([ i for i, t in enumerate(raw) if t == '¶' ][::2])
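# Note (added): break_text returns the character offsets of the '¶' markers in the raw
# text, keeping every other occurrence ([::2]); the markers appear to delimit passages
# in pairs, and main() slices the text between consecutive kept offsets.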
def main(args):
if args.output.exists():
if not args.overwrite:  # argparse flags are attributes, not callables
raise FileExistsError(f"Output directory {args.output} exists.")
print(f"Output directory {args.output} exists. It will be overwritten.")
args.output.mkdir(exist_ok=True, parents=True)
ds_path = Path(args.dataset)
raw_text = {}
break_idx = {}
for fn in tqdm(list((ds_path / "en").glob("*.txt")), desc='Parsing text'):
fid = fn.name.split("_")[2]
raw = fn.read_text()
idx = break_text(raw)
break_idx[ fid ] = np.array(idx)
for i in range(len(idx)):
t = raw[idx[i]:] if i == len(idx)-1 else raw[idx[i]:idx[i+1]]
raw_text[f"{fid}_{i}"] = t.replace('¶', '').strip()
raw_text = pd.Series(raw_text)
import requests
import pandas as pd
import time
import json
import pymysql
pd.set_option('max_rows',500)
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
}
url = 'https://c.m.163.com/ug/api/wuhan/app/data/list-total' # the endpoint to query
r = requests.get(url, headers=headers) # send the request with requests
data_json = json.loads(r.text)
data = data_json['data'] # pull the data out of the json
# areaTree holds real-time data for every region: it is a list with one element per country,
# and each element's children holds the provinces/regions of that country.
data_province = data['areaTree'][2]['children'] # real-time data for each Chinese province
info = pd.DataFrame(data_province)[['id', 'lastUpdateTime', 'name']]
# extract the data under "today"
today_data = pd.DataFrame([province['today'] for province in data_province])
today_data.columns = ['today_' + i for i in today_data.columns] # keys under "today" clash with those under "total", so rename the columns
# extract the data under "total"
total_data = pd.DataFrame([province['total'] for province in data_province])
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
import numpy as np
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(df[price].rolling(n).mean(), name=name)
return out(SETTINGS, df, result)
def emaHelper(price, n, alphaIn=None):
"""
Algorithm by Stockchart
"""
length_of_df = len(price.axes[0])
initial_sma = price[0:n].mean()
ema = pd.Series(np.nan, index=range(0, length_of_df))
ema.iat[n-1] = initial_sma
if(not alphaIn):
alpha = (2.0/(n + 1.0))
else:
alpha = alphaIn
for i in range(n, length_of_df):
ema.iat[i] = price.iat[i]* alpha + (1-alpha)* ema.iat[i-1]
return ema
def EMA(df, n=5, price='Close'):
"""
Exponential Moving Average
"""
result = emaHelper(df, n)
return result
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
L = len(df['High'])
TR_l = [None]*L
for i in range(1, L):
TR = max(df['High'].iloc[i] - df['Low'].iloc[i], \
abs(df['High'].iloc[i] - df['Close'].iloc[i-1]), \
abs(df['Low'].iloc[i] - df['Close'].iloc[i-1]) )
TR_l[i] = TR
TR_s = pd.Series(TR_l[1::])
alpha = 1.0/n
result = emaHelper(TR_s, n, alpha)
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(df[price].rolling(n).mean())
MSD = pd.Series(df[price].rolling(n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(SOk.ewm(span=n, min_periods=n - 1).mean(), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = df[key].rolling(timeperiod, min_periods=timeperiod).mean()
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = df['Close'].ewm(span=n, min_periods=n - 1).mean()
EX2 = EX1.ewm(span=n, min_periods=n - 1).mean()
EX3 = EX2.ewm(span=n, min_periods=n - 1).mean()
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.iat[i + 1, df.columns.get_loc('High')] - df.iat[i, df.columns.get_loc('High')]
DoMove = df.iat[i, df.columns.get_loc('Low')] - df.iat[i + 1, df.columns.get_loc('Low')]
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.iat[i + 1, df.columns.get_loc('High')], df.iat[i, df.columns.get_loc('Close')]) - min(df.iat[i + 1, df.columns.get_loc('Low')], df.iat[i, df.columns.get_loc('Close')])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n - 1).mean() / ATR)
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n - 1).mean() / ATR)
temp = abs(PosDI - NegDI) / (PosDI + NegDI)
result = pd.Series(temp.ewm(span=n_ADX, min_periods=n_ADX - 1).mean(), name='ADX_' + str(n) + '_' + str(n_ADX))
return out(SETTINGS, df, result)
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(df[price].ewm(span=n_fast, min_periods=n_slow - 1).mean())
EMAslow = pd.Series(df[price].ewm(span=n_slow, min_periods=n_slow - 1).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=8).mean(), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = Range.ewm(span=9, min_periods=8).mean()
EX2 = EX1.ewm(span=9, min_periods=8).mean()
Mass = EX1 / EX2
result = pd.Series(Mass.rolling(25).sum(), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.iat[i + 1, df.columns.get_loc('High')], df.iat[i, df.columns.get_loc('Close')]) - min(df.iat[i + 1, df.columns.get_loc('Low')], df.iat[i, df.columns.get_loc('Close')])
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.iat[i + 1, df.columns.get_loc('High')] - df.iat[i, df.columns.get_loc('Low')]) - abs(df.iat[i + 1, df.columns.get_loc('Low')] - df.iat[i, df.columns.get_loc('High')])
VM.append(Range)
i = i + 1
result = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum(), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(ROC1.rolling(n1).sum() + ROC2.rolling(n2).sum() * 2 + ROC3.rolling(n3).sum() * 3 + ROC4.rolling(n4).sum() * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iat[i + 1, df.columns.get_loc('High')] - df.iat[i, df.columns.get_loc('High')]
DoMove = df.iat[i, df.columns.get_loc('Low')] - df.iat[i + 1, df.columns.get_loc('Low')]
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n - 1).mean())
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n - 1).mean())
result = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return out(SETTINGS, df, result)
def TSI(df, r, s):
"""
True Strength Index
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(M.ewm(span=r, min_periods=r - 1).mean())
aEMA1 = pd.Series(aM.ewm(span=r, min_periods=r - 1).mean())
EMA2 = pd.Series(EMA1.ewm(span=s, min_periods=s - 1).mean())
aEMA2 = pd.Series(aEMA1.ewm(span=s, min_periods=s - 1).mean())
result = pd.Series(EMA2 / aEMA2, name='TSI_' + str(r) + '_' + str(s))
return out(SETTINGS, df, result)
def ACCDIST(df, n):
"""
Accumulation/Distribution
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
result = pd.Series(ROC, name='Acc/Dist_ROC_' + str(n))
return out(SETTINGS, df, result)
def Chaikin(df):
"""
Chaikin Oscillator
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
result = pd.Series(ad.ewm(span=3, min_periods=2).mean() - ad.ewm(span=10, min_periods=9).mean(), name='Chaikin')
return out(SETTINGS, df, result)
def MFI(df, n):
"""
Money Flow Index and Ratio
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < len(df) - 1: # df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.iat[i + 1, df.columns.get_loc('Volume')])
else:
PosMF.append(0)
i=i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
MFR = pd.Series(PosMF / TotMF)
result = pd.Series(MFR.rolling(n).mean(), name='MFI_' + str(n))
return out(SETTINGS, df, result)
def OBV(df, n):
"""
On-balance Volume
"""
i = 0
OBV = [0]
while i < len(df) - 1: # df.index[-1]:
if df.iat[i + 1, df.columns.get_loc('Close')] - df.iat[i, df.columns.get_loc('Close')] > 0:
OBV.append(df.iat[i + 1, df.columns.get_loc('Volume')])
if df.iat[i + 1, df.columns.get_loc('Close')] - df.iat[i, df.columns.get_loc('Close')] == 0:
OBV.append(0)
if df.iat[i + 1, df.columns.get_loc('Close')] - df.iat[i, df.columns.get_loc('Close')] < 0:
OBV.append(-df.iat[i + 1, df.columns.get_loc('Volume')])
i = i + 1
OBV = pd.Series(OBV)
result = pd.Series(OBV.rolling(n).mean(), name='OBV_' + str(n))
return out(SETTINGS, df, result)
def FORCE(df, n):
"""
Force Index
"""
result = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name='Force_' + str(n))
return out(SETTINGS, df, result)
def EOM(df, n):
"""
Ease of Movement
"""
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
result = pd.Series(EoM.rolling(n).mean(), name='EoM_' + str(n))
return out(SETTINGS, df, result)
def CCI(df, n):
"""
Commodity Channel Index
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
result = pd.Series((PP - PP.rolling(n).mean()) / PP.rolling(n).std(), name='CCI_' + str(n))
return out(SETTINGS, df, result)
def COPP(df, n):
"""
Coppock Curve
"""
M = df['Close'].diff(int(n * 11 / 10) - 1)
N = df['Close'].shift(int(n * 11 / 10) - 1)
ROC1 = M / N
M = df['Close'].diff(int(n * 14 / 10) - 1)
N = df['Close'].shift(int(n * 14 / 10) - 1)
ROC2 = M / N
temp = ROC1 + ROC2
result = pd.Series(temp.ewm(span=n, min_periods=n).mean(), name='Copp_' + str(n))
return out(SETTINGS, df, result)
def KELCH(df, n):
"""
Keltner Channel
"""
temp = (df['High'] + df['Low'] + df['Close']) / 3
KelChM = pd.Series(temp.rolling(n).mean(), name='KelChM_' + str(n))
temp = (4 * df['High'] - 2 * df['Low'] + df['Close']) / 3
KelChU = pd.Series(temp.rolling(n).mean(), name='KelChU_' + str(n))
temp = (-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3
KelChD = pd.Series(temp.rolling(n).mean(), name='KelChD_' + str(n))
result = pd.DataFrame([KelChM, KelChU, KelChD]).transpose()
return out(SETTINGS, df, result)
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas as pd
import numpy as np
from alphaware.base import (Factor,
FactorContainer)
from alphaware.analyzer import FactorSimpleRank
from pandas.util.testing import assert_series_equal
from datetime import datetime as dt
class TestFactorSimpleRank(TestCase):
def test_factor_simple_rank_1(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28'], ['001', '002', '003']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
factor_test = Factor(data=data1, name='alpha1')
fc = FactorContainer('2014-01-30', '2014-02-28', [factor_test])
t = FactorSimpleRank()
t.fit(fc)
calculate = t.transform(fc)['score']
index = pd.MultiIndex.from_product([[dt(2014, 1, 30), dt(2014, 2, 28)], ['001', '002', '003']],
names=['trade_date', 'ticker'])
expected = pd.Series(index=index,
data=[0.0, 1.0, 2.0, 0.0, 1.0, 2.0], name='score')
assert_series_equal(calculate, expected)
def test_factor_simple_rank_2(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28'], ['001', '002', '003']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
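# Note (added): df_checks is wide-format data; ht1 and ht2 hold height measurements
# at ages 1 and 2, which the pivot_longer tests below reshape into long format.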
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern is provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if names_pattern is a list/tuple and
.value in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
assert_frame_equal(result, actual)
def test_pivot_sort_by_appearance(df_checks):
"""Test output if sort_by_appearance is True."""
result = df_checks.pivot_longer(
column_names="ht*",
names_to="dim",
values_to="num",
sort_by_appearance=True,
)
actual = (
df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
.sort_index()
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pat_str(df_checks):
"""
Test output when names_pattern is a string,
and .value is present.
"""
result = (
df_checks.pivot_longer(
column_names="ht*",
names_to=(".value", "age"),
names_pattern="(.+)(.)",
sort_by_appearance=True,
)
.reindex(columns=["famid", "birth", "age", "ht"])
.astype({"age": int})
)
actual = pd.wide_to_long(
df_checks, stubnames="ht", i=["famid", "birth"], j="age"
).reset_index()
assert_frame_equal(result, actual)
def test_multiindex_column_level(df_multi):
"""
Test output from MultiIndex column,
when column_level is provided.
"""
result = df_multi.pivot_longer(
index="name", column_names="names", column_level=0
)
expected_output = df_multi.melt(
id_vars="name", value_vars="names", col_level=0
)
assert_frame_equal(result, expected_output)
def test_multiindex(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
and there is no names_sep/names_pattern.
"""
result = df_multi.pivot_longer(index=[("name", "a")])
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
there is no names_sep/names_pattern,
and names_to is provided as a sequence.
"""
result = df_multi.pivot_longer(
index=[("name", "a")], names_to=["variable_0", "variable_1"]
)
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to_length_mismatch(df_multi):
"""
Raise error if the length of names_to does not
match the number of column levels.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_to=["variable_0", "variable_1", "variable_2"],
)
def test_multiindex_incomplete_level_names(df_multi):
"""
Raise error if not all the levels have names.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_multiindex_index_level_names_intersection(df_multi):
"""
Raise error if level names exist in index.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_no_column_names(df_checks):
"""
Test output if all the columns
are assigned to the index parameter.
"""
assert_frame_equal(
df_checks.pivot_longer(df_checks.columns).rename_axis(columns=None),
df_checks,
)
@pytest.fixture
def test_df():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"off_loc": ["A", "B", "C", "D", "E", "F"],
"pt_loc": ["G", "H", "I", "J", "K", "L"],
"pt_lat": [
100.07548220000001,
75.191326,
122.65134479999999,
124.13553329999999,
124.13553329999999,
124.01028909999998,
],
"off_lat": [
121.271083,
75.93845266,
135.043791,
134.51128400000002,
134.484374,
137.962195,
],
"pt_long": [
4.472089953,
-144.387785,
-40.45611048,
-46.07156181,
-46.07156181,
-46.01594293,
],
"off_long": [
-7.188632000000001,
-143.2288569,
21.242563,
40.937416999999996,
40.78472,
22.905889000000002,
],
}
)
def test_names_pattern_str(test_df):
"""Test output for names_pattern and .value."""
result = test_df.pivot_longer(
column_names="*_*",
names_to=["set", ".value"],
names_pattern="(.+)_(.+)",
sort_by_appearance=True,
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=r".+",
)
.reset_index("set")
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_sep(test_df):
"""Test output for names_sep and .value."""
result = test_df.pivot_longer(
names_to=["set", ".value"], names_sep="_", sort_by_appearance=True
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=".+",
)
.reset_index("set")
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pattern_list():
"""Test output for names_pattern if list/tuple."""
df = pd.DataFrame(
{
"Activity": ["P1", "P2"],
"General": ["AA", "BB"],
"m1": ["A1", "B1"],
"t1": ["TA1", "TB1"],
"m2": ["A2", "B2"],
"t2": ["TA2", "TB2"],
"m3": ["A3", "B3"],
"t3": ["TA3", "TB3"],
}
)
result = df.pivot_longer(
index=["Activity", "General"],
names_pattern=["^m", "^t"],
names_to=["M", "Task"],
sort_by_appearance=True,
).loc[:, ["Activity", "General", "Task", "M"]]
actual = (
pd.wide_to_long(
df, i=["Activity", "General"], stubnames=["t", "m"], j="number"
)
.set_axis(["Task", "M"], axis="columns")
.droplevel(-1)
.reset_index()
)
assert_frame_equal(result, actual)
@pytest.fixture
def not_dot_value():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault_2012": [48.1, 46.4, 44.3],
"floor_2012": [45.4, 41.6, 40.8],
"vault_2016": [46.9, 45.7, 44.3],
"floor_2016": [46.0, 42.0, 42.1],
}
)
def test_not_dot_value_sep(not_dot_value):
"""Test output when names_sep and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to=("event", "year"),
names_sep="_",
values_to="score",
sort_by_appearance=True,
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = not_dot_value.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_not_dot_value_sep2(not_dot_value):
"""Test output when names_sep and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to="event",
names_sep="/",
values_to="score",
)
actual = not_dot_value.melt(
"country", var_name="event", value_name="score"
)
assert_frame_equal(result, actual)
def test_not_dot_value_pattern(not_dot_value):
"""Test output when names_pattern is a string and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to=("event", "year"),
names_pattern=r"(.+)_(.+)",
values_to="score",
sort_by_appearance=True,
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = not_dot_value.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_not_dot_value_sep_single_column(not_dot_value):
"""
Test output when names_sep and no dot_value
for a single column.
"""
A = not_dot_value.loc[:, ["country", "vault_2012"]]
result = A.pivot_longer(
"country",
names_to=("event", "year"),
names_sep="_",
values_to="score",
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = A.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_multiple_dot_value():
"""Test output for multiple .value."""
df = pd.DataFrame(
{
"x_1_mean": [1, 2, 3, 4],
"x_2_mean": [1, 1, 0, 0],
"x_1_sd": [0, 1, 1, 1],
"x_2_sd": [0.739, 0.219, 1.46, 0.918],
"y_1_mean": [1, 2, 3, 4],
"y_2_mean": [1, 1, 0, 0],
"y_1_sd": [0, 1, 1, 1],
"y_2_sd": [-0.525, 0.623, -0.705, 0.662],
"unit": [1, 2, 3, 4],
}
)
result = df.pivot_longer(
index="unit",
names_to=(".value", "time", ".value"),
names_pattern=r"(x|y)_([0-9])(_mean|_sd)",
).astype({"time": int})
actual = df.set_index("unit")
cols = [ent.split("_") for ent in actual.columns]
actual.columns = [f"{start}_{end}{middle}" for start, middle, end in cols]
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["x_mean", "y_mean", "x_sd", "y_sd"],
i="unit",
j="time",
)
.sort_index(axis=1)
.reset_index()
)
assert_frame_equal(result, actual)
@pytest.fixture
def single_val():
"""fixture dataframe"""
return pd.DataFrame(
{
"id": [1, 2, 3],
"x1": [4, 5, 6],
"x2": [5, 6, 7],
}
)
def test_multiple_dot_value2(single_val):
"""Test output for multiple .value."""
result = single_val.pivot_longer(
index="id", names_to=(".value", ".value"), names_pattern="(.)(.)"
)
assert_frame_equal(result, single_val)
def test_names_pattern_sequence_single_unique_column(single_val):
"""
Test output if names_pattern is a sequence of length 1.
"""
result = single_val.pivot_longer(
"id", names_to=["x"], names_pattern=("x",)
)
actual = (
pd.wide_to_long(single_val, ["x"], i="id", j="num")
.droplevel("num")
.reset_index()
)
assert_frame_equal(result, actual)
def test_names_pattern_single_column(single_val):
"""
Test output if names_to is only '.value'.
"""
result = single_val.pivot_longer(
"id", names_to=".value", names_pattern="(.)."
)
actual = (
pd.wide_to_long(single_val, ["x"], i="id", j="num")
.droplevel("num")
.reset_index()
)
    assert_frame_equal(result, actual)  # api: pandas.testing.assert_frame_equal
# Importing packages
import pandas as pd
def reformatData(df, feat, hasCombinations=True):
"""
    Reformats stacked data into pandas data structures.
"""
# Initializing variables
new_data = list()
new_index = list()
# Stacking data
stacked = df.T.stack(level=0)
    # Iterating over the stacked index: one level holds the type of data and the
    # other the combination. This slices the data and generates the new indexes.
    for name, combination in stacked.index:
        new_index.append("{0}_{1}".format(name, combination))
        new_data.append(stacked[name][combination])
    # Creating series
    reformatted = pd.Series(data=new_data, index=new_index, name=feat)  # api: pandas.Series
    return reformatted
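# Illustrative sketch (not part of the original module): how the transpose /
# stack / flatten steps in reformatData behave on a small hypothetical frame.
def _sketch_reformat_stacked():
    import pandas as pd

    df = pd.DataFrame(
        {"mean": [1.0, 2.0], "sd": [0.1, 0.2]}, index=["groupA", "groupB"]
    )
    stacked = df.T.stack(level=0)
    flat = pd.Series(
        {
            "{0}_{1}".format(name, combination): stacked[name][combination]
            for name, combination in stacked.index
        },
        name="feature",
    )
    # 'flat' is indexed by labels such as 'mean_groupA' and 'sd_groupB'.
    return flat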
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from collections.abc import Iterable
from datetime import datetime, timedelta
from operator import attrgetter
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.detector_consts import (
AnomalyResponse,
ChangePointInterval,
ConfidenceBand,
PercentageChange,
SingleSpike,
)
from parameterized import parameterized
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
class SingleSpikeTest(TestCase):
def test_spike(self) -> None:
spike_time_str = "2020-03-01"
spike_time = datetime.strptime(spike_time_str, "%Y-%m-%d")
spike = SingleSpike(time=spike_time, value=1.0, n_sigma=3.0)
self.assertEqual(spike.time_str, spike_time_str)
class UnivariateChangePointIntervalTest(TestCase):
# test for univariate time series
def setUp(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
self.previous_seq = [date_start + timedelta(days=x) for x in range(15)]
self.current_length = 10
current_seq = [
self.previous_seq[10] + timedelta(days=x)
for x in range(self.current_length)
]
self.previous_values = np.random.randn(len(self.previous_seq))
current_values = np.random.randn(len(current_seq))
# add a very large value to detect spikes
current_values[0] = 100.0
previous = TimeSeriesData(
pd.DataFrame({"time": self.previous_seq, "value": self.previous_values})
)
current = TimeSeriesData(
pd.DataFrame({"time": current_seq, "value": current_values})
)
self.current_mean = np.mean(current_values)
self.current_variance = np.var(current_values)
previous_extend = TimeSeriesData(
pd.DataFrame(
{"time": self.previous_seq[9:], "value": self.previous_values[9:]}
)
)
prev_start = self.previous_seq[0]
prev_end = self.previous_seq[9]
self.current_start = current_seq[0]
self.current_start_time_str = datetime.strftime(self.current_start, "%Y-%m-%d")
self.current_end = current_seq[-1] + timedelta(days=1)
self.current_end_time_str = datetime.strftime(self.current_end, "%Y-%m-%d")
self.previous_int = ChangePointInterval(prev_start, prev_end)
self.previous_int.data = previous
        # construct interval to test whether data is clipped properly to start and end dates
self.previous_int_test = ChangePointInterval(prev_start, prev_end)
self.previous_int_test.data = previous
# test extending the data
# now the data is extended to include the whole sequence
self.previous_int.end_time = self.previous_seq[-1] + timedelta(days=1)
self.previous_int.extend_data(previous_extend)
self.current_int = ChangePointInterval(self.current_start, self.current_end)
self.current_int.data = current
self.current_int.previous_interval = self.previous_int
self.spike_list = self.current_int.spikes
def test_start_end_date(self) -> None:
        # tests whether data is clipped properly to start and end dates
np.testing.assert_array_equal(
self.previous_values[0:9], self.previous_int_test.data
)
def test_interval_seq_length(self) -> None:
self.assertEqual(len(self.previous_int), len(self.previous_seq))
# pyre-ignore Undefined attribute [16]: Module parameterized.parameterized has no attribute expand.
@parameterized.expand(
[
["start_time", "current_start"],
["end_time", "current_end"],
["start_time_str", "current_start_time_str"],
["end_time_str", "current_end_time_str"],
["mean_val", "current_mean"],
["variance_val", "current_variance"],
["previous_interval", "previous_int"],
]
)
# check all the properties
def test_properties(self, attribute, initial_object) -> None:
self.assertEqual(
attrgetter(attribute)(self.current_int), attrgetter(initial_object)(self)
)
def test_length(self) -> None:
self.assertEqual(len(self.current_int), self.current_length)
# check spike detection
def test_spike_start_value(self) -> None:
self.assertEqual(self.spike_list[0].value, 100.0)
def test_spike_start_time_str(self) -> None:
self.assertEqual(
self.spike_list[0].time_str,
self.current_start_time_str,
)
class MultivariateChangePointIntervalTest(TestCase):
# test for multivariate time series
def setUp(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
self.previous_seq = [date_start + timedelta(days=x) for x in range(15)]
self.current_length = 10
current_seq = [
self.previous_seq[10] + timedelta(days=x)
for x in range(self.current_length)
]
self.num_seq = 5
self.previous_values = [
np.random.randn(len(self.previous_seq)) for _ in range(self.num_seq)
]
current_values = [
np.random.randn(len(current_seq)) for _ in range(self.num_seq)
]
self.current_values_mean = [
np.mean(current_values[i]) for i in range(self.num_seq)
]
self.current_values_variance = [
np.var(current_values[i]) for i in range(self.num_seq)
]
# add a very large value to detect spikes
for i in range(self.num_seq):
current_values[i][0] = 100 * (i + 1)
previous = TimeSeriesData(
pd.DataFrame(
{
**{"time": self.previous_seq},
**{
f"value_{i}": self.previous_values[i]
for i in range(self.num_seq)
},
}
)
)
current = TimeSeriesData(
pd.DataFrame(
{
**{"time": current_seq},
**{f"value_{i}": current_values[i] for i in range(self.num_seq)},
}
)
)
previous_extend = TimeSeriesData(
pd.DataFrame(
{
**{"time": self.previous_seq[9:]},
**{
f"value_{i}": self.previous_values[i][9:]
for i in range(self.num_seq)
},
}
)
)
prev_start = self.previous_seq[0]
prev_end = self.previous_seq[9]
self.current_start = current_seq[0]
self.current_start_date_str = datetime.strftime(self.current_start, "%Y-%m-%d")
self.current_end = current_seq[-1] + timedelta(days=1)
self.current_end_date_str = datetime.strftime(self.current_end, "%Y-%m-%d")
self.previous_int = ChangePointInterval(prev_start, prev_end)
self.previous_int.data = previous
# now the data is extended to include the whole sequence except the last point
self.previous_int.end_time = self.previous_seq[-1] # + timedelta(days=1)
self.previous_int.extend_data(previous_extend)
# construct data to test if clipped properly to start and end dates
self.previous_int1 = ChangePointInterval(prev_start, prev_end)
self.previous_int1.data = previous
# let's repeat this except without truncating the final point
self.previous_int2 = ChangePointInterval(prev_start, prev_end)
self.previous_int2.data = previous
self.previous_int2.end_time = self.previous_seq[-1] + timedelta(days=1)
self.previous_int2.extend_data(previous_extend)
# let's extend the date range so it's longer than the data
# this should not change the results
self.previous_int3 = ChangePointInterval(prev_start, prev_end)
self.previous_int3.data = previous
self.previous_int3.end_time = self.previous_seq[-1] + timedelta(days=2)
self.previous_int3.extend_data(previous_extend)
# let's construct the current ChangePointInterval
self.current_int = ChangePointInterval(self.current_start, self.current_end)
self.current_int.data = current
self.current_int.previous_interval = self.previous_int
self.current_int_length = len(self.current_int)
# spike detection
self.spike_array = self.current_int.spikes
def test_start_end_date(self) -> None:
# tests whether data is clipped properly to start and end dates
for i in range(self.num_seq):
self.assertEqual(
# pyre-fixme[16]: Optional type has no attribute `__getitem__`.
self.previous_int1.data[:, i].tolist(),
self.previous_values[i][0:9].tolist(),
)
def test_extend_length_except_last_point(self) -> None:
# test extending the data to include the whole sequence except the last point
self.assertEqual(len(self.previous_int) + 1, len(self.previous_seq))
# pyre-ignore Undefined attribute [16]: Module parameterized.parameterized has no attribute expand.
@parameterized.expand(
[
["previous_int2"],
["previous_int3"],
]
)
def test_extend_length(self, attribute) -> None:
self.assertEqual(len(attrgetter(attribute)(self)), len(self.previous_seq))
# pyre-ignore Undefined attribute [16]: Module parameterized.parameterized has no attribute expand.
@parameterized.expand(
[
["start_time", "current_start"],
["end_time", "current_end"],
["num_series", "num_seq"],
["start_time_str", "current_start_date_str"],
["end_time_str", "current_end_date_str"],
["previous_interval", "previous_int"],
]
)
def check_current_int_properties(self, attribute, initial_object) -> None:
# check all the properties
self.assertEqual(
attrgetter(attribute)(self.current_int), attrgetter(initial_object)(self)
)
# pyre-ignore Undefined attribute [16]: Module parameterized.parameterized has no attribute expand.
@parameterized.expand(
[
["mean_val", "current_values_mean"],
["variance_val", "current_values_variance"],
]
)
def check_current_int_mean_var(self, attribute, initial_object) -> None:
self.assertEqual(
attrgetter(attribute)(self.current_int).tolist(),
attrgetter(initial_object)(self),
)
# pyre-ignore Undefined attribute [16]: Module parameterized.parameterized has no attribute expand.
@parameterized.expand(
[["current_int", "current_length"], ["spike_array", "num_seq"]]
)
def check_length(self, attribute, initial_object) -> None:
self.assertEqual(
len(attrgetter(attribute)(self)), attrgetter(initial_object)(self)
)
def check_spike_array_value(self) -> None:
for i in range(self.num_seq):
self.assertEqual(self.spike_array[i][0].value, 100 * (i + 1))
def check_spike_array_time_str(self) -> None:
for i in range(self.num_seq):
self.assertEqual(
self.spike_array[i][0].time_str,
self.current_start_date_str,
)
class UnivariatePercentageChangeTest(TestCase):
# test for univariate time series
def setUp(self):
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(30)]
current_length = 31
# offset one to make the new interval start one day after the previous one ends
current_seq = [
previous_seq[-1] + timedelta(days=(x + 1)) for x in range(current_length)
]
previous_values = 1.0 + 0.25 * np.random.randn(len(previous_seq))
current_values = 10.0 + 0.25 * np.random.randn(len(current_seq))
previous = TimeSeriesData(
            pd.DataFrame({"time": previous_seq, "value": previous_values})  # api: pandas.DataFrame
        )
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
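    def _sketch_from_codes(self):
        # Illustrative sketch, not a test: from_codes builds a Categorical
        # directly from integer codes plus an explicit list of categories
        # (the labels used here are made up for illustration).
        cat = Categorical.from_codes([0, 1, 2, 0], ["low", "mid", "high"])
        assert list(cat.get_values()) == ["low", "mid", "high", "low"]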
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
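    def _sketch_describe_output(self):
        # Illustrative sketch, not a test: describe() returns a frame of
        # counts and relative frequencies indexed by category.
        desc = Categorical(["a", "a", "b", "c"]).describe()
        assert list(desc.columns) == ["counts", "freqs"]
        assert desc["counts"].tolist() == [2, 1, 1]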
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
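    def _sketch_set_categories_reorder(self):
        # Illustrative sketch, not a test: set_categories re-maps the codes so
        # that the same values point at the positions of the new categories,
        # while get_values() stays unchanged.
        c = Categorical([1, 2, 3], categories=[1, 2, 3], ordered=True)
        assert list(c.codes) == [0, 1, 2]
        c2 = c.set_categories([3, 2, 1])
        assert list(c2.codes) == [2, 1, 0]
        assert list(c2.get_values()) == [1, 2, 3]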
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
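    def _sketch_codes_representation(self):
        # Illustrative sketch, not a test: the integer codes index into
        # `categories`, and -1 marks a missing value.
        c = Categorical(["a", "b", np.nan, "a"])
        assert list(c.categories) == ["a", "b"]
        assert list(c.codes) == [0, 1, -1, 0]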
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5])  # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
                  [1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)  # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
from nemosis import data_fetch_methods, defaults
import pandas as pd
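# Maps the price labels used in this module to AEMO DISPATCHPRICE column names.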
aemo_price_names = {'energy': 'RRP',
'raise_regulation': 'RAISEREGRRP',
'raise_6_second': 'RAISE6SECRRP',
'raise_60_second': 'RAISE60SECRRP',
'raise_5_minute': 'RAISE5MINRRP'}
def get_model_training_data(start_time, end_time, region, raw_data_cache):
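    """
    Build the historical training table for one region: the regional energy
    price merged with residual demand, with 'interval' and 'hour' features
    added and SETTLEMENTDATE dropped.

    Example (illustrative arguments only)::

        training_data = get_model_training_data(
            '2021/01/01 00:00:00', '2021/01/07 00:00:00', 'nsw', 'nem_data_cache')
    """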
price_data = get_regional_prices(start_time, end_time, raw_data_cache)
price_data = price_data.loc[:, ['SETTLEMENTDATE', '{}-energy'.format(region)]]
demand_data = get_residual_demand(start_time, end_time, raw_data_cache)
historical_data = pd.merge(price_data, demand_data, on='SETTLEMENTDATE')
historical_data = historical_data.reset_index(drop=True)
historical_data['interval'] = historical_data.index
historical_data['hour'] = historical_data['SETTLEMENTDATE'].dt.hour
historical_data = historical_data.drop(columns=['SETTLEMENTDATE'])
return historical_data
def get_forward_data_for_forecast(start_time, end_time, raw_data_cache):
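    """
    Build the forward-looking feature table used for forecasting: residual
    demand ordered by SETTLEMENTDATE, with 'interval' and 'hour' features and
    no price columns.
    """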
demand_data = get_residual_demand(start_time, end_time, raw_data_cache)
demand_data = demand_data.sort_values('SETTLEMENTDATE')
demand_data = demand_data.reset_index(drop=True)
forward_data = demand_data.copy()
forward_data['interval'] = demand_data.index
forward_data['hour'] = forward_data['SETTLEMENTDATE'].dt.hour
forward_data = forward_data.drop(columns=['SETTLEMENTDATE'])
return forward_data
def get_regional_prices(start_time, end_time, raw_data_cache):
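    """
    Fetch DISPATCHPRICE data (non-intervention runs only) and pivot it into
    one column per region and market, named like 'nsw-energy' or
    'qld-raise_regulation', keyed by SETTLEMENTDATE.
    """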
dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHPRICE', raw_data_cache,
select_columns=['SETTLEMENTDATE', 'INTERVENTION',
'REGIONID', 'RRP', 'RAISEREGRRP',
'RAISE6SECRRP', 'RAISE60SECRRP',
'RAISE5MINRRP'])
dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]
data = pd.DataFrame()
for name, aemo_name in aemo_price_names.items():
dispatch_data[aemo_name] = pd.to_numeric(dispatch_data[aemo_name])
data_temp = dispatch_data.pivot_table(values=aemo_name, index='SETTLEMENTDATE', columns='REGIONID')
data_temp = data_temp.reset_index().fillna(0.0)
data_temp = data_temp.rename(columns={'QLD1': 'qld', 'NSW1': 'nsw', 'VIC1': 'vic', 'SA1': 'sa', 'TAS1': 'tas'})
data_temp.columns = [col + '-' + name if col != 'SETTLEMENTDATE' else col for col in data_temp.columns]
if data.empty:
data = data_temp
else:
data = pd.merge(data, data_temp, on=['SETTLEMENTDATE'])
return data
def get_regional_demand(start_time, end_time, raw_data_cache):
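    """
    Fetch DISPATCHREGIONSUM total demand (non-intervention runs only) and
    pivot it into one '<region>-demand' column per region, keyed by
    SETTLEMENTDATE.
    """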
dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHREGIONSUM', raw_data_cache,
select_columns=['SETTLEMENTDATE', 'INTERVENTION',
'REGIONID', 'TOTALDEMAND'])
dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]
dispatch_data['TOTALDEMAND'] = pd.to_numeric(dispatch_data['TOTALDEMAND'])
dispatch_data = dispatch_data.pivot_table(values='TOTALDEMAND', index='SETTLEMENTDATE', columns='REGIONID')
dispatch_data = dispatch_data.reset_index().fillna(0.0)
dispatch_data = dispatch_data.rename(columns={'QLD1': 'qld', 'NSW1': 'nsw', 'VIC1': 'vic', 'SA1': 'sa',
'TAS1': 'tas'})
dispatch_data.columns = [col + '-demand' if col != 'SETTLEMENTDATE' else col for col in dispatch_data.columns]
return dispatch_data
def get_duid_techs(raw_data_cache):
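    """
    Map each DUID to a coarse technology category derived from its fuel
    source and technology type descriptors.
    """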
cols = ['DUID', 'Region', 'Fuel Source - Descriptor', 'Technology Type - Descriptor']
tech_data = data_fetch_methods.static_table_xl('Generators and Scheduled Loads', raw_data_cache, select_columns=cols)
def tech_classifier(fuel_source, technology_type):
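        """Collapse fuel source / technology descriptor pairs into coarse
        categories (Hydro, OCGT, CCGT, ZEROSRMC, Engine, Gas Thermal, Storage)."""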
category = fuel_source
if technology_type == 'Hydro - Gravity':
category = 'Hydro'
elif technology_type == 'Open Cycle Gas turbines (OCGT)':
category = 'OCGT'
elif technology_type == 'Combined Cycle Gas Turbine (CCGT)':
category = 'CCGT'
elif technology_type == 'Run of River' or fuel_source == 'Solar' or fuel_source == 'Wind' or fuel_source == 'Solar ':
category = 'ZEROSRMC'
elif technology_type == 'Spark Ignition Reciprocating Engine':
category = 'Engine'
elif technology_type == 'Compression Reciprocating Engine':
category = 'Engine'
elif technology_type == 'Steam Sub-Critical' and (fuel_source == 'Natural Gas / Fuel Oil' or fuel_source == 'Natural Gas'):
category = 'Gas Thermal'
elif technology_type == 'Pump Storage' or technology_type == 'Battery':
category = 'Storage'
return category
tech_data['TECH'] = tech_data.apply(lambda x: tech_classifier(x['Fuel Source - Descriptor'],
x['Technology Type - Descriptor']),
axis=1)
return tech_data.loc[:, ['DUID', 'Region', 'TECH']]
def get_tech_operating_capacities(start_time, end_time, raw_data_cache):
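    """
    Sum unit availability from DISPATCHLOAD by technology category and pivot
    it into one '<tech>-capacity' column per category, keyed by
    SETTLEMENTDATE.
    """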
tech_data = get_duid_techs(raw_data_cache)
dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHLOAD', raw_data_cache,
select_columns=['DUID', 'SETTLEMENTDATE',
'INTERVENTION', 'AVAILABILITY'])
dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]
dispatch_data = pd.merge(dispatch_data, tech_data, on='DUID')
dispatch_data['AVAILABILITY'] = pd.to_numeric(dispatch_data['AVAILABILITY'])
dispatch_data = dispatch_data.groupby(['TECH', 'SETTLEMENTDATE'], as_index=False).aggregate({'AVAILABILITY': 'sum'})
dispatch_data['tech_region'] = dispatch_data['TECH'] + '-capacity'
dispatch_data = dispatch_data.pivot_table(values='AVAILABILITY', index='SETTLEMENTDATE', columns='tech_region')
dispatch_data = dispatch_data.reset_index().fillna(0.0)
return dispatch_data
def get_fleet_dispatch(start_time, end_time, fleet_units, region, raw_data_cache):
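    """
    Aggregate energy and raise FCAS dispatch targets across the given fleet
    of DUIDs and rename the columns to '<region>-<service>-fleet-dispatch'.
    """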
dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHLOAD', raw_data_cache,
select_columns=['DUID', 'SETTLEMENTDATE', 'TOTALCLEARED',
'INTERVENTION', 'RAISEREG', 'RAISE6SEC',
'RAISE60SEC', 'RAISE5MIN'])
dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]
dispatch_data = dispatch_data[dispatch_data['DUID'].isin(fleet_units)]
dispatch_data['TOTALCLEARED'] = pd.to_numeric(dispatch_data['TOTALCLEARED'])
dispatch_data['RAISEREG'] = pd.to_numeric(dispatch_data['RAISEREG'])
dispatch_data['RAISE6SEC'] = pd.to_numeric(dispatch_data['RAISE6SEC'])
dispatch_data['RAISE60SEC'] = pd.to_numeric(dispatch_data['RAISE60SEC'])
dispatch_data['RAISE5MIN'] = pd.to_numeric(dispatch_data['RAISE5MIN'])
dispatch_data = dispatch_data.groupby('SETTLEMENTDATE', as_index=False).aggregate(
{'TOTALCLEARED': 'sum', 'RAISEREG': 'sum', 'RAISE6SEC': 'sum', 'RAISE60SEC': 'sum', 'RAISE5MIN': 'sum'})
aemo_dispatch_names = {'TOTALCLEARED': region + '-energy-fleet-dispatch',
'RAISEREG': region + '-raise_regulation-fleet-dispatch',
'RAISE6SEC': region + '-raise_6_second-fleet-dispatch',
'RAISE60SEC': region + '-raise_60_second-fleet-dispatch',
'RAISE5MIN': region + '-raise_5_minute-fleet-dispatch'}
dispatch_data = dispatch_data.rename(columns=aemo_dispatch_names)
return dispatch_data
def get_unit_dispatch(start_time, end_time, unit, raw_data_cache):
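    """
    Return the INITIALMW value from the first DISPATCHLOAD record for the
    given unit in the window, as a float.
    """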
dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHLOAD', raw_data_cache,
select_columns=['DUID', 'SETTLEMENTDATE', 'INTERVENTION',
'INITIALMW'])
dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]
dispatch_data = dispatch_data[dispatch_data['DUID'] == unit]
initial_mw = dispatch_data['INITIALMW'].iloc[0]
return float(initial_mw)
def get_residual_demand(start_time, end_time, raw_data_cache):
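    """
    Estimate regional residual demand, i.e. total demand net of zero-SRMC
    (wind and solar) generation, from DISPATCHREGIONSUM and
    DISPATCH_UNIT_SCADA.
    """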
cols = ['DUID', 'Region', 'Fuel Source - Descriptor']
tech_data = data_fetch_methods.static_table_xl('Generators and Scheduled Loads', raw_data_cache, select_columns=cols)
zero_srmc_techs = ['Wind', 'Solar', 'Solar ']
tech_data = tech_data[tech_data['Fuel Source - Descriptor'].isin(zero_srmc_techs)]
scada_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCH_UNIT_SCADA', raw_data_cache)
scada_data = pd.merge(scada_data, tech_data, on='DUID')
scada_data['SCADAVALUE'] = pd.to_numeric(scada_data['SCADAVALUE'])
scada_data = scada_data.groupby(['SETTLEMENTDATE', 'Region'], as_index=False).agg({'SCADAVALUE': 'sum'})
regional_demand = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHREGIONSUM', raw_data_cache)
regional_demand = regional_demand[regional_demand['INTERVENTION'] == 0]
regional_demand = pd.merge(regional_demand, scada_data, left_on=['SETTLEMENTDATE', 'REGIONID'],
right_on=['SETTLEMENTDATE', 'Region'])
regional_demand['TOTALDEMAND'] = pd.to_numeric(regional_demand['TOTALDEMAND'])
import pandas as pd
class RawReader:
"""
Reads and consumes raw data files (stored as raw.jsonl) from the Music Enabled Running project.
The state can be updated by feeding it additional lines (msg) from the data file.
You can then extract the data of the different sensors and modalities as Pandas dataframes.
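    A minimal usage sketch (the file path and DataFrame construction shown
    here are illustrative, not part of this class)::

        import json
        reader = RawReader()
        with open("raw.jsonl") as f:
            for line in f:
                reader.update_with(json.loads(line))
        footpods_df = pd.DataFrame(reader.footpods)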
"""
def __init__(self):
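        # Per-modality buffers of parsed records; each is a list of dicts that
        # can be turned into a pandas DataFrame once a file has been consumed.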
self.footpods = []
self.footpods_sc = []
self.phone_activity = []
self.phone_motion = []
self.music = []
self.phone_location = []
self.t_range = []
def update_with(self, msg):
"""
Updates the class state with a new message from the raw data file.
Parameters
----------
msg : dict
The dictionary representing the data message from the raw file.
Note, the raw JSON line must be first converted to a dict.
"""
if "t" in msg:
t = msg["t"]
self.t_range = [t, t] if self.t_range == [] else [self.t_range[0], t]
if msg["type"] == "iPhone-pedo":
self.phone_activity.append(
{
"t": pd.Timestamp(msg["t"], unit="s"),
"activity": msg["pedo"]["activity"],
"speed": 0
if msg["pedo"]["pace"] == 0
else 1.0 / msg["pedo"]["pace"],
"step": msg["pedo"]["step"],
"cadence": msg["pedo"]["cadence"] * 60,
"floors_ascended": msg["pedo"]["floorsAscended"]
if "floorsAscended" in msg["pedo"]
else 0,
"floors_descended": msg["pedo"]["floorsDescended"]
if "floorsDescended" in msg["pedo"]
else 0,
}
)
if msg["type"] == "iPhone-motion":
gx = msg["motion"]["ag"][0] - msg["motion"]["a"][0]
gy = msg["motion"]["ag"][1] - msg["motion"]["a"][1]
gz = msg["motion"]["ag"][2] - msg["motion"]["a"][2]
a_vert = (
msg["motion"]["a"][0] * gx
+ msg["motion"]["a"][1] * gy
+ msg["motion"]["a"][2] * gz
)
self.phone_motion.append(
{
"t": pd.Timestamp(msg["t"], unit="s"),
"ax": msg["motion"]["a"][0],
"ay": msg["motion"]["a"][1],
"az": msg["motion"]["a"][2],
"gx": gx,
"gy": gy,
"gz": gz,
"a_vert": a_vert,
"rx": msg["motion"]["r"][0],
"ry": msg["motion"]["r"][1],
"rz": msg["motion"]["r"][2],
}
)
if msg["type"] == "iPhone-location":
self.phone_location.append(
{
"t": pd.Timestamp(msg["location"]["timestamp"], unit="s"),
"lon": msg["location"]["coordinate"]["lon"],
"lat": msg["location"]["coordinate"]["lat"],
"lonlat_acc": msg["location"]["coordinate"]["acc"],
"alt": msg["location"]["altitude"]["val"],
"alt_acc": msg["location"]["altitude"]["acc"],
"course": msg["location"]["course"]["val"],
"course_acc": msg["location"]["course"]["acc"],
"speed": msg["location"]["speed"]["val"],
"speed_acc": msg["location"]["speed"]["acc"],
}
)
if msg["type"] == "RunScribe-speedcadence":
self.footpods_sc.append(
{
"t": pd.Timestamp(msg["t"], unit="s"),
"foot": msg["runscribe"]["foot"],
"cadence": msg["rsc"]["cadence"],
"speed": msg["rsc"]["speed"],
}
)
if msg["type"] == "RunScribe-metrics":
self.footpods.append(
{
"t": pd.Timestamp(msg["t"], unit="s"),
"foot": msg["runscribe"]["foot"],
"pronation": msg["metrics"]["pronation"],
"braking": msg["metrics"]["braking"],
"impact": msg["metrics"]["impact"],
"contact_time": msg["metrics"]["contactTime"],
"flight_ratio": msg["metrics"]["flightRatio"],
"strike": msg["metrics"]["strikeType"],
"power": msg["metrics"]["power"],
}
)
if msg["type"] == "Spotify" and "playstate" in msg:
if "name" in msg["playstate"]:
split_track = msg["playstate"]["name"].split("-", 2)
msg["playstate"]["artist"] = split_track[0].strip()
msg["playstate"]["track"] = split_track[1].strip()
self.music.append(
{
"t": | pd.Timestamp(msg["t"], unit="s") | pandas.Timestamp |
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
# with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
(1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
[(1,), 2], [1, (2,)], [('a',), 'b'],
((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
([1, 2, 3], ['a', 'b']),
([], ['a', 'b']),
([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = '^all arrays must be same length$'
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = 'Cannot infer number of levels from empty list'
with pytest.raises(TypeError, match=msg):
| MultiIndex.from_tuples([]) | pandas.MultiIndex.from_tuples |
from contextlib import contextmanager
import pandas as pd
from dataviper.logger import IndentLogger
from dataviper.report.profile import Profile
from dataviper.source.datasource import DataSource
import pymysql
class MySQL(DataSource):
"""
MySQL is a connection provider and query builder for MySQL data sources
(a usage sketch follows this class).
"""
def __init__(self, config={}, sigfig=4, logger=IndentLogger()):
self.config = config
self.sigfig = sigfig
self.logger = logger
@contextmanager
def connect(self, config=None):
config = config if config is not None else self.config
self.__conn = pymysql.connect(**config)
try:
yield
finally:
self.__conn.close()
def get_schema(self, table_name):
self.logger.enter("START: get_schema")
query = self.__get_schema_query(table_name)
schema_df = pd.read_sql(query, self.__conn)
schema_df = schema_df[['column_name', 'data_type']].set_index('column_name')
schema_df.index = schema_df.index.str.lower()
profile = Profile(table_name, schema_df)
profile = self.count_total(profile)
self.logger.exit("DONE: get_schema")
return profile
def count_total(self, profile):
self.logger.enter("START: count_total")
query = "SELECT COUNT(*) AS total FROM {}".format(profile.table_name)
df = | pd.read_sql(query, self.__conn) | pandas.read_sql |
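# A hedged usage sketch for the MySQL source above; the connection settings and
# table name are illustrative assumptions. get_schema() already fills in the row
# count via count_total(), so no extra call is needed.
if __name__ == "__main__":
    config = {
        "host": "localhost",
        "user": "viper",
        "password": "secret",
        "database": "sales",
    }
    source = MySQL(config=config, sigfig=4)
    with source.connect():
        profile = source.get_schema("orders")  # Profile with column names, types and total row count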
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
# run a simulation on the CMES cal, and schedule a function
# using the NYSE cal
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
# each minute should be a nyse session open
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
# each minute should be a minute before a nyse session close
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
expected_functions = [pre, handle_data, f, g, post] * 60030
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('minute',),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
dates = pd.date_range(cls.START_DATE, cls.END_DATE)
assert len(dates) == 4, "Expected four dates."
# Two assets with the same ticker, ending on days[1] and days[3], plus
# a benchmark that spans the whole period.
cls.sids = [1, 2, 3]
cls.asset_starts = [dates[0], dates[2]]
cls.asset_ends = [dates[1], dates[3]]
return pd.DataFrame.from_records([
{'symbol': 'DUP',
'start_date': cls.asset_starts[0],
'end_date': cls.asset_ends[0],
'exchange': 'TEST',
'asset_name': 'FIRST'},
{'symbol': 'DUP',
'start_date': cls.asset_starts[1],
'end_date': cls.asset_ends[1],
'exchange': 'TEST',
'asset_name': 'SECOND'},
{'symbol': 'BENCH',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST',
'asset_name': 'BENCHMARK'},
], index=cls.sids)
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
set_symbol_lookup_date = zipline.api.set_symbol_lookup_date
def initialize(context):
set_symbol_lookup_date(self.asset_ends[0])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[0])
set_symbol_lookup_date(self.asset_ends[1])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[1])
with self.assertRaises(UnsupportedDatetimeFormat):
set_symbol_lookup_date('foobar')
self.run_algorithm(initialize=initialize)
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2020-09-01', tz='utc')
END_DATE = | pd.Timestamp('2020-09-04', tz='utc') | pandas.Timestamp |
import time
import pandas as pd
from googlegeocoder import GoogleGeocoder
geocoder = GoogleGeocoder()
def geocode(row):
"""
Accepts a row from our fatalities list. Returns it with geocoded coordinates.
"""
# If it's already been geocoded, it's already mapped and just return the row.
if hasattr(row, 'geocoder_x') and not | pd.isnull(row.geocoder_x) | pandas.isnull |
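# A hedged usage sketch for geocode() above; the CSV paths are illustrative
# assumptions. Each row is passed through geocode(), which fills in coordinates
# only when geocoder_x is still empty.
if __name__ == "__main__":
    import pandas as pd
    fatalities = pd.read_csv("fatalities.csv")
    fatalities = fatalities.apply(geocode, axis=1)
    fatalities.to_csv("fatalities-geocoded.csv", index=False)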
import numpy as np
import pandas as pd
import pdb
from dku_data_processing.filtering import filter_dataframe
def generate_sample_df():
data = {
'id': {0: 2539, 1: 2595, 2: 3647},
'name': {0: 'Clean & quiet apt home by the park', 1: 'Skylit Midtown Castle', 2: 'THE VILLAGE OF HARLEM....NEW YORK !'},
'host_id': {0: 2787, 1: 2845, 2: 4632},
'host_name': {0: 'John', 1: 'Jennifer', 2: 'Elisabeth'},
'neighbourhood_group': {0: 'Brooklyn', 1: 'Manhattan', 2: 'Manhattan'},
'neighbourhood': {0: 'Kensington', 1: 'Midtown', 2: 'Harlem'},
'latitude': {0: 40.647490000000005, 1: 40.75362, 2: 40.809020000000004},
'longitude': {0: -73.97237, 1: -73.98376999999999, 2: -73.9419},
'room_type': {0: 'Private room', 1: 'Entire home/apt', 2: 'Private room'},
'price': {0: 149, 1: 225, 2: 150},
'minimum_nights': {0: 1, 1: 1, 2: 3},
'number_of_reviews': {0: 9, 1: 45, 2: 0},
'last_review': {0: '2018-10-19', 1: '2019-05-21', 2: np.nan},
'reviews_per_month': {0: 0.21, 1: 0.38, 2: np.nan},
'calculated_host_listings_count': {0: 6, 1: 2, 2: 1},
'availability_365': {0: 365, 1: 355, 2: 365},
'coordinates': {0: 'POINT(-73.97237 40.64749)', 1: np.nan, 2: 'POINT(-73.9419 40.80902)'}
}
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import pandas as pd
import ast
from collections import Counter
data = pd.read_csv('../reddit_data_preprocessing/data/curated_pattern_lists.csv')
data.pattern.str.count("QLTY").sum()
qualities_list = []
for idx, row in data.iterrows():
qualities_list += ast.literal_eval(row['QLTY'])
counter = Counter(qualities_list)
df = | pd.DataFrame.from_dict(counter, orient='index') | pandas.DataFrame.from_dict |
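# A small illustration of the Counter -> DataFrame step above, using made-up
# quality labels; orient='index' puts each counted item on its own row with the
# count in a single column.
from collections import Counter
import pandas as pd

toy_counter = Counter(["calm", "fast", "calm", "steady"])
toy_df = pd.DataFrame.from_dict(toy_counter, orient="index", columns=["count"])
print(toy_df.sort_values("count", ascending=False))
#         count
# calm        2
# fast        1
# steady      1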
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 25 17:54:30 2020
@author: Administrator
"""
import pandas as pd
import numpy as np
fns = [#'../checkpoints/eval_resnet50_singleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/eval_resnet50_singleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_resnet50_metasingleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/eval_resnet50_metasingleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_effnetb4_singleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/eval_effnetb4_singleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-1-test.csv',
#
#
#
# '../checkpoints/19/eval_effnetb4_singleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/19/eval_effnetb4_metasingleview-Loss-ce-tta-1-test.csv',
#
# '../checkpoints/19/eval_effnetb4_singleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/19/eval_effnetb4_metasingleview-Loss-ce-tta-1-test.csv',
'../checkpoint/eval_resnet50_SVMeta-Loss-ce-tta-1-test.csv',
#'../checkpoint/eval_resnet50_SingleView-Loss-ce-tta-1-test.csv',
#'../checkpoint/eval_resnet50_SVBNN-Loss-ce-tta-1-test.csv',
'../checkpoint/eval_effb4_SingleView-Loss-ce-tta-1-test.csv',
'../checkpoint/eval_effb4_SVMeta-Loss-ce-tta-1-test.csv',
'../checkpoint/eval_sk50_SVMeta-Loss-ce-tta-1-test.csv',
'../checkpoint/eval_effb3_SVMeta-Loss-ce-tta-1-test.csv'
]
#fns = ['../checkpoints/eval_resnet50_singleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/eval_resnet50_singleview-Loss-ce-tta-1-test.csv'
#
#
# ]
## averaging mode: 'mean' for arithmetic mean, 'gmean' for geometric mean
mean_mode = 'gmean'
if mean_mode =='mean':
y_pred_total = np.zeros((1512,7),dtype= 'float32')
else:
y_pred_total = np.ones((1512,7),dtype= 'float32')
for fn in fns:#
#fn = fns[0]
print('*'*32)
print(fn)
kl1 = pd.read_csv(fn).values
n_samp = kl1.shape[0]
n_class = 7
y_pred = np.zeros((n_samp,n_class),dtype= 'float32')
pos = 0
for kk in range(1):
y_pred = y_pred + kl1[pos:pos+n_samp,1:].astype('float32')
pos = pos +n_samp
#y_pred = y_pred/1.0
if mean_mode =='mean':
y_pred_total = y_pred_total + y_pred
else:
y_pred_total = y_pred_total * y_pred
if mean_mode =='mean':
y_pred_total = y_pred_total /len(fns)
else:
y_pred_total = np.power(y_pred_total,1.0/len(fns))
y_pred_total = y_pred_total /np.sum(y_pred_total,axis = 1,keepdims = True)
y_pred_total = np.round_(y_pred_total,decimals = 4)
df = | pd.DataFrame(data = y_pred_total,index =kl1[:n_samp,0], columns = [ 'MEL', 'NV','BCC', 'AKIEC', 'BKL', 'DF','VASC']) | pandas.DataFrame |
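# A toy comparison of the two averaging modes above (numbers are made up).
# The arithmetic mean adds the per-model class probabilities, while the geometric
# mean multiplies them, takes the n-th root and re-normalises each row.
import numpy as np

p1 = np.array([[0.6, 0.3, 0.1]])
p2 = np.array([[0.2, 0.5, 0.3]])
arith = (p1 + p2) / 2.0                              # [[0.4 0.4 0.2]]
geom = np.power(p1 * p2, 1.0 / 2)
geom = geom / np.sum(geom, axis=1, keepdims=True)    # roughly [[0.382 0.427 0.191]]
print(np.round(arith, 4), np.round(geom, 4))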
from . import pyheclib
import pandas as pd
import numpy as np
import os
import time
import warnings
# some static functions
def set_message_level(level):
"""
set the verbosity level of the HEC-DSS library
level ranges from "bort" only (level 0) to "internal" (level >10)
"""
pyheclib.hec_zset('MLEVEL','',level)
def set_program_name(program_name):
"""
sets the name of the program (up to 6 characters long) to store with the data
"""
name=program_name[:min(6,len(program_name))]
pyheclib.hec_zset('PROGRAM',name,0)
def get_version(fname):
"""
Get version of DSS File
returns a tuple of string version of 4 characters and integer version
"""
return pyheclib.hec_zfver(fname);
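# A hedged sketch of how the module-level helpers above fit together; the file
# name and helper name are illustrative and not part of the original module.
def _describe_dss_file(fname='example.dss'):
    """Quieten HEC-DSS console output, tag written data with a program name and
    return the file version as a (string, integer) tuple."""
    set_message_level(2)
    set_program_name('PYHEC')
    return get_version(fname)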
class DSSFile:
#DSS missing conventions
MISSING_VALUE=-901.0
MISSING_RECORD=-902.0
FREQ_EPART_MAP = {
pd.tseries.offsets.Minute(n=1):"1MIN",
pd.tseries.offsets.Minute(n=2):"2MIN",
pd.tseries.offsets.Minute(n=3):"3MIN",
pd.tseries.offsets.Minute(n=4):"4MIN",
pd.tseries.offsets.Minute(n=5):"5MIN",
pd.tseries.offsets.Minute(n=10):"10MIN",
pd.tseries.offsets.Minute(n=15):"15MIN",
pd.tseries.offsets.Minute(n=20):"20MIN",
pd.tseries.offsets.Minute(n=30):"30MIN",
pd.tseries.offsets.Hour(n=1):"1HOUR",
pd.tseries.offsets.Hour(n=2):"2HOUR",
pd.tseries.offsets.Hour(n=3):"3HOUR",
pd.tseries.offsets.Hour(n=4):"4HOUR",
pd.tseries.offsets.Hour(n=6):"6HOUR",
pd.tseries.offsets.Hour(n=8):"8HOUR",
pd.tseries.offsets.Hour(n=12):"12HOUR",
pd.tseries.offsets.Day(n=1):"1DAY",
pd.tseries.offsets.Week(n=1):"1WEEK",
pd.tseries.offsets.MonthEnd(n=1):"1MON",
pd.tseries.offsets.YearEnd(n=1):"1YEAR"
}
EPART_FREQ_MAP={v: k for k, v in FREQ_EPART_MAP.items()}
#
def __init__(self,fname):
self.ifltab=pyheclib.intArray(600)
self.istat=0
self.fname=fname
self.isopen=False
self.open()
def __del__(self):
self.close()
def open(self):
"""
Open DSS file
"""
if (self.isopen): return
self.istat=pyheclib.hec_zopen(self.ifltab,self.fname)
self.isopen=True
def close(self):
"""
Close DSS File
"""
#FIXME: remove all created arrays and pointers
if (self.isopen):
pyheclib.zclose_(self.ifltab)
self.isopen=False
def get_version(self):
"""
Get version of DSS File
returns a tuple of string version of 4 characters and integer version
"""
#needs to be done on a closed file
if (self.isopen): self.close()
return pyheclib.hec_zfver(self.fname);
def catalog(self):
"""
Catalog DSS Files
"""
opened_already= self.isopen
try:
if not opened_already: self.open()
icunit=pyheclib.new_intp() # unit (fortran) for catalog
pyheclib.intp_assign(icunit,12)
fcname=self.fname[:self.fname.rfind(".")]+".dsc"
pyheclib.fortranopen_(icunit,fcname, len(fcname))
icdunit=pyheclib.new_intp() # unit (fortran) for condensed catalog
fdname=self.fname[:self.fname.rfind(".")]+".dsd"
pyheclib.intp_assign(icdunit,13)
pyheclib.fortranopen_(icdunit,fdname,len(fdname))
inunit=pyheclib.new_intp()
pyheclib.intp_assign(inunit,0) # new catalog, if non-zero no cataloging
cinstr="" # catalog instructions : None = ""
labrev = pyheclib.new_intp()
pyheclib.intp_assign(labrev,0) # 0 is unabbreviated.
ldsort = pyheclib.new_intp()
pyheclib.intp_assign(ldsort,1) # 1 is sorted
lcdcat = pyheclib.new_intp() # output if condensed created
nrecs = pyheclib.new_intp() # number of records cataloged
pyheclib.zcat_(self.ifltab, icunit, icdunit, inunit, cinstr,
labrev, ldsort, lcdcat, nrecs, len(cinstr))
return pyheclib.intp_value(nrecs)
except:
#warnings.warn("Exception occurred while catalogging")
pass
finally:
pyheclib.fortranflush_(icunit)
pyheclib.fortranclose_(icunit)
pyheclib.fortranflush_(icdunit)
pyheclib.fortranclose_(icdunit)
pyheclib.fortranflush_(inunit)
pyheclib.fortranclose_(inunit)
if not opened_already: self.close()
def read_catalog(self):
"""
Reads .dsd (condensed catalog) for the given dss file.
Will run catalog if it doesn't exist or is out of date
"""
fdname=self.fname[:self.fname.rfind(".")]+".dsd"
if not os.path.exists(fdname):
print("NO CATALOG FOUND: Generating...")
self.catalog()
else:
if os.path.exists(self.fname):
ftime=pd.to_datetime(time.ctime(os.path.getmtime(self.fname)))
fdtime=pd.to_datetime(time.ctime(os.path.getmtime(fdname)))
if ftime > fdtime:
print("CATALOG FILE OLD: Generating...")
self.catalog()
else:
print("Warning: No DSS File found. Using catalog file as is")
#
with open(fdname,'r') as fd:
lines=fd.readlines()
columns=['Tag','A Part','B Part','C Part','F Part','E Part','D Part']
if len(lines) < 9:
print("Warning: catalog is empty! for filename: ",fdname)
return None
colline=lines[7]
column_indices=[]
for c in columns:
column_indices.append(colline.find(c))
a=np.empty([len(columns),len(lines)-9],dtype='U132')
ilx=0
for line in lines[9:]:
cix=0
isx=column_indices[0]
for iex in column_indices[1:]:
s=line[isx:iex].strip()
if s.startswith("-"):
s=a[cix,ilx-1]
a[cix,ilx]=s
cix=cix+1
isx=iex
s=line[isx:].strip()
a[cix,ilx]=s
ilx=ilx+1
df=pd.DataFrame(a.transpose(),columns=list('TABCFED'))
return df
def get_pathnames(self,catalog_dataframe=None):
"""
converts a catalog data frame into pathnames
If catalog_dataframe is None then reads catalog to populate it
returns a list of pathnames (condensed version, i.e. D part is time window)
/A PART/B PART/C PART/DPART (START DATE "-" END DATE)/E PART/F PART/
"""
if catalog_dataframe is None:
catalog_dataframe=self.read_catalog()
pdf=catalog_dataframe.iloc[:,[1,2,3,6,5,4]]
return pdf.apply(func=lambda x: '/'+('/'.join(list(x.values)))+'/',axis=1).values.tolist()
def num_values_in_interval(self,sdstr,edstr,istr):
"""
Get number of values in interval istr, using the start date and end date
string
"""
if istr.find('MON') >= 0: # approximate month length; this may overestimate the count, which is safe for sizing the read buffer
td=pd.to_timedelta(int(istr[:istr.find('MON')]),'M')
elif istr.find('YEAR') >= 0:
td=pd.to_timedelta(int(istr[:istr.find('YEAR')]),'Y')
else:
td=pd.to_timedelta(istr)
return int((pd.to_datetime(edstr)-pd.to_datetime(sdstr))/td)+1
def julian_day(self, date):
"""
get julian day for the date. (count of days since beginning of year)
"""
return date.dayofyear
def m2ihm(self, minute):
"""
24 hour style from mins
"""
ihr=minute//60  # integer division so the hour/minute arithmetic stays integral in Python 3
imin=minute-(ihr*60)
itime=ihr*100+imin
return itime
def parse_pathname_epart(self,pathname):
return pathname.split('/')[1:7][4]
def _number_between(startDateStr, endDateStr, delta=pd.to_timedelta(1,'Y')):
return (pd.to_datetime(endDateStr)-pd.to_datetime(startDateStr))/delta
def _get_timedelta_unit(epart):
if 'YEAR' in epart:
return 'Y'
elif 'MON' in epart:
return 'M'
elif 'WEEK' in epart:
return 'W'
elif 'DAY' in epart:
return 'D'
elif 'HOUR' in epart:
return 'H'
elif 'MIN' in epart:
return 'm'
else:
raise Exception("Unknown epart to time delta conversion for epart=%s"%epart)
def _pad_to_end_of_block(self, endDateStr, interval):
if interval.find('MON') >=0 or interval.find('YEAR') >=0:
buffer=pd.DateOffset(years=10)
elif interval.find('DAY') >=0 :
buffer=pd.DateOffset(years=1)
elif interval.find('HOUR') >=0 or interval.find('MIN') >= 0:
buffer = pd.DateOffset(months=1)
else:
buffer=pd.DateOffset(days=1)
return (pd.to_datetime(endDateStr) + buffer).strftime('%d%b%Y').upper()
def _get_istat_for_zrrtsxd(self, istat):
"""
C ISTAT: Integer status parameter, indicating the
C successfullness of the retrieval.
C ISTAT = 0 All ok.
C ISTAT = 1 Some missing data (still ok)
C ISTAT = 2 Missing data blocks, but some data found
C ISTAT = 3 Combination of 1 and 2 (some data found)
C ISTAT = 4 No data found, although a pathname was read
C ISTAT = 5 No pathname(s) found
C ISTAT > 9 Illegal call to ZRRTS
"""
if istat == 0:
return "All good"
msg = "ISTAT: %d --> "%istat
if istat == 1: msg = msg + "Some missing data (still ok)"
elif istat == 2: msg = msg + "Missing data blocks, but some data found"
elif istat == 3: msg = msg + "Combination of 1 and 2 (some data found)"
elif istat == 4: msg = msg + "No data found, although a pathname was read"
elif istat == 5: msg = msg + "No pathname(s) found"
elif istat > 9: msg = msg + "Illegal call to ZRRTS"
return msg
def _respond_to_istat_state(self, istat):
if istat == 0:
# everything is ok
pass
elif istat == 1 or istat == 2 or istat == 3:
warnings.warn("Some data or data blocks are missing [istat=" + str(istat) + "]", RuntimeWarning)
elif istat == 4:
warnings.warn("Found file but failed to load any data", RuntimeWarning)
elif istat == 5:
# should this be an exception?
raise RuntimeError("Path not found")
elif istat > 9:
# should this be an exception?
raise RuntimeError("Illegal internal call")
def read_rts(self,pathname,startDateStr=None, endDateStr=None):
"""
Read a regular time series for the given pathname.
If the pathname D part contains a time window (START DATE "-" END DATE) and
either startDateStr or endDateStr is None, that window is used to fill in the missing date(s).
A usage sketch follows below.
"""
opened_already=self.isopen
try:
if not opened_already: self.open()
interval = self.parse_pathname_epart(pathname)
trim_first=False
trim_last=False
if startDateStr is None or endDateStr is None:
twstr=pathname.split("/")[4]
if twstr.find("-") < 0 :
if len(twstr.strip())==0:
raise Exception("No start date or end date and twstr is "+twstr)
sdate = edate = twstr
else:
sdate,edate=twstr.split("-")
if startDateStr is None:
trim_first=True
startDateStr=sdate.strip()
if endDateStr is None:
trim_last=True
endDateStr=edate.strip()
endDateStr=self._pad_to_end_of_block(endDateStr,interval)
nvals = self.num_values_in_interval(startDateStr, endDateStr, interval)
sdate = pd.to_datetime(startDateStr)
cdate = sdate.date().strftime('%d%b%Y').upper()
ctime = ''.join(sdate.time().isoformat().split(':')[:2])
dvalues = np.zeros(nvals,'d') # PERF: could be np.empty if all initialized
nvals,cunits,ctype,iofset,istat=pyheclib.hec_zrrtsxd(self.ifltab, pathname, cdate, ctime,
dvalues)
if iofset !=0 :
print('Warning: iofset value of non-zero is not handled: ',iofset)
#FIXME: raise appropriate exception for istat value
#if istat != 0:
# raise Exception(self._get_istat_for_zrrtsxd(istat))
self._respond_to_istat_state(istat)
#FIXME: deal with non-zero iofset
freqoffset=DSSFile.EPART_FREQ_MAP[interval]
if ctype.startswith('INST'):
dindex=pd.date_range(startDateStr,periods=nvals,freq=freqoffset)
else:
sp=pd.Period(startDateStr,freq=freqoffset)-pd.tseries.frequencies.to_offset(freqoffset)
dindex=pd.period_range(sp,periods=nvals,freq=freqoffset)
df1=pd.DataFrame(data=dvalues,index=dindex,columns=[pathname])
# cleanup missing values --> NAN, trim dataset and units and period type strings
df1.replace([DSSFile.MISSING_VALUE,DSSFile.MISSING_RECORD],[np.nan,np.nan],inplace=True)
if trim_first or trim_last:
if trim_first:
first_index=df1.first_valid_index()
else:
first_index=0
if trim_last:
last_index = df1.last_valid_index()
else:
last_index=None
df1 = df1[first_index:last_index]
else:
df1 = df1
return df1,cunits.strip(),ctype.strip()
finally:
if not opened_already: self.close()
def write_rts(self, pathname, df, cunits, ctype):
"""
write time series to this DSS file with the given pathname.
The time series is passed in as a pandas DataFrame
and associated units and types of length no greater than 8.
"""
parts=pathname.split('/')
parts[5]=DSSFile.FREQ_EPART_MAP[df.index.freq]
pathname="/".join(parts)
istat=pyheclib.hec_zsrtsxd(self.ifltab, pathname,
df.index[0].strftime("%d%b%Y").upper(), df.index[0].strftime("%H%M"),
df.iloc[:,0].values, cunits[:8], ctype[:8])
# pyheclib.hec_zsrtsxd(d.ifltab, pathname, df.index[0].strftime("%d%b%Y").upper(), df.index[0].strftime("%H%M"), df.iloc[:,0].values, cunits[:8], ctype[:8])
self._respond_to_istat_state(istat)
def read_its(self, pathname, startDateStr=None, endDateStr=None, guess_vals_per_block=10000):
"""
reads the entire irregular time series record. The timewindow is derived
from the D-PART of the pathname so make sure to read that from the catalog
before calling this function
"""
parts=pathname.split('/')
epart=parts[5]
if len(parts[4].strip()) == 0:
if startDateStr is None or endDateStr is None:
    raise Exception("The pathname D PART must contain a time window, or both startDateStr and endDateStr must be specified for this call")
startDateStr=(pd.to_datetime(startDateStr)-pd.offsets.YearBegin(0)).strftime('%d%b%Y').upper()
endDateStr=(pd.to_datetime(endDateStr)+pd.offsets.YearBegin(0)).strftime('%d%b%Y').upper()
parts[4]=startDateStr+" - "+endDateStr
else:
tw=list(map(lambda x: x.strip(),parts[4].split('-')))
startDateStr=tw[0]
endDateStr=self._pad_to_end_of_block(tw[1],epart)
juls,istat=pyheclib.hec_datjul(startDateStr)
jule,istat=pyheclib.hec_datjul(endDateStr)
ietime=istime=0
# guess how many values to be read based on e part approximation
ktvals=DSSFile._number_between(startDateStr, endDateStr,
pd.to_timedelta(1,unit=DSSFile._get_timedelta_unit(epart)))
ktvals=guess_vals_per_block*int(ktvals)
kdvals=ktvals
itimes = np.zeros(ktvals,'i')
dvalues = np.zeros(kdvals,'d')
inflag = 0; # Retrieve both values preceding and following time window in addtion to time window
nvals, ibdate, cunits, ctype, istat = pyheclib.hec_zritsxd(self.ifltab, pathname, juls, istime, jule, ietime, itimes, dvalues, inflag)
self._respond_to_istat_state(istat)
if nvals == ktvals:
raise Exception("More values than guessed! %d. Call with guess_vals_per_block > 10000 "%ktvals)
base_date=pd.to_datetime('31DEC1899')+ | pd.to_timedelta(ibdate,'D') | pandas.to_timedelta |
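# A hedged usage sketch for the DSSFile class above; the file name is an
# illustrative assumption and the pathname is taken from the condensed catalog.
if __name__ == "__main__":
    d = DSSFile("example.dss")
    try:
        paths = d.get_pathnames()          # catalogues the file on first use if needed
        if paths:
            df, units, ptype = d.read_rts(paths[0])
            print(units, ptype)
            print(df.head())
            # write_rts(pathname, df, units, ptype) would write a series back out.
    finally:
        d.close()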
import librosa
import sys
import argparse
import numpy as np
import pandas as pd
import matplotlib as plt
from pipeline.common.file_utils import ensure_destination_exists
def get_librosa_features(src_audio: str, dst_csv: str):
"""Extract basic audio features from an audio file for HRI with Librosa
TODO: Allow specification of which librosa features to extract.
Args:
src_audio (str): Path to src audio
dst_csv (str): Path to dst feature csv
Returns:
df (DataFrame): Audio features dataframe with features as columns
with the format [feature type]_[number]
"""
y, sr = librosa.load(src_audio)
hop_length = int(sr / 30)  # hop of sr/30 samples gives about 30 feature frames per second
features = _get_features(y, sr, hop_length)
# _plot_features(features["MFCC"]) # Example plot
df = _features_to_df(features)
ensure_destination_exists(dst_csv)
df.to_csv(dst_csv, index=False)
return df
def _get_features(y: np.ndarray, sr: int, hop_length: int):
"""Extracts audio features with the librosa library.
Currently set up to get MFCC, Chroma, Mel_Spect, Spect_Contrast, and Rolloff, as
these features have been identified as well suited for use with
machine learning on human audio data. Tonnetz is excluded because
it doesn't produce the same vector length as the others.
Args:
y (np.ndarray): Input audio wave form
sr (int): Sample Rate
hop_length (int): The hop length sets how many feature frames are produced
per second and is calculated by dividing the sample rate by the desired
frame rate (e.g. sr/30 gives 30 feature frames per second).
Returns:
dictionary: a dictionary of feature types
"""
features = {
"MFCC": librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length),
"Chroma": librosa.feature.chroma_stft(y=y, sr=sr, hop_length=hop_length),
"Mel_Spect": librosa.power_to_db(
librosa.feature.melspectrogram(y=y, sr=sr, hop_length=hop_length),
ref=np.max,
),
"Spect_Contrast": librosa.feature.spectral_contrast(
S=np.abs(librosa.stft(y, hop_length=hop_length)),
sr=sr,
hop_length=hop_length,
),
# "Tonnetz":librosa.feature.tonnetz(y=librosa.effects.harmonic(y), sr=sr, hop_length=704),
"Rolloff": librosa.power_to_db(
librosa.feature.spectral_rolloff(
y=y, sr=sr, hop_length=hop_length, roll_percent=0.95
),
ref=np.max,
),
}
return features
def _features_to_df(features):
"""Converts dictionary of audio features to df
Args:
features (dictionary): dictionary of features
Returns:
df (pd.DataFrame): standard dataframe of features
"""
df = pd.concat(
[ | pd.DataFrame(v) | pandas.DataFrame |
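# A hedged usage sketch for get_librosa_features() above; the audio and csv paths
# are illustrative assumptions. Rows are time frames (about 30 per second given
# hop_length = sr/30) and columns follow the [feature type]_[number] naming
# described in the docstring.
if __name__ == "__main__":
    features_df = get_librosa_features(
        src_audio="data/run_session.wav",
        dst_csv="output/run_session_audio_features.csv",
    )
    print(features_df.shape)
    print(list(features_df.columns[:5]))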
# -*- coding: utf-8 -*-
#author: kai.zhang
import pandas as pd
import numpy as np
from pandas.core.frame import DataFrame
'''
Binance historical data processing
'''
class HisDataHandler(object):
def __init__(self):
self.data = open('../data/his_data.csv').readlines()
def handler(self):
klink_data = eval(self.data[0])
kd = | DataFrame(klink_data) | pandas.core.frame.DataFrame |
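# A hedged sketch of how the kline DataFrame above is typically labelled; the
# column layout is an assumption based on the standard Binance kline payload and
# is not taken from this file.
import pandas as pd

KLINE_COLUMNS = [
    'open_time', 'open', 'high', 'low', 'close', 'volume',
    'close_time', 'quote_volume', 'trades',
    'taker_buy_base', 'taker_buy_quote', 'ignore',
]

def label_klines(kline_rows):
    """Turn a list of raw kline rows into a time-indexed DataFrame."""
    df = pd.DataFrame(kline_rows, columns=KLINE_COLUMNS)
    df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')
    return df.set_index('open_time')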
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 09:04:41 2019
@author: michaelek
"""
import io
import numpy as np
import requests
from gistools import vector
from allotools import AlloUsage
from hydrolm import LM
from tethysts import Tethys
from tethysts import utils
import os
import sys
import yaml
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from multiprocessing.pool import ThreadPool
import pyproj
try:
import plotly.offline as py
import plotly.graph_objs as go
except:
print('install plotly for plot functions to work')
#####################################
### Parameters
# base_dir = os.path.dirname(os.path.abspath( __file__ ))
base_dir = os.path.realpath(os.path.dirname(__file__))
print(base_dir)
with open(os.path.join(base_dir, 'parameters.yml')) as param2:
param = yaml.safe_load(param2)
# datasets_path = os.path.join(base_dir, 'datasets')
outputs = param['output']
catch_key_base = 'tethys/station_misc/{station_id}/catchment.geojson.zst'
####################################
### Testing
# base_dir = os.path.split(os.path.realpath(os.path.dirname(__file__)))[0]
# with open(os.path.join(base_dir, 'parameters.yml')) as param2:
# param1 = yaml.safe_load(param2)
# flow_remote = param1['remote']['flow']
# usage_remote = param1['remote']['usage']
#
# from_date='2010-07-01'
# from_date=None
# to_date='2020-06-30'
# product_code='quality_controlled_data'
# min_gaugings=10
# output_path=os.path.join(base_dir, 'tests')
# local_tz='Etc/GMT-12'
# station_id=['0bc0762fac7423261610b50f', '0ba603f66f55a19d18cbeb81', '0c6b76f9ff6fcf2e103f5e84', '2ec4a2cfa71dd4811eec25e4', '0d1024b9975b573e515ebd62']
# station_id=['0d1024b9975b573e515ebd62']
# ref=None
#
#
# self = FlowNat(flow_remote, usage_remote, from_date, to_date, product_code, min_gaugings, station_id, ref, output_path)
#
# stns_all = self.stations_all.station_id.unique().tolist().copy()
#
# stns1 = self.process_stations(stns_all)
#
# nat_flow = self.naturalisation()
# wap1 = 'SW/0082'
#
# a1 = AlloUsage(from_date='2015-06-30', to_date='2016-06-30', wap_filter={'wap': [wap1]})
#
# res1 = a1.get_ts(['allo', 'usage'], 'D', ['wap'])
#######################################
### Class
class FlowNat(object):
"""
Class to perform several operations to ultimately naturalise flow data.
Initialise the class with the following parameters.
Parameters
----------
flow_remote : dict
    The remote dataset/connection parameters for the flow data (as used by Tethys).
usage_remote : dict
    The remote dataset/connection parameters for the water usage data (as used by Tethys).
from_date : str or None
    The start date for the flow record.
to_date : str or None
    The end of the flow record.
product_code : str
    The Tethys product code. Default is 'quality_controlled_data'.
min_gaugings : int
    The minimum number of gaugings required for the regressions. Default is 10.
station_id : list or None
    The station_id values of the flow stations to be naturalised. If None, stations can be selected later via process_stations.
ref : list or None
    The station reference values used to select the flow stations to be naturalised.
output_path : str or None
    Path to save the processed data, or None to not save them.
local_tz : str
    The local time zone string. Default is 'Etc/GMT-12'.
Returns
-------
FlowNat instance
"""
def __init__(self, flow_remote, usage_remote, from_date=None, to_date=None, product_code='quality_controlled_data', min_gaugings=10, station_id=None, ref=None, output_path=None, local_tz='Etc/GMT-12'):
"""
Class to perform several operations to ultimately naturalise flow data.
Initialise the class with the following parameters.
Parameters
----------
from_date : str
The start date for the flow record.
to_date : str
The end of of the flow record.
min_gaugings : int
The minimum number of gaugings required for the regressions. Default is 8.
rec_data_code : str
Either 'RAW' for the raw telemetered recorder data, or 'Primary' for the quality controlled recorder data. Default is 'Primary'.
input_sites : str, int, list, or None
Flow sites (either recorder or gauging) to be naturalised. If None, then the input_sites need to be defined later. Default is None.
output_path : str or None
Path to save the processed data, or None to not save them.
catch_del : str
Defines what should be used for the catchments associated with flow sites. 'rec' will perform a catchment delineation on-the-fly using the REC rivers and catchments GIS layers, 'internal' will use the pre-generated catchments stored in the package, or a path to a shapefile will use a user created catchments layer. The shapefile must at least have a column named ExtSiteID with the flow site numbers associated with the catchment geometry.
Returns
-------
FlowNat instance
"""
setattr(self, 'from_date', from_date)
setattr(self, 'to_date', to_date)
setattr(self, 'min_gaugings', min_gaugings)
setattr(self, 'flow_remote', flow_remote)
setattr(self, 'usage_remote', usage_remote)
setattr(self, 'product_code', product_code)
setattr(self, 'local_tz', local_tz)
# setattr(self, 'rec_data_code', rec_data_code)
# setattr(self, 'ts_server', param['input']['ts_server'])
# setattr(self, 'permit_server', param['input']['permit_server'])
self.save_path(output_path)
stns_summ = self.get_all_flow_stations()
if (isinstance(station_id, list)) or (isinstance(ref, list)):
stns1 = self.process_stations(station_id=station_id, ref=ref)
# summ1 = self.flow_datasets(from_date=from_date, to_date=to_date, min_gaugings=8, rec_data_code=rec_data_code)
# if input_sites is not None:
# input_summ1 = self.process_sites(input_sites)
#
# if not isinstance(catch_del, str):
# raise ValueError('catch_del must be a string')
#
# if catch_del == 'rec':
# self.load_rec()
# elif catch_del == 'internal':
# catch_gdf_all = pd.read_pickle(os.path.join(base_dir, 'datasets', param['input']['catch_del_file']))
# setattr(self, 'catch_gdf_all', catch_gdf_all)
# elif catch_del.endswith('shp'):
# catch_gdf_all = gpd.read_file(catch_del)
# setattr(self, 'catch_gdf_all', catch_gdf_all)
# else:
# raise ValueError('Please read docstrings for options for catch_del argument')
pass
# def flow_datasets_all(self, rec_data_code='Primary'):
# """
#
# """
# ## Get dataset types
# datasets1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['ts_dataset_table'], where_in={'Feature': ['River'], 'MeasurementType': ['Flow'], 'DataCode': ['Primary', 'RAW']})
# man_datasets1 = datasets1[(datasets1['CollectionType'] == 'Manual Field') & (datasets1['DataCode'] == 'Primary')].copy()
# rec_datasets1 = datasets1[(datasets1['CollectionType'] == 'Recorder') & (datasets1['DataCode'] == rec_data_code)].copy()
#
# ## Get ts summaries
# man_summ1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['ts_summ_table'], ['ExtSiteID', 'DatasetTypeID', 'Min', 'Median', 'Mean', 'Max', 'Count', 'FromDate', 'ToDate'], where_in={'DatasetTypeID': man_datasets1['DatasetTypeID'].tolist()}).sort_values('ToDate')
# man_summ2 = man_summ1.drop_duplicates(['ExtSiteID'], keep='last').copy()
# man_summ2['CollectionType'] = 'Manual Field'
#
# rec_summ1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['ts_summ_table'], ['ExtSiteID', 'DatasetTypeID', 'Min', 'Median', 'Mean', 'Max', 'Count', 'FromDate', 'ToDate'], where_in={'DatasetTypeID': rec_datasets1['DatasetTypeID'].tolist()}).sort_values('ToDate')
# rec_summ2 = rec_summ1.drop_duplicates(['ExtSiteID'], keep='last').copy()
# rec_summ2['CollectionType'] = 'Recorder'
#
# ## Combine
# summ2 = pd.concat([man_summ2, rec_summ2], sort=False)
#
# summ2['FromDate'] = pd.to_datetime(summ2['FromDate'])
# summ2['ToDate'] = pd.to_datetime(summ2['ToDate'])
#
# ## Add in site info
# sites1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['sites_table'], ['ExtSiteID', 'NZTMX', 'NZTMY', 'SwazGroupName', 'SwazName'])
#
# summ3 = pd.merge(summ2, sites1, on='ExtSiteID')
#
# ## Assign objects
# setattr(self, 'sites', sites1)
# setattr(self, 'rec_data_code', rec_data_code)
# setattr(self, 'summ_all', summ3)
def get_all_flow_stations(self):
"""
Function to process the flow datasets
Parameters
----------
from_date : str
The start date for the flow record.
to_date : str
The end of of the flow record.
min_gaugings : int
The minimum number of gaugings required for the regressions. Default is 8.
rec_data_code : str
Either 'RAW' for the raw telemetered recorder data, or 'Primary' for the quality controlled recorder data. Default is 'Primary'.
Returns
-------
DataFrame
"""
tethys1 = Tethys([self.flow_remote])
flow_ds = [ds for ds in tethys1.datasets if (ds['parameter'] == 'streamflow') and (ds['product_code'] == self.product_code) and (ds['frequency_interval'] == '24H') and (ds['utc_offset'] == '12H') and (ds['method'] == 'sensor_recording')]
flow_ds.extend([ds for ds in tethys1.datasets if (ds['parameter'] == 'streamflow') and (ds['product_code'] == self.product_code) and (ds['frequency_interval'] == 'T') and (ds['method'] == 'field_activity')])
stns_list = []
for ds in flow_ds:
stns1 = tethys1.get_stations(ds['dataset_id'])
stns_list.extend(stns1)
stns_list2 = [s for s in stns_list if s['stats']['count'] >= self.min_gaugings]
# stns_list2 = stns_list
stns_list3 = [{'dataset_id': s['dataset_id'], 'station_id': s['station_id'], 'ref': s['ref'], 'geometry': Point(s['geometry']['coordinates']), 'min': s['stats']['min'], 'max': s['stats']['max'], 'count': s['stats']['count'], 'from_date': s['time_range']['from_date'], 'to_date': s['time_range']['to_date']} for s in stns_list2]
[s.update({'from_date': s['from_date'] + '+00:00', 'to_date': s['to_date'] + '+00:00'}) for s in stns_list3 if not '+00:00' in s['from_date']]
stns_summ = gpd.GeoDataFrame(pd.DataFrame(stns_list3), geometry='geometry', crs=4326)
stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
# stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_localize(None)
# stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_localize(None)
if isinstance(self.from_date, str):
from_date1 = pd.Timestamp(self.from_date)
stns_summ = stns_summ[stns_summ['from_date'] <= from_date1]
if isinstance(self.to_date, str):
to_date1 = pd.Timestamp(self.to_date)
stns_summ = stns_summ[stns_summ['to_date'] >= to_date1]
setattr(self, 'stations_all', stns_summ)
setattr(self, '_tethys_flow', tethys1)
setattr(self, 'flow_datasets_all', flow_ds)
return stns_summ
def save_path(self, output_path=None):
"""
"""
if output_path is None:
pass
elif isinstance(output_path, str):
if not os.path.exists(output_path):
os.makedirs(output_path)
setattr(self, 'output_path', output_path)
# output_dict1 = {k: v.split('_{run_date}')[0] for k, v in param['output'].items()}
# file_list = [f for f in os.listdir(output_path) if ('catch_del' in f) and ('.shp' in f)]
def process_stations(self, station_id=None, ref=None):
"""
Function to process the sites.
Parameters
----------
input_sites : str, int, list, or None
Flow sites (either recorder or gauging) to be naturalised. If None, then the input_sites need to be defined later. Default is None.
Returns
-------
DataFrame
"""
## Checks
# if isinstance(input_sites, (str, int)):
# input_sites = [input_sites]
# elif not isinstance(input_sites, list):
# raise ValueError('input_sites must be a str, int, or list')
if (not isinstance(station_id, list)) and (not isinstance(ref, list)):
raise ValueError('station_id and ref must be lists')
## Filter
stns1 = self.stations_all.copy()
bad_stns = []
if isinstance(station_id, list):
    stns1 = stns1[stns1['station_id'].isin(station_id)]
    bad_stns.extend([s for s in station_id if s not in stns1['station_id'].tolist()])
if isinstance(ref, list):
    stns1 = stns1[stns1['ref'].isin(ref)]
    bad_stns.extend([r for r in ref if r not in stns1['ref'].tolist()])
if bad_stns:
print(', '.join(bad_stns) + ' stations are not available for naturalisation')
## Save if required
if hasattr(self, 'output_path'):
run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')
flow_sites_shp = outputs['flow_sites_shp'].format(run_date=run_time)
save1 = stns1.copy()
save1['from_date'] = save1['from_date'].astype(str)
save1['to_date'] = save1['to_date'].astype(str)
save1.to_file(os.path.join(self.output_path, flow_sites_shp))
## Drop duplicate stations
stns2 = stns1.sort_values('count', ascending=False).drop_duplicates('station_id')
# stns2 = stns1.drop_duplicates('station_id')
setattr(self, 'stations', stns2)
## Filter flow datasets
stn_ds = stns2['dataset_id'].unique()
flow_ds1 = self.flow_datasets_all.copy()
flow_ds2 = [ds for ds in flow_ds1 if ds['dataset_id'] in stn_ds]
setattr(self, 'flow_datasets', flow_ds2)
## Remove existing attributes if they exist
if hasattr(self, 'catch'):
delattr(self, 'catch')
if hasattr(self, 'waps'):
delattr(self, 'waps')
if hasattr(self, 'flow'):
delattr(self, 'flow')
if hasattr(self, 'usage_rate'):
delattr(self, 'usage_rate')
if hasattr(self, 'nat_flow'):
delattr(self, 'nat_flow')
return stns1
# def load_rec(self):
# """
#
# """
#
# if not hasattr(self, 'rec_rivers'):
# try:
# with lzma.open(os.path.join(datasets_path, param['input']['rec_rivers_file'])) as r:
# rec_rivers = pickle.loads(r.read())
# with lzma.open(os.path.join(datasets_path, param['input']['rec_catch_file'])) as r:
# rec_catch = pickle.loads(r.read())
# except:
# print('Downloading rivers and catchments files...')
#
# url1 = 'https://cybele.s3.us-west.stackpathstorage.com/mfe;rec;v2.4;rivers.gpd.pkl.xz'
# r_resp = requests.get(url1)
# with open(os.path.join(datasets_path, param['input']['rec_rivers_file']), 'wb') as r:
# r.write(r_resp.content)
# with lzma.open(os.path.join(datasets_path, param['input']['rec_rivers_file'])) as r:
# rec_rivers = pickle.loads(r.read())
#
# url2 = 'https://cybele.s3.us-west.stackpathstorage.com/mfe;rec;v2.4;catchments.gpd.pkl.xz'
# r_resp = requests.get(url2)
# with open(os.path.join(datasets_path, param['input']['rec_catch_file']), 'wb') as r:
# r.write(r_resp.content)
# with lzma.open(os.path.join(datasets_path, param['input']['rec_catch_file'])) as r:
# rec_catch = pickle.loads(r.read())
#
# rec_rivers.rename(columns={'order': 'ORDER'}, inplace=True)
# setattr(self, 'rec_rivers', rec_rivers)
# setattr(self, 'rec_catch', rec_catch)
#
# pass
@staticmethod
def _get_catchment(inputs):
"""
"""
station_id = inputs['station_id']
bucket = inputs['bucket']
conn_config = inputs['conn_config']
key1 = catch_key_base.format(station_id=station_id)
try:
obj1 = utils.get_object_s3(key1, conn_config, bucket, 'zstd', 0)
b2 = io.BytesIO(obj1)
c1 = gpd.read_file(b2)
except:
c1 = gpd.GeoDataFrame(columns=['id', 'area', 'dataset_id', 'distance', 'nzsegment', 'ref', 'station_id', 'geometry'])
return c1
def get_catchments(self, threads=30):
"""
"""
stns = self.stations.copy()
stn_ids = stns.station_id.unique()
conn_config = self.flow_remote['connection_config']
bucket = self.flow_remote['bucket']
input_list = [{'conn_config': conn_config, 'bucket': bucket, 'station_id': s} for s in stn_ids]
output = ThreadPool(threads).map(self._get_catchment, input_list)
catch1 = pd.concat(output).drop('id', axis=1)
catch1.crs = pyproj.CRS(2193)
catch1 = catch1.to_crs(4326)
## Save if required
if hasattr(self, 'output_path'):
run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')
catch_del_shp = outputs['catch_del_shp'].format(run_date=run_time)
catch1.to_file(os.path.join(self.output_path, catch_del_shp))
setattr(self, 'catch', catch1)
return catch1
def get_waps(self):
"""
"""
tethys1 = Tethys([self.usage_remote])
usage_ds = [ds for ds in tethys1.datasets if (ds['parameter'] == 'water_use') and (ds['product_code'] == 'raw_data') and (ds['frequency_interval'] == '24H') and (ds['utc_offset'] == '12H') and (ds['method'] == 'sensor_recording')]
stns_list = []
for ds in usage_ds:
stns1 = tethys1.get_stations(ds['dataset_id'])
stns_list.extend(stns1)
stns_list3 = [{'dataset_id': s['dataset_id'], 'station_id': s['station_id'], 'ref': s['ref'], 'geometry': Point(s['geometry']['coordinates']), 'from_date': s['time_range']['from_date'], 'to_date': s['time_range']['to_date']} for s in stns_list]
[s.update({'from_date': s['from_date'] + '+00:00', 'to_date': s['to_date'] + '+00:00'}) for s in stns_list3 if not '+00:00' in s['from_date']]
stns_summ = gpd.GeoDataFrame(pd.DataFrame(stns_list3)
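# The call presumably closes and continues the same way as in get_all_flow_stations
# above (a sketch mirroring that method, not the author's code):
#
#     stns_summ = gpd.GeoDataFrame(pd.DataFrame(stns_list3), geometry='geometry', crs=4326)
#     stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
#     stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
#     setattr(self, 'waps', stns_summ)
#     setattr(self, '_tethys_usage', tethys1)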
import Functions
import pandas as pd
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date'])
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
logic = {'Open*': 'first',
'High': 'max',
'Low': 'min',
'Close**': 'last',
'Volume': 'sum',
'Market Cap': 'last'
}
offset = pd.offsets.timedelta(days=-6)
dfWMR = dfWMR.resample('W', loffset=offset).apply(logic)
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date'])
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
logic = {'Open*': 'first',
'High': 'max',
'Low': 'min',
'Close**': 'last',
'Volume': 'sum',
'Market Cap': 'last'
}
offset = pd.offsets.timedelta(days=-6)
dfMarket = dfMarket.resample('W', loffset=offset).apply(logic)
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
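# The merge pattern above presumably repeats for the remaining feature frames
# (a sketch extrapolated from the Mom3 block; the exact set of frames is an assumption):
#
#     dfTemp[coin_list[i]] = dfMarket['Mom5']
#     dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
#     dfTemp = pd.DataFrame()
#     dfTemp[coin_list[i]] = dfMarket['Mom7']
#     dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
#     dfTemp = pd.DataFrame()
#     dfTemp[coin_list[i]] = dfMarket['Mom14']
#     dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
#     dfTemp = pd.DataFrame()
#     dfTemp[coin_list[i]] = dfMarket['Market Cap']
#     dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)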
import wavefront_api_client as wave_api
from utils.converterutils import addHeader
import datetime as dt
from datetime import datetime
import numpy as np
import pandas as pd
import time
from dateutil.parser import parse
APP_PLACEHOLDER = '[APP]'
SEVEN_DAY = 24*7*60*60*1000
ONE_MINUTE = 60*1000
def retrieveQueryUrl(app, url):
return url.replace(APP_PLACEHOLDER, app)
def executeQuery( appname, query, client, start_time, end_time, query_granularity, dateFormat=True):
query_api = wave_api.QueryApi(client)
app_query = retrieveQueryUrl(appname, query)
#print(app_query)
result = query_api.query_api(query, str(start_time), query_granularity, e=str(end_time))
return formatData(result, dateFormat)
#enter resulting data into the dataframe with the timestamp as the index
def formatData(result, dateFormat=True):
if result.timeseries is not None:
for entry in result.timeseries:
data = np.array(entry.data)
idx = pd.Series(data[:,0])
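# A sketch of how each timeseries entry might be turned into an indexed series
# (the value column position and the seconds-based epoch are assumptions):
#
#     if dateFormat:
#         idx = pd.to_datetime(idx, unit='s')
#     series = pd.Series(data[:, 1], index=idx)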
"""
Gather data about tweet engagement over time.
"""
# Copyright (c) 2020 <NAME>. All rights reserved.
from typing import List, Tuple
from datetime import datetime
import json
import os
import pickle
import dateutil
import pytz
import pandas as pd
import tweepy
import plot
CREDS_FILENAME = "creds.json"
USER_IDS_FILENAME = "userids.txt"
DATA_DIRNAME = "data"
TWEETS_FILENAME = "tweets.pkl"
TWEET_COUNT = 100
DO_PLOT = True # for convenience
DATETIME_FORMAT = "%Y%m%d%H%M%S"
LOCAL_TIMEZONE = pytz.timezone("America/Chicago")
def main():
"""main program"""
# pylint: disable=invalid-name
# load credentials and user ids
with open(CREDS_FILENAME) as creds_file:
creds = json.load(creds_file)
with open(USER_IDS_FILENAME) as userids_file:
user_ids = userids_file.readlines()
user_ids = [x.rstrip() for x in user_ids]
auth = tweepy.OAuthHandler(
creds["consumer_api"], creds["consumer_api_secret"])
auth.set_access_token(
creds["access_token"], creds["access_token_secret"])
api = tweepy.API(
auth,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
# api.verify_credentials()
# ~~~~ download ~~~~
for idx, user_id in enumerate(user_ids):
print(idx + 1, "/", len(user_ids), user_id)
# collect status objects for offline processing
query_time = datetime.now()
cursor = tweepy.Cursor(api.user_timeline, id=user_id)
tweets = []
for status in cursor.items(TWEET_COUNT):
tweets.append(status)
datetime_str = query_time.strftime(DATETIME_FORMAT)
with open(
os.path.join(DATA_DIRNAME, user_id + "." + datetime_str + ".pkl"),
"wb") as pickle_file:
pickle.dump(tweets, pickle_file)
# ~~~~ analyze ~~~~
# build dataframe of tweets by time
pickle_filenames = [x for x in os.listdir(DATA_DIRNAME) if x.endswith(".pkl")]
dfs = []
for pickle_filename in pickle_filenames:
df = load_tweets(os.path.join(DATA_DIRNAME, pickle_filename))
# add a column for the datetime of the pull
_, datetime_str, _ = pickle_filename.split(".")
datetime_obj = datetime.strptime(datetime_str, DATETIME_FORMAT)
datetime_obj = LOCAL_TIMEZONE.localize(datetime_obj)
df.insert(0, "query_datetime", datetime_obj)
dfs.append(df)
tweets_df = pd.concat(dfs)
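# A sketch of one way the engagement analysis could continue (the column names
# 'favorite_count' and 'retweet_count', and plot.plot_engagement, are illustrative
# assumptions about load_tweets and the local plot module):
#
#     engagement = (tweets_df
#                   .groupby('query_datetime')[['favorite_count', 'retweet_count']]
#                   .sum())
#     if DO_PLOT:
#         plot.plot_engagement(engagement)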
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 20 22:28:42 2018
@author: Erkin
"""
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# T VALUE FUNCTION
#def t_ind(quotes, tgt_margin=0.2, n_days=30):
# quotes=quotes[['date','lowest_newprice']]
# quotes=quotes.reset_index(drop=True)
#
# t_matrix=pd.DataFrame(quotes.date).iloc[0:len(quotes)-n_days,]
# t_matrix['T']=0
# t_matrix['decision']='hold'
# for i in range(len(quotes)-n_days):
# a=quotes.iloc[i:i+n_days,:]
# a['first_price']=quotes.iloc[i,1]
# a['variation']=(a.lowest_newprice-a.first_price)/a.first_price
# t_value=len(a[(a.variation>tgt_margin)]) - len(a[(a.variation<-tgt_margin)])
# t_matrix.iloc[i,1]=t_value
# if (t_value > 10):
# t_matrix.iloc[i,2]='buy'
# elif(t_value < -10):
# t_matrix.iloc[i,2]='sell'
#
# plt.subplot(2, 1, 1)
# dates = matplotlib.dates.date2num(t_matrix.date)
# plt.plot_date(dates, t_matrix['T'],linestyle='solid', marker='None')
# plt.title(' T vs time ')
# plt.xlabel('Time')
# plt.ylabel('T value')
#
#
# plt.subplot(2, 1, 2)
# dates = matplotlib.dates.date2num(quotes.iloc[0:len(quotes)-n_days,].date)
# plt.plot_date(dates, quotes.iloc[0:len(quotes)-n_days,]['lowest_newprice'],linestyle='solid', marker='None')
# plt.xlabel('Time')
# plt.ylabel('price')
# plt.show()
# plt.show()
# return t_matrix
#
# FUNCTION ENDS
# importing necessary datasets.
product_info=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_info.csv')
product_info=product_info.drop('Unnamed: 0',axis=1)
product_info_head=product_info.head(1000)
product=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product.csv')
product=product.drop('Unnamed: 0',axis=1)
product_head=product.head(1000)
map_product=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/map_product.csv')
map_product=map_product.drop('Unnamed: 0',axis=1)
map_product_head=map_product.head(1000)
product_answer=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_answer.csv')
product_answer=product_answer.drop('Unnamed: 0',axis=1)
product_answer_head=product_answer.head(1000)
product_question=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_question.csv')
product_question=product_question.drop('Unnamed: 0',axis=1)
product_question_head=product_question.head(1000)
product_review=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_review.csv')
product_review=product_review.drop('Unnamed: 0',axis=1)
product_review_head=product_review.head(1000)
merged=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/merged.csv')
merged=merged.drop('Unnamed: 0',axis=1)
merged_head=merged.head(1000)
#product names
#product_names=product.iloc[:,1:3]
#merged=pd.merge(product_names,product_info,how='right',on='asin')
#merged_head=merged.head(1000)
#
##lowest price na replacement
#merged=merged.drop('Unnamed: 0',axis=1)
#merged['lowest_newprice']=merged['lowest_newprice'].fillna(merged['list_price'])
#merged_head=merged.head(1000)
#merged.isna().sum()
#removing values with less than 200 observations.
#asd=merged.groupby(['asin']).count()
#asd=asd[asd.date > 200]
#asd.reset_index(level=0, inplace=True)
#merged=merged[merged.asin.isin(asd.asin)]
#merged=merged.reset_index(drop=True)
#merged['date'] = pd.to_datetime(merged['date']).dt.date
#
#unique_asins=merged.asin.unique()
#merged['T']=99
#for asin in unique_asins:
# print(asin)
# quotes=merged[merged.asin==asin]
# iterable=quotes.iloc[0:len(quotes)-n_days,]
# for i, row in iterable.iterrows():
# a=quotes.loc[i:i+n_days,:]
# a['first_price']=quotes.loc[i,'lowest_newprice']
# a['variation']=(a.lowest_newprice-a.first_price)/a.first_price
# t_value=len(a[(a.variation>tgt_margin)]) - len(a[(a.variation<-tgt_margin)])
# merged.loc[i,'T']=t_value
#asins=merged.asin.unique().tolist()
#product0=t_ind(quotes=merged[merged.asin==asins[0]])
#product1=t_ind(quotes=merged[merged.asin==asins[1]])
#product2=t_ind(quotes=merged[merged.asin==asins[2]])
#product3=t_ind(quotes=merged[merged.asin==asins[3]])
#product4=t_ind(quotes=merged[merged.asin==asins[4]])
#product5=t_ind(quotes=merged[merged.asin==asins[5]])
#product6=t_ind(quotes=merged[merged.asin==asins[6]])
#
## Create the time index
#product6.set_index('date', inplace=True)
#ts=product6.drop('decision',axis=1)
#
#
## Verify the time index
#product6.head()
#product6.info()
#product6.index
## Run the AutoRegressive model
#from statsmodels.tsa.ar_model import AR
#ar1=AR(ts)
#model1=ar1.fit()
## View the results
#print('Lag: %s' % model1.k_ar)
#print('Coefficients: %s' % model1.params)
#
## Separate the data into training and test
#split_size = round(len(ts)*0.3)
#ts_train,ts_test = ts[0:len(ts)-split_size], ts[len(ts)-split_size:]
#
## Run the model again on the training data
#ar2=AR(ts_train)
#model2=ar2.fit()
#
## Predicting the outcome based on the test data
#ts_test_pred_ar = model2.predict(start=len(ts_train),end=len(ts_train)+len(ts_test)-1,dynamic=False)
#ts_test_pred_ar.index=ts_test.index
#
## Calculating the mean squared error of the model
#from sklearn.metrics import mean_squared_error
#error = mean_squared_error(ts_test,ts_test_pred_ar)
#print('Test MSE: %.3f' %error)
#
## Plot the graph comparing the real value and predicted value
#from matplotlib import pyplot
#fig = plt.figure(dpi=100)
#pyplot.plot(ts_test)
#pyplot.plot(ts_test_pred_ar)
#df_dateasin=merged[['date','asin']]
#
#
#
#reviews_sorted=product_review.sort_values('review_date')
#reviews_sorted['number_of_reviews']=reviews_sorted.groupby(['asin','review_date']).cumcount()+1
#reviews_sorted['star_tot']=reviews_sorted.groupby('asin').star.cumsum()
#reviews_sorted = reviews_sorted.drop_duplicates(['asin','review_date'], keep='last')
#df_dateasin = df_dateasin.drop_duplicates(['asin','date'], keep='last')
#df_dateasin.columns=['review_date','asin']
#reviews_sorted = pd.merge(df_dateasin,reviews_sorted,how='left')
#reviews_sorted_head=reviews_sorted.head(1000)
#
#
#
#
#
#reviews_sorted['reviews_total']=reviews_sorted.groupby('asin').number_of_reviews.cumsum()
#reviews_sorted['star_avg']=reviews_sorted.star_tot/reviews_sorted.number_of_reviews
#reviews_sorted = reviews_sorted.drop_duplicates(['asin','review_date'], keep='last')
#t1=reviews_sorted[['review_date','asin','number_of_reviews','star_avg']]
#t1.columns=['date','asin','number_of_reviews','star_avg']
#merged2 = pd.merge(merged,t1,how='left')
#
#t2 = pd.merge(df_dateasin,t1,how='left')
#
#nul = merged2['number_of_reviews'].isnull()
#nul.groupby((nul.diff() == 1).cumsum()).cumsum()*3 + merged2['number_of_reviews'].ffill()
#
# FEATURE ENGINEERING
# aggregation of number of reviews and average star rating
reviews_sorted=product_review.set_index('review_date').sort_index()
reviews_sorted['number_of_reviews']=reviews_sorted.groupby('asin').cumcount()+1
reviews_sorted['star_tot']=reviews_sorted.groupby('asin').star.cumsum()
reviews_sorted=reviews_sorted.reset_index()
reviews_sorted['star_avg']=reviews_sorted.star_tot/reviews_sorted.number_of_reviews
reviews_sorted = reviews_sorted.drop_duplicates(['asin','review_date'], keep='last')
t1=reviews_sorted[['review_date','asin','number_of_reviews','star_avg']]
t1.columns=['date','asin','number_of_reviews','star_avg']
merged2 = pd.merge(merged,t1,how='left')
merged2['number_of_reviews']=merged2.groupby('asin').number_of_reviews.fillna(method='ffill')
merged2['number_of_reviews']=merged2.groupby('asin').number_of_reviews.fillna(method='bfill')
merged2['star_avg']=merged2.groupby('asin').star_avg.fillna(method='ffill')
merged2['star_avg']=merged2.groupby('asin').star_avg.fillna(method='bfill')
# hold and buy
merged2_head=merged2.head(10000)
df_pred = merged2[merged2['T'] < 40]
df_pred_head=df_pred.head(10000)
df_pred['decision']=0 #don't buy
df_pred.loc[(df_pred['T']>5),'decision']=1 #buy
df_pred_head=df_pred.head(10000)
#
## price diff
#price_diff=[]
#df_pred['price_diff']=0
#for game in df_pred.asin.unique():
# price_diff.append(0)
# for row in range(1,len(df_pred[df_pred.asin==game])):
# price_diff.append((df_pred.iloc[row,4]-df_pred.iloc[row-1,4])/df_pred.iloc[row-1,4])
#df_pred['price_diff']=price_diff
#df_pred_head=df_pred.head(10000)
#
#
#removing products less than 150 datapoints
asd=df_pred.groupby(['asin']).count()
asd=asd[asd.date > 150]
asd.reset_index(level=0, inplace=True)
df_pred=df_pred[df_pred.asin.isin(asd.asin)]
df_pred=df_pred.reset_index(drop=True)
df_pred=df_pred.dropna(subset=['sales_rank'])
#%%
# BENCHMARK MODEL
#random forest
asins=df_pred.asin.unique().tolist()
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = df_pred[df_pred.asin==asins[i]]
benchmark_model={}
benchmark_ytest={}
for key, value in d.items():
X=value[['lowest_newprice','total_new','total_used','sales_rank']]
y=value.decision
split_size = round(len(X)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestClassifier(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# sfm = SelectFromModel(model, threshold=0.03)
# sfm.fit(X_train, y_train)
# for feature_list_index in sfm.get_support(indices=True):
# print(X_train.columns[feature_list_index])
y_test_pred=pd.DataFrame(model.predict(X_test))
test_pred=pd.concat([y_test,y_test_pred],axis=1)
benchmark_ytest[str(key)]=test_pred
from sklearn.metrics import accuracy_score
benchmark_model[str(key)]=accuracy_score(y_test,y_test_pred)
from sklearn.metrics import precision_recall_fscore_support as score
precision, recall, fscore, support = score(y_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(y_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
recalls_df=pd.DataFrame(recalls, columns=['recall_hold','recall_buy'])
fscores_df=pd.DataFrame(fscores, columns=['fscore_hold','fscore_buy'])
supports_df=pd.DataFrame(supports, columns=['support_hold','support_buy'])
benchmark_scores=pd.concat([products_df,accuracies_df,precisions_df,recalls_df,fscores_df,supports_df],axis=1)
benchmark_scores=benchmark_scores.dropna()
benchmark_scores=benchmark_scores[benchmark_scores['support_buy']>10]
benchmark_scores.precision_buy.mean() #precision is 52.7%
benchmark_scores.recall_buy.mean() #recall is 38%
benchmark_scores.accuracy.mean() #accuracy is 70%
#%%
#regression
asins=df_pred.asin.unique().tolist()
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = df_pred[df_pred.asin==asins[i]]
benchmark_model={}
benchmark_ytest={}
for key, value in d.items():
X=value[['lowest_newprice','total_new','total_used','sales_rank']]
y=value['T']
dec=value.decision
split_size = round(len(X)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
dec_train,dec_test=dec[0:len(dec)-split_size], dec[len(dec)-split_size:]
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestRegressor(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# sfm = SelectFromModel(model, threshold=0.03)
# sfm.fit(X_train, y_train)
# for feature_list_index in sfm.get_support(indices=True):
# print(X_train.columns[feature_list_index])
y_test_pred=pd.DataFrame(model.predict(X_test))
y_test_pred['decision']=0
y_test_pred.loc[y_test_pred[0]>5,'decision']=1
y_test_pred=y_test_pred.drop([0],axis=1)
test_pred=pd.concat([dec_test,y_test_pred],axis=1)
benchmark_ytest[str(key)]=test_pred
benchmark_model[str(key)]=accuracy_score(dec_test,y_test_pred)
precision, recall, fscore, support = score(dec_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(dec_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
recalls_df=pd.DataFrame(recalls, columns=['recall_hold','recall_buy'])
fscores_df=pd.DataFrame(fscores, columns=['fscore_hold','fscore_buy'])
supports_df=pd.DataFrame(supports, columns=['support_hold','support_buy'])
benchmark_scores=pd.concat([products_df,accuracies_df,precisions_df,recalls_df,fscores_df,supports_df],axis=1)
benchmark_scores=benchmark_scores.dropna()
benchmark_scores=benchmark_scores[benchmark_scores['support_buy']>10]
benchmark_scores.precision_buy.mean() #precision is 51% - 57
benchmark_scores.recall_buy.mean() #recall is 56% - 46
benchmark_scores.accuracy.mean() #accuracy is 78% - 71
#%%
# all products (# just a random trial)
#
#X=df_pred[['total_new','total_used','sales_rank','price_diff']]
#y=df_pred.decision
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#randomforest = RandomForestClassifier(random_state=0,n_estimators=100,max_depth=10)
#model = randomforest.fit(X_train, y_train)
#
#sfm = SelectFromModel(model, threshold=0.03)
#sfm.fit(X_train, y_train)
#for feature_list_index in sfm.get_support(indices=True):
# print(X_train.columns[feature_list_index])
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#
#from sklearn import metrics
#from sklearn.metrics import classification_report
#
#print("MODEL B1: All Products \n")
#
#print ('The precision for this classifier is ' + str(metrics.precision_score(y_test, y_test_pred)))
#print ('The recall for this classifier is ' + str(metrics.recall_score(y_test, y_test_pred)))
#print ('The f1 for this classifier is ' + str(metrics.f1_score(y_test, y_test_pred)))
#print ('The accuracy for this classifier is ' + str(metrics.accuracy_score(y_test, y_test_pred)))
#
#print ('\nHere is the classification report:')
#print (classification_report(y_test, y_test_pred))
#
#from sklearn.metrics import confusion_matrix
#print(pd.DataFrame(confusion_matrix(y_test, y_test_pred, labels=[1, 0]), index=['true:1', 'true:0'], columns=['pred:1', 'pred:0']))
#
# IMPROVED MODEL
#asins=df_pred.asin.unique().tolist()
#
##Accuracy for product 0
#product0=df_pred[df_pred.asin==asins[0]]
#X=product0[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product0.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#accuracy_score(y_test,y_test_pred)
#
#
##Accuracy for product 1
#
#product2=df_pred[df_pred.asin==asins[2]]
#X=product2[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product2.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#accuracy_score(y_test,y_test_pred)
#
#
##Accuracy for product 7
#
#product7=df_pred[df_pred.asin==asins[7]]
#X=product7[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product7.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#accuracy_score(y_test,y_test_pred)
#
#
#
#
#
##Accuracy for product 9
#
#product9=df_pred[df_pred.asin==asins[9]]
#X=product9[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product9.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#print(accuracy_score(y_test,y_test_pred))
#y_test_pred['actual']=y_test.reset_index(drop=True)
#
#%%
# MATCHING QUESTIONS AND ANSWERS WITH PRODUCTS
matching=product[['asin','forum_id']]
product_question_asin = pd.merge(product_question,matching, on=['forum_id'])
matching=product_question_asin[['asin','forum_id','question_id']]
product_answer_asin=pd.merge(product_answer,matching, on=['question_id'])
# FEATURE ENGINEERING FOR QUESTIONS AND ANSWERS
# for questions
questions_sorted=product_question_asin.set_index('question_date').sort_index()
questions_sorted['sentiment_total']=questions_sorted.groupby('asin').sentiment.cumsum()
questions_sorted['number_of_questions']=questions_sorted.groupby('asin').sentiment.cumcount()+1
questions_sorted['sentiment_avg']=questions_sorted.sentiment_total/questions_sorted.number_of_questions
questions_sorted['polarity_total']=questions_sorted.groupby('asin').polarity.cumsum()
questions_sorted['polarity_avg']=questions_sorted.polarity_total/questions_sorted.number_of_questions
questions_sorted['subjectivity_total']=questions_sorted.groupby('asin').subjectivity.cumsum()
questions_sorted['subjectivity_avg']=questions_sorted.subjectivity_total/questions_sorted.number_of_questions
questions_sorted['len_question']=questions_sorted.question.apply(len)
questions_sorted['len_question_total']=questions_sorted.groupby('asin').len_question.cumsum()
questions_sorted['question_lenght_avg']=questions_sorted['len_question_total']/questions_sorted.number_of_questions
questions_sorted=questions_sorted.reset_index()
questions_sorted = questions_sorted.drop_duplicates(['asin','question_date'], keep='last')
questions_useful=questions_sorted[['question_date','asin', 'sentiment_total', 'number_of_questions', 'sentiment_avg',
'polarity_total', 'polarity_avg', 'subjectivity_total',
'subjectivity_avg', 'len_question_total',
'question_lenght_avg']]
questions_useful.columns=['date','asin', 'sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']
merged_ques = pd.merge(df_pred,questions_useful,how='left')
merged_ques[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']]=merged_ques.groupby('asin')[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']].fillna(method='ffill')
merged_ques[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']]=merged_ques.groupby('asin')[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']].fillna(method='bfill')
merged_ques_head=merged_ques.head(10000)
#for answers
product_answer_sorted=product_answer_asin.set_index('answer_date').sort_index()
product_answer_sorted['number_of_answers']=product_answer_sorted.groupby('asin').cumcount()+1
product_answer_sorted['sentiment_total']=product_answer_sorted.groupby('asin').sentiment.cumsum()
product_answer_sorted['sentiment_avg']=product_answer_sorted.sentiment_total/product_answer_sorted.number_of_answers
product_answer_sorted['polarity_total']=product_answer_sorted.groupby('asin').polarity.cumsum()
product_answer_sorted['polarity_avg']=product_answer_sorted.polarity_total/product_answer_sorted.number_of_answers
product_answer_sorted['subjectivity_total']=product_answer_sorted.groupby('asin').subjectivity.cumsum()
product_answer_sorted['subjectivity_avg']=product_answer_sorted.subjectivity_total/product_answer_sorted.number_of_answers
product_answer_sorted['len_answer']=product_answer_sorted.answer.apply(len)
product_answer_sorted['len_answer_total']=product_answer_sorted.groupby('asin').len_answer.cumsum()
product_answer_sorted['answer_lenght_avg']=product_answer_sorted['len_answer_total']/product_answer_sorted.number_of_answers
product_answer_sorted=product_answer_sorted.reset_index()
product_answer_useful=product_answer_sorted[['answer_date','asin',
'number_of_answers', 'sentiment_total', 'sentiment_avg',
'polarity_total', 'polarity_avg', 'subjectivity_total',
'subjectivity_avg', 'len_answer_total',
'answer_lenght_avg']]
product_answer_useful.columns=['date','asin',
'number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']
merged_ans = pd.merge(merged_ques,product_answer_useful,how='left')
merged_ans[ ['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']]=merged_ans.groupby('asin')[['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']].fillna(method='ffill')
merged_ans[ ['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']]=merged_ans.groupby('asin')[['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']].fillna(method='bfill')
merged_ans_head=merged_ans.head(20000)
merged_ans.len_answer_total.isna().sum()
merged_ans_dropedna=merged_ans.dropna()
len(df_pred[df_pred.decision==0])/len(df_pred)
#%%
# #### IMPROVED MODEL ####
#random forest classificiation
## keeping products with more than 150 data points
asd=merged_ans_dropedna.groupby(['asin']).count()
asd=asd[asd.date > 150]
asd.reset_index(level=0, inplace=True)
merged_ans_dropedna=merged_ans_dropedna[merged_ans_dropedna.asin.isin(asd.asin)]
merged_ans_dropedna=merged_ans_dropedna.reset_index(drop=True)
asins=merged_ans_dropedna.asin.unique().tolist()
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
#
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = merged_ans_dropedna[merged_ans_dropedna.asin==asins[i]]
importance = pd.DataFrame()
improved_model={}
improved_ytest={}
for key, value in d.items():
print(key)
X=value[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
# X=value.drop(['asin', 'name', 'date', 'list_price','lowest_usedprice','tradein_value','T','decision'],axis=1)
y=value.decision
## feature selection
randomforest = RandomForestClassifier(random_state=0)
model = randomforest.fit(X, y)
sfm = SelectFromModel(model, threshold=0.01)
sfm.fit(X, y)
for feature_list_index in sfm.get_support(indices=True):
print(X.columns[feature_list_index])
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
print(pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient']).sort_values('Gini coefficient',ascending=False))
temp_importance=pd.DataFrame([list(model.feature_importances_)],columns=X.columns)
key_index=[key]
temp_importance.index = key_index
importance=importance.append(temp_importance)
X_important = pd.DataFrame(sfm.transform(X))
X_important.columns = feature_name
# model
split_size = round(len(X_important)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
# X_train,X_test = X_important[0:len(X_important)-split_size], X_important[len(X_important)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestClassifier(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# prediction
y_test_pred=pd.DataFrame(model.predict(X_test))
test_pred=pd.concat([y_test,y_test_pred],axis=1)
improved_ytest[str(key)]=test_pred
improved_model[str(key)]=accuracy_score(y_test,y_test_pred)
precision, recall, fscore, support = score(y_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(y_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
recalls_df=pd.DataFrame(recalls, columns=['recall_hold','recall_buy'])
fscores_df=pd.DataFrame(fscores, columns=['fscore_hold','fscore_buy'])
supports_df=pd.DataFrame(supports, columns=['support_hold','support_buy'])
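# The improved-model scores are presumably assembled the same way as the benchmark
# scores above (a sketch mirroring that block, not the author's code):
#
#     improved_scores = pd.concat([products_df, accuracies_df, precisions_df, recalls_df, fscores_df, supports_df], axis=1)
#     improved_scores = improved_scores.dropna()
#     improved_scores = improved_scores[improved_scores['support_buy'] > 10]
#     improved_scores.precision_buy.mean()
#     improved_scores.recall_buy.mean()
#     improved_scores.accuracy.mean()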
from sklearn.dummy import DummyClassifier
from sklearn.metrics import roc_auc_score
from bac.models.model_base import ModelBase
import pandas as pd
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
class DummyModel(ModelBase):
def __init__(self, **kwargs):
"""Majority Class kwargs: strategy="constant", constant=1
"""
super().__init__()
self.model = DummyClassifier(**kwargs)
self.params = kwargs
def do_fit(self, X_train: pd.DataFrame, y_train: pd.Series, **kwargs):
"""Train model. Extends model_base abstract class.
Can be used to fit a Majority Class or Random Model
Args:
X_train (pd.DataFrame): feature set (user-day)
y_train (pd.Series): targets. required, but not used.
**fit_args: kwargs for DummyClassifier.fit() method.
-- not used here, just for compatibility
"""
logging.info("\nTraining Dummy Model...")
logger.info(f"Model Params: \n{self.params}")
self.model.fit(X_train, y_train)
self.fitted = True
def save_training_info(self, X_train: pd.DataFrame):
"""Save columns used in training as instance variables
Args:
X_train (pd.DataFrame): feature set for training
"""
assert self.fitted
self.columns = X_train.columns.tolist()
self.n_userdays = len(X_train)
def do_predict(self, X: pd.DataFrame) -> pd.Series:
"""
Return probability scores [0, 1] for each row of X.
First checks that columns match self.columns
"""
scores = self.model.predict_proba(X.values)
if len(scores.shape) == 2:
scores = scores[:, 1]
return pd.Series(scores, index=X.index)
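# Example usage (a sketch; the toy feature values below are illustrative assumptions
# and not part of the original module):
if __name__ == "__main__":
    X_demo = pd.DataFrame({"f1": [0.1, 0.4, 0.3, 0.9], "f2": [1, 0, 1, 0]})
    y_demo = pd.Series([1, 0, 1, 0])
    dummy = DummyModel(strategy="constant", constant=1)
    dummy.do_fit(X_demo, y_demo)
    dummy.save_training_info(X_demo)
    demo_scores = dummy.do_predict(X_demo)
    print(roc_auc_score(y_demo, demo_scores))  # constant scores give an AUC of 0.5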
from model.toolkits.parse_conf import parse_config_vina, parse_protein_vina, parse_ligand_vina
import os
import pandas as pd
import numpy as np
from pathlib import Path
import argparse
import rdkit
from rdkit import Chem, DataStructs
from rdkit.Chem import Descriptors, rdMolDescriptors, AllChem, QED
try:
from openbabel import pybel
except:
import pybel
# from metrics_utils import logP, QED, SA, weight, NP
from functools import partial
from multiprocessing import Pool
from tqdm.auto import tqdm
def walk_folder(path, suffix):
# processed = []
files = os.listdir(path)
print(f"{len(files)} files have been detected!")
outpdbqt = []
for file in files:
# print(file)
if suffix in file:
outpdbqt.append(file)
# base_name = os.path.basename(file)
# # print(base_name)
# simple_name = base_name.replace('_', '.').split('.')
# simple_name = simple_name[0]
# processed.append({'simple_name': simple_name,
# 'base_name': base_name, 'full_name': file})
# # print(processed)
return outpdbqt
def prepare_ecfp(args):
dataset = args.dataset
path = args.path
df = pd.read_csv(dataset)
# smi_df = pd.read_csv(args.smi)
# smi_df = smi_df.set_index('ChEMBL ID')
df['index'] = df['Molecule']
df = df.set_index('index')
# counts = 0
# for index in df.index:
# smi_index = index.strip().split("_")[0]
# counts += 1
# try:
# # print(smi_df.loc[smi_index, 'Smiles'])
# smiRaw = smi_df.loc[smi_index, 'Smiles']
# mol = pybel.readstring("smi", smiRaw)
# # strip salt
# mol.OBMol.StripSalts(10)
# mols = mol.OBMol.Separate()
# # print(pybel.Molecule(mols))
# mol = pybel.Molecule(mols[0])
# for imol in mols:
# imol = pybel.Molecule(imol)
# if len(imol.atoms) > len(mol.atoms):
# mol = imol
# smi_clean = mol.write('smi')
# smi_clean = smi_clean.replace('\n', '')
# smi_clean = smi_clean.split()[0]
# df.loc[index, 'smi'] = smi_clean
# print(f'NO.{counts}: {smi_clean} was processed successfully')
# except Exception as e:
# print(e)
# continue
df = df.dropna(axis=0, how='any')
smiList = df['smi']
index = df['Molecule']
# print(smiList)
new_index, ecfpList = [], []
for i in range(len(index)):
try:
smi = smiList[i]
if i % 1000 == 0:
print(f"index: {i}; smi= {smi}")
mol = Chem.MolFromSmiles(smi)
ecfp = AllChem.GetMorganFingerprintAsBitVect(
mol, 3, nBits=1024)
ecfp=[index[i]]+list(ecfp)
ecfpList.append(ecfp)
# new_index.append()
except:
continue
# molList = [Chem.MolFromSmiles(smi)
# for smi in smiList]
# ecfpList = [list(AllChem.GetMorganFingerprintAsBitVect(
# mol, 3, nBits=1024)) for mol in molList]
# print(ecfpList)
colName = ['index']+[f'ecfp{i}' for i in range(len(ecfpList[0])-1)]
# print(colName)
dfEcfp = pd.DataFrame(ecfpList, columns=colName)
# dfEcfp['index'] = new_index
dfEcfp = dfEcfp.set_index('index')
# print(dfEcfp)
# print(df)
dfEcfp = pd.concat([df, dfEcfp], axis=1)
dfEcfp = dfEcfp.dropna(axis=0, how='any')
suffix = '_ecfpSmi.csv'
outdf = dataset.replace('.csv', suffix)
# dfEcfp = dfEcfp.dropna(axis=0, how='any')
# if not os.path.exists(outdf):
dfEcfp.to_csv(outdf, index=False)
def prepare_DScorePPropIFP(args, getScore=True, getSMILES=True):
dataset = args.dataset
# smiDataset = args.smi
df = pd.read_csv(dataset)
df = df.set_index('Molecule')
smi_df = pd.read_csv(args.smi)
smi_df = smi_df.set_index('ChEMBL ID')
path = args.path
# index = df.index
counts = 0
if getSMILES:
for index in df.index:
smi_index = index.strip().split("_")[0]
try:
counts += 1
# print(smi_df.loc[smi_index, 'Smiles'])
smiRaw = smi_df.loc[smi_index, 'Smiles']
mol = pybel.readstring("smi", smiRaw)
# strip salt
mol.OBMol.StripSalts(10)
mols = mol.OBMol.Separate()
# print(pybel.Molecule(mols))
mol = pybel.Molecule(mols[0])
for imol in mols:
imol = pybel.Molecule(imol)
if len(imol.atoms) > len(mol.atoms):
mol = imol
smi_clean = mol.write('smi')
smi_clean = smi_clean.replace('\n', '')
smi_clean = smi_clean.split()[0]
df.loc[index, 'smi'] = smi_clean
print(f'NO.{counts}: {smi_clean} was processed successfully')
except Exception as e:
print(e)
continue
# df = df.dropna(axis=0, how='any')
if getScore:
files = walk_folder(path, '_out.pdbqt')
count = 0
for file in files:
count += 1
# if count > 10:
# break
print(f'count: {count}')
try:
# if 1:
outfile = os.path.join(path, file)
ligand_dic = parse_ligand_vina(outfile)
score = ligand_dic['scorelist']
filename = file.replace('_out.pdbqt', '')
cal_switch = 0
for pose_idx in range(5):
df.loc[f'{filename}_{pose_idx}',
'score_0'] = score[pose_idx]
smi = df.loc[f'{filename}_{pose_idx}', 'smi']
print(smi)
if cal_switch < 1:
mol = Chem.MolFromSmiles(smi)
logp = Descriptors.MolLogP(mol)
tpsa = Descriptors.TPSA(mol)
molwt = Descriptors.ExactMolWt(mol)
hba = rdMolDescriptors.CalcNumHBA(mol)
hbd = rdMolDescriptors.CalcNumHBD(mol)
qed = QED.qed(mol)
cal_switch = 3
df.loc[f'{filename}_{pose_idx}', 'logP'] = logp
df.loc[f'{filename}_{pose_idx}', 'TPSA'] = tpsa
df.loc[f'{filename}_{pose_idx}', 'MW'] = molwt
df.loc[f'{filename}_{pose_idx}', 'HBA'] = hba
df.loc[f'{filename}_{pose_idx}', 'HBD'] = hbd
df.loc[f'{filename}_{pose_idx}', 'QED'] = qed
# logp = logP(mol)
# df.loc[filename, 'logP'] = logp
# qed = QED(mol)
# df.loc[filename, 'QED'] = qed
# sa = SA(mol)
# df.loc[filename, 'SA'] = sa
# wt = weight(mol)
# df.loc[filename, 'Wt'] = wt
# np = NP(mol)
# df.loc[filename, 'NP'] = np
except Exception as e:
print(e)
continue
suffix = '_dScorePP.csv'
# df = df.sort_values(by='score_0', ascending=True)
outdf = dataset.replace('.csv', suffix)
df = df.dropna(axis=0, how='any')
# df['score_0'] = df['score_0'].astype(float)
# if not os.path.exists(outdf):
df.to_csv(outdf, index=True)
def prepare_IFPsmi(args, getScore=False, getSMILES=True):
dataset = args.dataset
# smiDataset = args.smi
df = pd.read_csv(dataset)
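# prepare_IFPsmi presumably continues along the same lines as prepare_DScorePPropIFP
# above (a sketch of the likely next steps, not the author's code):
#
#     df = df.set_index('Molecule')
#     smi_df = pd.read_csv(args.smi)
#     smi_df = smi_df.set_index('ChEMBL ID')
#     # then, if getSMILES: strip salts with pybel and store the cleaned SMILES in df['smi']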
import os
import pandas as pd
import argparse
from argparse import ArgumentParser
from datetime import timedelta
from datetime import datetime
from sklearn.model_selection import train_test_split
ARG_PARSER = ArgumentParser()
ARG_PARSER.add_argument("--test_size", default=0.1, type=float)
ARG_PARSER.add_argument("--val_size", default=0.05, type=float)
ARG_PARSER.add_argument("--seq_len", default=20, type=float)
ARG_PARSER.add_argument("--target_field", default="zigduino-3:temperature", type=str)
ARG_PARSER.add_argument("--path_folder", default='/scratch/stoll/BiGAN/data/ibat', type=str)
ARG_PARSER.add_argument("--dataset", default='raw_results_demo.csv', type=str)
ARGS = ARG_PARSER.parse_args(args=[])
# decay
def decay(data=None, seq_len=ARGS.seq_len, target_field=ARGS.target_field):
data['interval'] = 0
j = 0
for n in range(int(data.shape[0] / seq_len)):
i = 0
df_group = data.iloc[n * seq_len:(n * seq_len) + seq_len, :]
for index, row in df_group.iterrows(): # go over mask
try:
if(i == 0):
row['interval'] = 0
i = 1
else:
if(prev[target_field] == 1):
row['interval'] = timedelta.total_seconds(datetime.strptime(str(row['Date'])[:10] + " " + str(row['Time']), "%Y-%m-%d %H:%M:%S")
- datetime.strptime(str(prev['Date'])[:10] + " " + str(prev['Time']), "%Y-%m-%d %H:%M:%S"))
elif(prev[target_field] == 0):
row['interval'] = timedelta.total_seconds(datetime.strptime(str(row['Date'])[:10] + " " + str(row['Time']), "%Y-%m-%d %H:%M:%S")
- datetime.strptime(str(prev['Date'])[:10] + " " + str(prev['Time']), "%Y-%m-%d %H:%M:%S")) + prev['interval']
except ValueError as e:
print(e)
print(str(row['Date']) + " " + str(row['Time']))
break
prev = row
data.iloc[j, 3] = row['interval']
j = j + 1
data['interval'] = data['interval'].apply(lambda x: abs(x / 60))
return data
def rdecay(data=None, seq_len=ARGS.seq_len, target_field=ARGS.target_field):
data['intervalReverse'] = 0
j = data.shape[0] - 1
for n in range(int(data.shape[0] / seq_len)):
i = 0
df_group = data.iloc[n * seq_len:(n * seq_len) + seq_len, :]
df_group = df_group[::-1]
for index, row in df_group.iterrows(): # go over mask
if(i == 0):
row['intervalReverse'] = 0
i = 1
else:
if(prev[target_field] == 1):
row['intervalReverse'] = timedelta.total_seconds(datetime.strptime(str(row['Date'])[:10] + " " + str(row['Time']), "%Y-%m-%d %H:%M:%S")
- datetime.strptime(str(prev['Date'])[:10] + " " + str(prev['Time']), "%Y-%m-%d %H:%M:%S"))
elif(prev[target_field] == 0):
row['intervalReverse'] = timedelta.total_seconds(datetime.strptime(str(row['Date'])[:10] + " " + str(row['Time']), "%Y-%m-%d %H:%M:%S")
                                                                     - datetime.strptime(str(prev['Date'])[:10] + " " + str(prev['Time']), "%Y-%m-%d %H:%M:%S")) + prev['intervalReverse']  # accumulate the reverse-direction gap
prev = row
data.iloc[j, 4] = row['intervalReverse']
j = j - 1
data['intervalReverse'] = data['intervalReverse'].apply(lambda x: abs(x / 60))
return data
def read_dataset(path_folder=ARGS.path_folder, dataset=ARGS.dataset):
path_dataset = os.path.join(path_folder, "initial", dataset)
dataset = pd.read_csv(path_dataset, header=0).fillna(-200)
dataset["timestamp"] = pd.to_datetime(dataset["date_format"])
dataset["Date"] = pd.to_datetime(dataset["timestamp"].dt.date, utc=False)
dataset["Time"] = dataset["timestamp"].dt.time.astype(str)
dataset["Month"] = dataset.Date.dt.month
cols = dataset.columns.to_list()
cols.remove("Date")
cols.remove("Time")
cols.remove("Month")
cols = ["Date", "Time", "Month"] + cols
dataset = dataset[cols]
dataset.drop(["date_format", "timestamp"], axis=1, inplace=True)
return dataset
def add_epoch(dataset=None, mask=None):
mask[['epoch_format']] = dataset[['epoch_format']].copy()
return mask
def group_by_seq(dataset=None, seq_len=ARGS.seq_len):
rows = dataset.groupby('Month').count()['Date'] % seq_len
rows = pd.DataFrame(rows)
rows = rows.reset_index()
final = pd.DataFrame()
for seq in range(rows.shape[0]):
temp = dataset[dataset['Month'] == rows.iloc[seq, 0]]
nrows = temp.shape[0] - rows.iloc[seq, 1]
temp = temp.iloc[0:nrows, :]
final = | pd.concat([final, temp]) | pandas.concat |
import time
import pandas as pd
import copy
import numpy as np
from shapely import affinity
from shapely.geometry import Polygon
import geopandas as gpd
def cal_arc(p1, p2, degree=False):
dx, dy = p2[0] - p1[0], p2[1] - p1[1]
arc = np.pi - np.arctan2(dy, dx)
return arc / np.pi * 180 if degree else arc
def helper_print_with_time(*arg, sep=','):
print(time.strftime("%H:%M:%S", time.localtime()), sep.join(map(str, arg)))
def cal_euclidean(p1, p2):
return np.linalg.norm([p1[0] - p2[0], p1[1] - p2[1]])
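# Quick sanity examples (illustrative only): cal_euclidean((0, 0), (3, 4)) == 5.0, and
# cal_arc((0, 0), (1, 1), degree=True) == 180 - 45 == 135, because the arc is taken as
# pi minus the atan2 angle of the segment.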
def get_shape_mbr(df_shape):
oid = 'OID' if 'FID' in df_shape.columns else 'OBJECTID'
df_mbr = copy.deepcopy(df_shape[[oid, 'geometry']])
df_mbr.reset_index(drop=True, inplace=True)
df_mbr['geometry'] = pd.Series([geo.minimum_rotated_rectangle for geo in df_mbr['geometry']])
df_mbr['xy'] = pd.Series([list(geo.exterior.coords) for geo in df_mbr['geometry']])
#
df_mbr['x0'] = pd.Series([xy[0][0] for xy in df_mbr['xy']])
df_mbr['x1'] = pd.Series([xy[1][0] for xy in df_mbr['xy']])
df_mbr['x2'] = pd.Series([xy[2][0] for xy in df_mbr['xy']])
df_mbr['y0'] = pd.Series([xy[0][1] for xy in df_mbr['xy']])
df_mbr['y1'] = pd.Series([xy[1][1] for xy in df_mbr['xy']])
df_mbr['y2'] = pd.Series([xy[2][1] for xy in df_mbr['xy']])
#
df_mbr['l1'] = pd.Series(
[cal_euclidean([x0, y0], [x1, y1]) for x0, y0, x1, y1 in df_mbr[['x0', 'y0', 'x1', 'y1']].values])
df_mbr['l2'] = pd.Series(
[cal_euclidean([x0, y0], [x1, y1]) for x0, y0, x1, y1 in df_mbr[['x1', 'y1', 'x2', 'y2']].values])
df_mbr['a1'] = pd.Series(
[cal_arc([x0, y0], [x1, y1], True) for x0, y0, x1, y1 in df_mbr[['x0', 'y0', 'x1', 'y1']].values])
df_mbr['a2'] = pd.Series(
[cal_arc([x0, y0], [x1, y1], True) for x0, y0, x1, y1 in df_mbr[['x1', 'y1', 'x2', 'y2']].values])
#
df_mbr['longer'] = df_mbr['l1'] >= df_mbr['l2']
#
df_mbr['lon_len'] = pd.Series([l1 if longer else l2 for l1, l2, longer in df_mbr[['l1', 'l2', 'longer']].values])
df_mbr['short_len'] = pd.Series([l2 if longer else l1 for l1, l2, longer in df_mbr[['l1', 'l2', 'longer']].values])
df_mbr['lon_arc'] = pd.Series([a1 if longer else a2 for a1, a2, longer in df_mbr[['a1', 'a2', 'longer']].values])
df_mbr['short_arc'] = pd.Series([a2 if longer else a1 for a1, a2, longer in df_mbr[['a1', 'a2', 'longer']].values])
df_mbr.drop(['x0', 'x1', 'x2', 'y0', 'y1', 'y2', 'l1', 'l2', 'a1', 'a2'], axis=1, inplace=True)
#
df_shape = pd.merge(df_shape, df_mbr[[oid, 'lon_len', 'short_len', 'lon_arc', 'short_arc']], how='left', on=oid)
return df_mbr, df_shape
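# Hypothetical usage sketch (added for illustration, not part of the original pipeline):
# a one-row GeoDataFrame with an 'OBJECTID' key shows the minimum-rotated-rectangle features
# that get_shape_mbr() appends; for this axis-aligned 4 x 2 rectangle, lon_len/short_len
# should come out near 4 and 2.
def _example_get_shape_mbr():
    gdf = gpd.GeoDataFrame({'OBJECTID': [1]},
                           geometry=[Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])])
    _, df_shape = get_shape_mbr(gdf)
    return df_shape[['OBJECTID', 'lon_len', 'short_len', 'lon_arc', 'short_arc']]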
def get_shape_normalize_final(df_use, if_scale_y):
df_use = copy.deepcopy(df_use)
#
df_use['mu_x'] = pd.Series([geo.centroid.x for geo in df_use['geometry']])
df_use['mu_y'] = pd.Series([geo.centroid.y for geo in df_use['geometry']])
df_use['geometry'] = pd.Series(
[affinity.translate(geo, -mx, -my) for mx, my, geo in df_use[['mu_x', 'mu_y', 'geometry']].values])
df_use['x_max'] = pd.Series([max(geo.exterior.xy[0]) for geo in df_use['geometry']])
df_use['x_min'] = pd.Series([min(geo.exterior.xy[0]) for geo in df_use['geometry']])
df_use['scale_x'] = (df_use['x_max'] - df_use['x_min'])
df_use['y_max'] = pd.Series([max(geo.exterior.xy[1]) for geo in df_use['geometry']])
df_use['y_min'] = pd.Series([min(geo.exterior.xy[1]) for geo in df_use['geometry']])
df_use['scale_y'] = (df_use['y_max'] - df_use['y_min'])
if if_scale_y:
df_use['geometry'] = pd.Series(
[affinity.scale(geo, 1 / del_x, 1 / del_y, origin='centroid') for del_x, del_y, geo in
df_use[['scale_x', 'scale_y', 'geometry']].values])
else:
df_use['geometry'] = pd.Series([affinity.scale(geo, 1 / del_x, 1 / del_x, origin='centroid') for del_x, geo in
df_use[['scale_x', 'geometry']].values])
df_use.drop(['mu_x', 'mu_y', 'scale_x', 'scale_y', 'x_max', 'x_min', 'y_max', 'y_min'], axis=1, inplace=True)
return df_use
def simplify_cos_on_node(df_node, tor_cos):
oid = 'OBJECTID'
df_line = copy.deepcopy(df_node)
#
df_line = df_line[df_line['PID'] != 0].reset_index(drop=True)
df_line['PID'] = df_line['PID'] - 1
#
coor_dic = {(int(oid), int(pid)): [x, y] for oid, pid, x, y in df_line[['OBJECTID', 'PID', 'x', 'y']].values}
df_line['x_l'] = pd.Series([coor_dic[(oid, (pid - 1 if pid >= 1 else pnum - 2))][0] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
df_line['y_l'] = pd.Series([coor_dic[(oid, (pid - 1 if pid >= 1 else pnum - 2))][1] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
df_line['x_r'] = pd.Series([coor_dic[(oid, (pid + 1 if pid < (pnum - 2) else 0))][0] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
df_line['y_r'] = pd.Series([coor_dic[(oid, (pid + 1 if pid < (pnum - 2) else 0))][1] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
#
df_line['dx_l'] = pd.Series([x - xl for x, xl in df_line[['x', 'x_l']].values])
df_line['dy_l'] = pd.Series([y - yl for y, yl in df_line[['y', 'y_l']].values])
df_line['dx_r'] = pd.Series([xr - x for x, xr in df_line[['x', 'x_r']].values])
df_line['dy_r'] = pd.Series([yr - y for y, yr in df_line[['y', 'y_r']].values])
df_line['cos'] = pd.Series(
[(dxl * dxr + dyl * dyr) / (np.sqrt(dxl * dxl + dyl * dyl) * np.sqrt(dxr * dxr + dyr * dyr)) for
dxl, dyl, dxr, dyr in df_line[['dx_l', 'dy_l', 'dx_r', 'dy_r']].values])
#
df_line = df_line[df_line['cos'] <= tor_cos].reset_index(drop=True)
#
df_line = reset_node_PID(df_line)
return df_line
def reset_node_PID(df_node):
oid = 'OBJECTID'
df_node.reset_index(inplace=True, drop=False)
dft = df_node.groupby([oid], as_index=False)['index'].agg({'id_min': 'min'})
df_node = pd.merge(df_node, dft, how='left', on=oid)
df_node['PID'] = df_node['index'] - df_node['id_min']
df_node.drop(['index', 'id_min'], axis=1, inplace=True)
return df_node
def node_to_polygon(df_node):
df_node['xy'] = pd.Series([(x, y) for x, y in df_node[['x', 'y']].values])
dft = df_node.groupby(['OBJECTID'], as_index=True)['xy'].apply(list)
dft = dft.reset_index(drop=False)
dft['geometry'] = pd.Series([Polygon(xy) for xy in dft['xy']])
dft = gpd.GeoDataFrame(dft)
return dft
def get_shape_simplify(df_rotate, tor_dist, tor_cos, simplify_type=1):
df_node = get_node_features(df_rotate)
df_node = simplify_cos_on_node(df_node, tor_cos)
df_poly = node_to_polygon(df_node)
df_poly = pd.merge(df_poly, df_rotate[['OBJECTID', 'lon_len', 'short_len']], how='left', on='OBJECTID')
df_poly['diag'] = pd.Series([w * h / np.sqrt(w * w + h * h) for w, h in df_poly[['lon_len', 'short_len']].values])
df_poly = simplify_dp_on_shape(df_poly, tor_dist, type=simplify_type)
df_poly.drop(['xy', 'diag'], axis=1, inplace=True)
return df_poly
def simplify_dp_on_shape(df_shape, tor=0.000001, type=1):
if type == 1:
df_shape['geometry'] = pd.Series(
[geo.simplify(tor * diag) for geo, diag in df_shape[['geometry', 'diag']].values])
else:
df_shape['geometry'] = df_shape['geometry'].simplify(tor)
return df_shape
def reset_start_point(df_poly):
dfq = copy.deepcopy(df_poly)
dfn = get_node_features(dfq).query('PID!=0')
dfn['s'] = dfn['x'] + dfn['y']
dft = dfn.groupby(['OBJECTID'], as_index=False)['s'].agg({'s_min': 'min'})
dfn = pd.merge(dfn, dft, how='left', on='OBJECTID')
dfn['ds'] = abs(dfn['s'] - dfn['s_min'])
dfn['flag'] = dfn['ds'] < 10e-10
dft = dfn.sort_values(['OBJECTID', 'flag'], ascending=False).groupby(['OBJECTID']).head(1)
dic_temp = {row['OBJECTID']: row['PID'] for index, row in dft.iterrows()}
dfn = dfn.set_index('PID')
dft = [pd.concat([group.loc[dic_temp[index]:], group.loc[:dic_temp[index]]], axis=0)
for index, group in dfn.groupby('OBJECTID')
]
dfn = pd.concat(dft, axis=0).reset_index(drop=False)
dfn = node_to_polygon(dfn)
cols = [x for x in dfq.columns if 'geometry' not in x]
dfn = pd.merge(dfq[cols], dfn[['OBJECTID', 'geometry']], how='left', on='OBJECTID')
dfn = gpd.GeoDataFrame(dfn)
return dfn
def get_node_features(df_shape):
oid = 'OBJECTID'
df_shape['xy'] = pd.Series(['|'.join(map(str, geo.exterior.coords)) for geo in df_shape['geometry']])
df_shape['p_num'] = pd.Series([geo.exterior.coords.__len__() for geo in df_shape['geometry']])
df_shape['length'] = df_shape['geometry'].exterior.length
df_node = (df_shape.set_index([oid, 'p_num', 'length'])['xy']
.str.split('|', expand=True)
.stack()
.reset_index(level=3, drop=True)
.reset_index(name='xy'))
df_node['x'] = pd.Series([float(xy.split(',')[0][1:]) for xy in df_node['xy'].values])
df_node['y'] = pd.Series([float(xy.split(',')[1][:-1]) for xy in df_node['xy'].values])
#
df_node.reset_index(inplace=True, drop=False)
dft = df_node.groupby([oid], as_index=False)['index'].agg({'id_min': 'min'})
df_node = pd.merge(df_node, dft, how='left', on=oid)
df_node['PID'] = df_node['index'] - df_node['id_min']
df_node.drop(['xy', 'id_min', 'index'], axis=1, inplace=True)
return df_node
def get_line_features_final(df_node, POINTS_SHAPE=20):
#
df_line = copy.deepcopy(df_node)
#
df_line['next_x'] = df_line.groupby(['OBJECTID'], as_index=False)['x'].shift(-1)
df_line['next_y'] = df_line.groupby(['OBJECTID'], as_index=False)['y'].shift(-1)
df_line['dx'] = df_line['next_x'] - df_line['x']
df_line['dy'] = df_line['next_y'] - df_line['y']
df_line['dl'] = | pd.Series([(dx ** 2 + dy ** 2) ** 0.5 for dx, dy in df_line[['dx', 'dy']].values]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
import argparse
import os
import glob
import itertools
from pathlib import Path
from typing import Dict, List, Tuple
from collections import defaultdict
import json
import time
import logging
import random
import pandas as pd
import numpy as np
import re
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateLogger
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
T5ForConditionalGeneration,
T5Tokenizer,
get_linear_schedule_with_warmup
)
from metrics import (
calculate_rouge,
calculate_bleu,
calculate_meteor,
calculate_chrf
)
logger = logging.getLogger(__name__)
class WebNLG(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length,
max_target_length, type_path, prefix="", **dataset_kwargs):
super().__init__()
self.tokenizer = tokenizer
self.prefix = prefix if prefix is not None else ""
self.pad_token_id = self.tokenizer.pad_token_id
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.inputs = []
self.targets = []
self.dataset_kwargs = dataset_kwargs
self._build(type_path)
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_line = self.prefix + str(self.inputs[index]).rstrip("\n")
tgt_line = str(self.targets[index]).rstrip("\n")
return {"tgt_texts": tgt_line, "src_texts": source_line, "id": index}
def collate_fn(self, batch):
batch_encoding = self.tokenizer.prepare_seq2seq_batch(
[x["src_texts"] for x in batch],
tgt_texts=[x["tgt_texts"] for x in batch],
max_length=self.max_source_length,
max_target_length=self.max_target_length,
return_tensors="pt",
**self.dataset_kwargs,
).data
batch_encoding["ids"] = torch.tensor([x["id"] for x in batch])
return batch_encoding
def _build(self, type_path):
if type_path == 'train':
df = pd.read_csv('Datasets/webnlg_train.csv')
elif type_path == 'eval':
df = pd.read_csv('Datasets/webnlg_dev.csv')
else:
df = pd.read_csv('Datasets/webnlg_test.csv')
for index, row in df.iterrows():
line = row['input_text']
target = row['target_text']
self.inputs.append(line)
self.targets.append(target)
class DART(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length,
max_target_length, type_path, prefix="", **dataset_kwargs):
super().__init__()
self.tokenizer = tokenizer
self.prefix = prefix if prefix is not None else ""
self.pad_token_id = self.tokenizer.pad_token_id
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.inputs = []
self.targets = []
self.dataset_kwargs = dataset_kwargs
self._build(type_path)
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_line = self.prefix + str(self.inputs[index]).rstrip("\n")
tgt_line = str(self.targets[index]).rstrip("\n")
return {"tgt_texts": tgt_line, "src_texts": source_line, "id": index}
def collate_fn(self, batch):
batch_encoding = self.tokenizer.prepare_seq2seq_batch(
[x["src_texts"] for x in batch],
tgt_texts=[x["tgt_texts"] for x in batch],
max_length=self.max_source_length,
max_target_length=self.max_target_length,
return_tensors="pt",
**self.dataset_kwargs,
).data
batch_encoding["ids"] = torch.tensor([x["id"] for x in batch])
return batch_encoding
def _build(self, type_path):
if type_path == 'train':
df = | pd.read_csv('Datasets/dart_train.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 12:51:40 2019
@author: 561719
"""
##########################Data Normalization#######################################################
import pandas as pd
import numpy as np
R1=pd.read_csv("C:\\Users\\561719\\Documents\\Imarticus_MLP\\NYC_property_sales\\nyc-rolling-sales.csv")
R2=R1.iloc[:,1:]
print(R2.head())
# Data Normalization
R2.replace(to_replace=' - ',value='NA',inplace=True)
R2.head()
sp=R2.loc[R2['SALE PRICE'] != 'NA']
sp1=sp.loc[sp['LAND SQUARE FEET'] != 'NA']
sp2=sp1.loc[sp1['GROSS SQUARE FEET'] != 'NA']
sp2['SALE PRICE']=pd.to_numeric(sp2['SALE PRICE'])
sp2['LAND SQUARE FEET']=pd.to_numeric(sp2['LAND SQUARE FEET'])
sp2['GROSS SQUARE FEET']=pd.to_numeric(sp2['GROSS SQUARE FEET'])
mean_sp=int(sp2['SALE PRICE'].mean())
mean_lsq=int(sp2['LAND SQUARE FEET'].mean())
mean_gsq=int(sp2['GROSS SQUARE FEET'].mean())
R2['SALE PRICE'].replace(to_replace='NA',value=mean_sp,inplace=True)
R2['LAND SQUARE FEET'].replace(to_replace='NA',value=mean_lsq,inplace=True)
R2['GROSS SQUARE FEET'].replace(to_replace='NA',value=mean_gsq,inplace=True)
R2.dtypes
R2['SALE PRICE']=pd.to_numeric(R2['SALE PRICE'])
R2['LAND SQUARE FEET']=pd.to_numeric(R2['LAND SQUARE FEET'])
R2['GROSS SQUARE FEET']=pd.to_numeric(R2['GROSS SQUARE FEET'])
R2.dtypes
R2['SALE DATE']= | pd.to_datetime(R2['SALE DATE']) | pandas.to_datetime |
"""Test cases for Streamlit app functionality."""
import sqlite3
import unittest
import pandas as pd
from mock import patch
from strigiform.app.streamlit import add_line_break
from strigiform.app.streamlit import get_data
from strigiform.app.streamlit import get_period_stats
class Streamlit(unittest.TestCase):
"""Testing class."""
def get_test_data(self):
"""Test data select."""
self.conn = sqlite3.connect("./tests/mock_db.db")
# Read mock data and insert into mock db
test_data = pd.read_csv("./tests/mock_data.csv")
test_data.to_sql("mock_data", self.conn, if_exists="replace")
self.sql_query = """select * from mock_data"""
self.sql_query_empty = (
"""select * from mock_data where scientific_name = 'mock'"""
)
self.sql_query_no_date = """select scientific_name from mock_data"""
self.df = get_data(self.sql_query, self.conn)
self.df_empty = pd.DataFrame()
self.df_no_date = self.df.drop(["date"], axis=1)
def test_get_data(self):
"""Test for get_data function of Streamlit."""
self.get_test_data()
df = get_data(self.sql_query, self.conn)
assert len(df) > 0
assert len(df.columns) > 0
def test_data_length(self):
"""Test for get_data function of Streamlit."""
self.get_test_data()
with self.assertRaises(ValueError):
get_data(self.sql_query_empty, self.conn)
def test_date_column(self):
"""Test for get_data function of Streamlit."""
self.get_test_data()
with self.assertRaises(AttributeError):
get_data(self.sql_query_no_date, self.conn)
def test_get_period_stats_date(self):
"""Test for retreiving period statistics."""
self.get_test_data()
with self.assertRaises(ValueError):
get_period_stats(
self.df, pd.to_datetime("2021-02-01"), pd.to_datetime("2021-01-01")
)
def test_get_period_stats_df(self):
"""Test for retreiving period statistics."""
self.get_test_data()
with self.assertRaises(AttributeError):
get_period_stats(
self.df_empty,
| pd.to_datetime("2021-01-01") | pandas.to_datetime |
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
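# For example (illustrative): _clean_dict({1: "a", (2, 3): "b"}) -> {"1": "a", "(2, 3)": "b"}.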
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads( | ujson.dumps(labelled_input) | pandas._libs.json.dumps |
import preprocessor as p
import re
import wordninja
import csv
import pandas as pd
# Data Loading
def load_data(filename):
filename = [filename]
concat_text = | pd.DataFrame() | pandas.DataFrame |
from unittest import TestCase
import pandas as pd
import numpy as np
import pandas_validator as pv
from pandas_validator.core.exceptions import ValidationError
class BaseSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.BaseSeriesValidator(series_type=np.int64)
def test_is_valid_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_should_return_true_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertIsNone(self.validator.validate(series))
def test_should_return_false_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertRaises(ValidationError, self.validator.validate, series)
class IntegerSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.IntegerSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0, 1, 2])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-1, 0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = pd.Series([0, 1, 2, 3])
self.assertFalse(self.validator.is_valid(series))
class FloatSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.FloatSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = | pd.Series([0., 1., 2.]) | pandas.Series |
#!/usr/bin/env python3
# Converts PLINK covariate and fam file into a covariate file for Gemma
import sys
import pandas as pd
import argparse
import numpy as np
EOL=chr(10)
def parseArguments():
    parser = argparse.ArgumentParser(description='Convert PLINK covariate/fam data into phenotype and covariate files for Gemma, BOLT-LMM or FastLMM')
parser.add_argument('--inp_fam',type=str,required=True)
parser.add_argument('--data',type=str,required=True,help="File with phenotype and covariate data")
parser.add_argument('--cov_list', type=str,help="comma separated list of covariates",default="")
    parser.add_argument('--pheno',type=str,required=True,help="comma separated list of phenotype columns")
parser.add_argument('--phe_out', type=str,help="output fam file")
parser.add_argument('--cov_out', type=str,help="output covariate file")
parser.add_argument('--gxe_out', type=str,help="output gxe file (gemma use)")
parser.add_argument('--gxe', type=str,help="gxe covariate (gemma use)")
parser.add_argument('--form_out', type=int,help="format output : 1:Gemma, 2:boltlmm, 3:FastLmm", required=True)
args = parser.parse_args()
return args
def check_missing(x, MissingOut):
if x in ["NA","na","null","-9"] or ((type(x)==type("x")) and (len(x)<1)) or x==-9 or x==r"\N" or x=="-":
return MissingOut
else:
return x
def getColNames(label_str):
col_names = []
col_fns = []
for lab in label_str:
det = lab.split("/")
if len(det)>1:
col_names.append(det[0])
col_fns.append(eval(det[1]))
else:
col_names.append(lab)
col_fns.append(False)
return col_names,col_fns
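# Example of the "name/function" convention parsed above (illustrative only):
# getColNames(["bmi", "weight/np.log"]) returns (["bmi", "weight"], [False, np.log]),
# i.e. an optional transform can be attached to a column name with a slash.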
def errorMessage10(phe):
print("""
A problem has been detected in file <%s> column <%s>.
There is some invalid data. I regret I can't tell you which row.
Please check -- the data should be numeric only.
If there is missing data, please use NA
"""%(args.data,phe))
args = parseArguments()
TAB =chr(9)
phenos = args.pheno.split(",")
use= ["FID","IID"]
useI = ["FID","IID"]
if args.cov_list:
covariates = args.cov_list.split(",")
use = useI+covariates
else:
covariates = []
if args.gxe:
gxe=[args.gxe]
gxe_use = useI+gxe
else :
gxe=[]
if args.form_out==1 :
MissingOut="NA"
elif args.form_out==2:
MissingOut="NA"
elif args.form_out==3:
MissingOut="-9"
else :
print("--form_out : "+str(args.form_out)+" not define")
sys.exit(11)
pheno_labels, pheno_transform = getColNames(phenos)
covar_labels, cover_transform = getColNames(use)
gxe_labels, gxe_transform = getColNames(gxe)
usecols = covar_labels+pheno_labels+gxe_labels
datad = | pd.read_csv(args.data,delim_whitespace=True,usecols=usecols) | pandas.read_csv |
import pandas as pd
import yaml
import os
from . import DATA_FOLDER, SCHEMA, SYNONYM_RULES
def run(
rule_file: str = SYNONYM_RULES,
schema_file: str = SCHEMA,
data_folder: str = DATA_FOLDER,
):
"""Add rules to capture more terms as synonyms during named entity
recognition (NER)
    :param rule_file: YAML file that contains the rules,
        defaults to SYNONYM_RULES
    :param schema_file: YAML file that provides the schema, defaults to SCHEMA
    :param data_folder: Data folder where the input termlists are located and
        the output files are saved,
        defaults to DATA_FOLDER
"""
with open(rule_file, "r") as rules, open(schema_file, "r") as sf:
try:
rule_book = yaml.safe_load(rules)
schema = yaml.safe_load(sf)
prefix_cols = ["id", "text"]
rules_cols = schema["classes"]["Rule"]["slots"]
prefix_df = pd.DataFrame(columns=prefix_cols)
rules_df = pd.DataFrame(columns=rules_cols)
terms_cols = [
"cui",
"source",
"id",
"match_term",
"preferred_term",
"category",
]
for key, value in rule_book["prefixes"].items():
row = pd.DataFrame([[value, key]], columns=prefix_cols)
prefix_df = | pd.concat([prefix_df, row]) | pandas.concat |
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import re
import sys
import pandas as pd
import numpy as np
import glob
from sklearn.feature_extraction.text import CountVectorizer
from evaluation.experiment import Experiment
def convert_argmin(x):
label = x.split('-')[0]
if label == 'I':
return 0
if label == 'O':
return 1
if label == 'B':
return 2
def convert_7class_argmin(x):
label = x.split('-')[0]
if label == 'I':
label = x.split('-')[1].split(':')[0]
if label == 'MajorClaim':
return 0
elif label == 'Claim':
return 3
elif label == 'Premise':
return 5
if label == 'O':
return 1
if label == 'B':
label = x.split('-')[1].split(':')[0]
if label == 'MajorClaim':
return 2
elif label == 'Claim':
return 4
elif label == 'Premise':
return 6
def convert_crowdsourcing(x):
if x == 'Premise-I':
return 0
elif x == 'O':
return 1
elif x== 'Premise-B':
return 2
else:
return -1
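# Label encoding shared by the converters above (read off the mappings themselves):
# convert_argmin collapses tags to I=0, O=1, B=2; convert_7class_argmin keeps the class,
# giving MajorClaim I/B = 0/2, Claim I/B = 3/4, Premise I/B = 5/6 with O=1; and
# convert_crowdsourcing maps Premise-I/O/Premise-B to 0/1/2, with -1 for anything else.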
def load_argmin_data():
path = '../../data/bayesian_sequence_combination/data/argmin/'
if not os.path.isdir(path):
os.mkdir(path)
all_files = glob.glob(os.path.join(path, "*.dat.out"))
df_from_each_file = (pd.read_csv(f, sep='\t', usecols=(0, 5, 6), converters={5:convert_argmin, 6:convert_argmin},
header=None, quoting=3) for f in all_files)
concatenated = pd.concat(df_from_each_file, ignore_index=True, axis=1).as_matrix()
annos = concatenated[:, 1::3]
for t in range(1, annos.shape[0]):
annos[t, (annos[t-1, :] == 1) & (annos[t, :] == 0)] = 2
gt = concatenated[:, 2][:, None]
doc_start = np.zeros((annos.shape[0], 1))
doc_start[np.where(concatenated[:, 0] == 1)] = 1
# correct the base classifiers
non_start_labels = [0]
start_labels = [2] # values to change invalid I tokens to
for l, label in enumerate(non_start_labels):
start_annos = annos[doc_start.astype(bool).flatten(), :]
start_annos[start_annos == label] = start_labels[l]
annos[doc_start.astype(bool).flatten(), :] = start_annos
np.savetxt('../../data/bayesian_sequence_combination/data/argmin/annos.csv', annos, fmt='%s', delimiter=',')
np.savetxt('../../data/bayesian_sequence_combination/data/argmin/gt.csv', gt, fmt='%s', delimiter=',')
np.savetxt('../../data/bayesian_sequence_combination/data/argmin/doc_start.csv', doc_start, fmt='%s', delimiter=',')
return gt, annos, doc_start
def load_argmin_7class_data():
path = '../../data/bayesian_sequence_combination/data/argmin/'
if not os.path.isdir(path):
os.mkdir(path)
all_files = glob.glob(os.path.join(path, "*.dat.out"))
df_from_each_file = (pd.read_csv(f, sep='\t', usecols=(0, 5, 6), converters={5:convert_7class_argmin,
6:convert_7class_argmin}, header=None, quoting=3) for f in all_files)
concatenated = pd.concat(df_from_each_file, ignore_index=True, axis=1).as_matrix()
annos = concatenated[:, 1::3]
gt = concatenated[:, 2][:, None]
doc_start = np.zeros((annos.shape[0], 1))
doc_start[np.where(concatenated[:, 0] == 1)] = 1
# correct the base classifiers
non_start_labels = [0, 3, 5]
start_labels = [2, 4, 6] # values to change invalid I tokens to
for l, label in enumerate(non_start_labels):
start_annos = annos[doc_start.astype(bool).flatten(), :]
start_annos[start_annos == label] = start_labels[l]
annos[doc_start.astype(bool).flatten(), :] = start_annos
outpath = '../../data/bayesian_sequence_combination/data/argmin7/'
if not os.path.isdir(outpath):
os.mkdir(outpath)
np.savetxt(outpath + 'annos.csv', annos, fmt='%s', delimiter=',')
np.savetxt(outpath + 'gt.csv', gt, fmt='%s', delimiter=',')
np.savetxt(outpath + 'doc_start.csv', doc_start, fmt='%s', delimiter=',')
return gt, annos, doc_start
def load_crowdsourcing_data():
path = '../../data/bayesian_sequence_combination/data/crowdsourcing/'
if not os.path.isdir(path):
os.mkdir(path)
all_files = glob.glob(os.path.join(path, "exported*.csv"))
print(all_files)
convs = {}
for i in range(1,50):
convs[i] = convert_crowdsourcing
df_from_each_file = [pd.read_csv(f, sep=',', header=None, skiprows=1, converters=convs) for f in all_files]
concatenated = pd.concat(df_from_each_file, ignore_index=False, axis=1).as_matrix()
concatenated = np.delete(concatenated, 25, 1);
annos = concatenated[:,1:]
doc_start = np.zeros((annos.shape[0],1))
doc_start[0] = 1
for i in range(1,annos.shape[0]):
if '_00' in str(concatenated[i,0]):
doc_start[i] = 1
np.savetxt('../../data/bayesian_sequence_combination/data/crowdsourcing/gen/annos.csv', annos, fmt='%s', delimiter=',')
np.savetxt('../../data/bayesian_sequence_combination/data/crowdsourcing/gen/doc_start.csv', doc_start, fmt='%s', delimiter=',')
return annos, doc_start
def build_feature_vectors(text_data_arr):
text_data_arr = np.array(text_data_arr).astype(str)
vectorizer = CountVectorizer()
count_vectors = vectorizer.fit_transform(text_data_arr) # each element can be a sentence or a single word
count_vectors = count_vectors.toarray() # each row will be a sentence
return count_vectors, vectorizer.get_feature_names()
def _load_pico_feature_vectors_from_file(corpus):
all_text = []
for docid in corpus.docs:
text_d = corpus.get_doc_text(docid)
all_text.append(text_d)
feature_vecs, _ = build_feature_vectors(all_text)
return feature_vecs
def _load_bio_folder(anno_path_root, folder_name):
'''
Loads one data directory out of the complete collection.
:return: dataframe containing the data from this folder.
'''
from data.pico.corpus import Corpus
DOC_PATH = os.path.expanduser("../../data/bayesian_sequence_combination/data/bio-PICO/docs/")
ANNOTYPE = 'Participants'
anno_path = anno_path_root + folder_name
anno_fn = anno_path + '/PICO-annos-crowdsourcing.json'
gt_fn = anno_path + '/PICO-annos-professional.json'
corpus = Corpus(doc_path=DOC_PATH, verbose=False)
corpus.load_annotations(anno_fn, docids=None)
if os.path.exists(gt_fn):
corpus.load_groundtruth(gt_fn)
# get a list of the docids
docids = []
workerids = np.array([], dtype=str)
all_data = None
#all_fv = _load_pico_feature_vectors_from_file(corpus)
for d, docid in enumerate(corpus.docs):
docids.append(docid)
annos_d = corpus.get_doc_annos(docid, ANNOTYPE)
spacydoc = corpus.get_doc_spacydoc(docid)
text_d = spacydoc #all_fv[d]
doc_length = len(text_d)
doc_data = None
for workerid in annos_d:
print('Processing data for doc %s and worker %s' % (docid, workerid))
if workerid not in workerids:
workerids = np.append(workerids, workerid)
# add the worker to the dataframe if not already there
if doc_data is None or workerid not in doc_data:
doc_data_w = np.ones(doc_length, dtype=int) # O tokens
if doc_data is None:
doc_data = pd.DataFrame(doc_data_w, columns=[workerid])
else:
doc_data_w = doc_data[workerid]
for span in annos_d[workerid]:
start = span[0]
fin = span[1]
doc_data_w[start] = 2
doc_data_w[start + 1:fin] = 0
doc_data[workerid] = doc_data_w
if os.path.exists(gt_fn):
gold_d = corpus.get_doc_groundtruth(docid, ANNOTYPE)
if 'gold' not in doc_data:
doc_data['gold'] = np.ones(doc_length, dtype=int)
for spans in gold_d:
start = spans[0]
fin = spans[1]
doc_data['gold'][start] = 2
doc_data['gold'][start + 1:fin] = 0
else:
doc_data['gold'] = np.zeros(doc_length, dtype=int) - 1 # -1 for missing gold values
text_d = [spacytoken.text for spacytoken in text_d]
doc_data['text'] = text_d
doc_start = np.zeros(doc_length, dtype=int)
doc_start[0] = 1
doc_gaps = doc_data['text'] == '\n\n' # sentence breaks
doc_start[doc_gaps[doc_gaps].index[:-1] + 1] = 1
doc_data['doc_start'] = doc_start
# doc_data = doc_data.replace(r'\n', ' ', regex=True)
doc_data = doc_data[np.invert(doc_gaps)]
doc_data['docid'] = docid
if all_data is None:
all_data = doc_data
else:
all_data = pd.concat([all_data, doc_data], axis=0)
# print('breaking for fast debugging')
# break
return all_data, workerids
def load_biomedical_data(regen_data_files, debug_subset_size=None):
savepath = '../../data/bayesian_sequence_combination/data/bio/'
if not os.path.isdir(savepath):
os.mkdir(savepath)
if regen_data_files or not os.path.isfile(savepath + '/annos.csv'):
anno_path_root = '../../data/bayesian_sequence_combination/data/bio-PICO/annotations'
# There are four folders here:
# acl17-test: the only one containing 'professional' annotations. 191 docs
# train: 3549 docs
# dev: 500 docs
# test: 500 docs
# Total of 4740 is slightly fewer than the values stated in the paper.
# The validation/test split in the acl17-test data is also not given. This suggests we may need to run the
# HMMCrowd and LSTMCrowd methods with hyperparameter tuning on our own splits. Let's skip that tuning for now?
# Cite Nils' paper about using a generic hyperparameter tuning that works well across tasks -- we need to do
# this initially because we don't have gold data to optimise on.
# Nguyen et al do only light tuning with a few (less than 5) values to choose from for each hyperparameter of
# HMM-Crowd, LSTM-Crowd and the individual LSTMs. Not clear whether the values set in the code as default are
# the chosen values -- let's assume so for now. We can re-tune later if necessary. Remember: we don't require
# a validation set for tuning our methods.
# We need for task1 and task2:
# train, dev and test splits.
# I believe the acl17-test set was split to form the dev and test sets in nguyen et al.
# Task 1 does not require separate training samples -- it's trained on crowdsourced rather than professional labels.
# Task 2 requires testing on separate samples (with gold labels)
# from the training samples (with crowd labels).
# Both tasks use all training data for training and the acl17-test set for validation/testing.
# These other splits into the train, test and dev folders appear to relate to a different set of experiments
# and are not relevant to nguyen et al 2017.
folders_to_load = ['acl17-test', 'train', 'test', 'dev']
all_data = None
all_workerids = None
for folder in folders_to_load:
print('Loading folder %s' % folder)
folder_data, workerids = _load_bio_folder(anno_path_root, folder)
if all_data is None:
all_data = folder_data
all_workerids = workerids
else:
all_data = pd.concat([all_data, folder_data])
all_workerids = np.unique(np.append(workerids.flatten(), all_workerids.flatten()))
all_data.to_csv(savepath + '/annos.csv', columns=all_workerids, header=False, index=False)
all_data.to_csv(savepath + '/gt.csv', columns=['gold'], header=False, index=False)
all_data.to_csv(savepath + '/doc_start.csv', columns=['doc_start'], header=False, index=False)
all_data.to_csv(savepath + '/text.csv', columns=['text'], header=False, index=False)
print('loading annos...')
annos = pd.read_csv(savepath + '/annos.csv', header=None, nrows=debug_subset_size)
annos = annos.fillna(-1)
annos = annos.values
#np.genfromtxt(savepath + '/annos.csv', delimiter=',')
print('loading text data...')
text = pd.read_csv(savepath + '/text.csv', skip_blank_lines=False, header=None, nrows=debug_subset_size)
text = text.fillna(' ').values
print('loading doc starts...')
doc_start = pd.read_csv(savepath + '/doc_start.csv', header=None, nrows=debug_subset_size).values #np.genfromtxt(savepath + '/doc_start.csv')
print('Loaded %i documents' % np.sum(doc_start))
print('loading ground truth labels...')
gt = pd.read_csv(savepath + '/gt.csv', header=None, nrows=debug_subset_size).values # np.genfromtxt(savepath + '/gt.csv')
# # debug subset
# # crowd_labelled[int(np.round(0.01 * len(crowd_labelled) )):] = False
# annos = pd.read_csv(savepath + './annos_debug.csv', skip_blank_lines=False, header=None)
# annos = annos.fillna(-1)
# annos = annos.values
#
# text = pd.read_csv(savepath + './text_debug.csv', skip_blank_lines=False, header=None)
# text = text.fillna(' ').values
#
# doc_start = pd.read_csv(savepath + './doc_start_debug.csv', skip_blank_lines=False, header=None)
# doc_start = doc_start.values.astype(bool)
#
# gt = pd.read_csv(savepath + './gt_debug.csv', skip_blank_lines=False, header=None)
# gt = gt.values.astype(int)
if len(text) == len(annos) - 1:
# sometimes the last line of text is blank and doesn't get loaded into text, but doc_start and gt contain labels
# for the newline token
annos = annos[:-1]
doc_start = doc_start[:-1]
gt = gt[:-1]
print('Creating dev/test split...')
# seed = 10
#
# gt_test, gt_dev, doc_start_dev, text_dev = split_dataset(
# gt, doc_start, text, annos, seed
# )
#
# since there is no separate validation set, we split the test set
ndocs = np.sum(doc_start & (gt != -1))
#testdocs = np.random.randint(0, ndocs, int(np.floor(ndocs * 0.5)))
ntestdocs = int(np.floor(ndocs * 0.5))
docidxs = np.cumsum(doc_start & (gt != -1)) # gets us the doc ids
# # testidxs = np.in1d(docidxs, testdocs)
ntestidxs = np.argwhere(docidxs == (ntestdocs+1))[0][0]
#
# # devidxs = np.ones(len(gt), dtype=bool)
# # devidxs[testidxs] = False
#
# The first half of the labelled data is used as dev, second half as test
gt_test = np.copy(gt)
gt_test[ntestidxs:] = -1
gt_dev = np.copy(gt)
gt_dev[:ntestidxs] = -1
doc_start_dev = doc_start[gt_dev != -1]
text_dev = text[gt_dev != -1]
gt_task1_dev = gt_dev
gt_dev = gt_dev[gt_dev != -1]
return gt_test, annos, doc_start, text, gt_task1_dev, gt_dev, doc_start_dev, text_dev
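# Illustrative sketch of the dev/test split above (synthetic 1-D arrays for clarity; the real
# code operates on column vectors loaded from CSV): the cumulative sum over labelled document
# starts gives a per-token document counter, and the first token of document ntestdocs + 1
# marks the boundary between the test and dev halves.
def _example_dev_test_split_boundary():
    doc_start = np.array([1, 0, 1, 0, 1, 0, 1, 0])
    gt = np.array([2, 2, -1, -1, 3, 3, 4, 4])      # the second document has no gold labels
    ndocs = np.sum(doc_start & (gt != -1))          # 3 labelled documents
    ntestdocs = int(np.floor(ndocs * 0.5))          # 1 document goes to the test half
    docidxs = np.cumsum(doc_start & (gt != -1))     # labelled-document counter per token
    ntestidxs = np.argwhere(docidxs == (ntestdocs + 1))[0][0]
    return ntestidxs                                # -> 4, first token of the 2nd labelled doc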
def _map_ner_str_to_labels(arr):
arr = arr.astype(str)
arr[arr == 'O'] = 1
arr[arr == 'B-ORG'] = 2
arr[arr == 'I-ORG'] = 0
arr[arr == 'B-PER'] = 4
arr[arr == 'I-PER'] = 3
arr[arr == 'B-LOC'] = 6
arr[arr == 'I-LOC'] = 5
arr[arr == 'B-MISC'] = 8
arr[arr == 'I-MISC'] = 7
arr[arr == '?'] = -1
try:
arr_ints = arr.astype(int)
except:
print("Could not map all annotations to integers. The annotations we found were:")
uannos = []
for anno in arr:
if anno not in uannos:
uannos.append(anno)
print(uannos)
# # Don't correct the training data like this as it can introduce more errors, e.g. some errors in the data are where
# there is a mis-placed O in the middle of a tag. Correcting the subsequent I to a B is wrong...
# I_labels = [0, 3, 5, 7]
# B_labels = [2, 4, 6, 8]
# for i, I in enumerate(I_labels):
# arr_prev = np.zeros(arr_ints.shape)
# arr_prev[1:] = arr_ints[:-1]
# to_correct = (arr_ints == I) & (arr_prev != B_labels[i]) & (arr_prev != I)
#
# if np.sum(to_correct):
# print('Correction at tokens: %s' % np.argwhere(to_correct).flatten())
# arr_ints[to_correct] = B_labels[i]
# # change IOB2 to IOB
# I_labels = [0, 3, 5, 7]
# B_labels = [2, 4, 6, 8]
# for i, I in enumerate(I_labels):
# arr_prev = np.zeros(arr_ints.shape)
# arr_prev[1:] = arr_ints[:-1]
# to_correct = (arr_ints == B_labels[i]) & (arr_prev != I)
#
# if np.sum(to_correct):
# print('Correction at tokens: %s' % np.argwhere(to_correct).flatten())
# arr_ints[to_correct] = I
return arr_ints
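# Illustrative sketch (not part of the original module): mapping a handful of CoNLL-style tag
# strings to the integer scheme defined above, where '?' marks a missing annotation.
def _example_map_ner_str_to_labels():
    tags = np.array(['O', 'B-PER', 'I-PER', 'B-LOC', '?'])
    return _map_ner_str_to_labels(tags)  # -> array([ 1,  4,  3,  6, -1])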
def _load_rodrigues_annotations(dir, worker_str, gold_char_idxs=None, gold_tokens=None, skip_imperfect_matches=False):
worker_data = None
for f in os.listdir(dir):
if not f.endswith('.txt'):
continue
doc_str = f.split('.')[0]
f = os.path.join(dir, f)
#print('Processing %s' % f)
new_data = pd.read_csv(f, names=['text', worker_str], skip_blank_lines=False,
dtype={'text':str, worker_str:str}, na_filter=False, delim_whitespace=True)
doc_gaps = (new_data['text'] == '') & (new_data[worker_str] == '')
doc_start = np.zeros(doc_gaps.shape[0], dtype=int)
doc_start[doc_gaps[:-1][doc_gaps[:-1]].index + 1] = 1 # the indexes after the gaps
doc_content = new_data['text'] != ''
new_data['doc_start'] = doc_start
new_data = new_data[doc_content]
new_data['doc_start'].iat[0] = 1
annos_to_keep = np.ones(new_data.shape[0], dtype=bool)
for t, tok in enumerate(new_data['text']):
if len(tok.split('/')) > 1:
tok = tok.split('/')[0]
new_data['text'].iat[t] = tok
if len(tok) == 0:
annos_to_keep[t] = False
# compare the tokens in the worker annotations to the gold labels. They are misaligned in the dataset. We will
# skip labels in the worker annotations that are assigned to only a part of a token in the gold dataset.
char_counter = 0
gold_tok_idx = 0
skip_sentence = False
sentence_start = 0
if gold_char_idxs is not None:
gold_chars = np.array(gold_char_idxs[doc_str])
last_accepted_tok = ''
last_accepted_idx = -1
for t, tok in enumerate(new_data['text']):
if skip_imperfect_matches and skip_sentence:
new_data[worker_str].iloc[t] = -1
if new_data['doc_start'].iat[t]:
skip_sentence = False
if new_data['doc_start'].iat[t]:
sentence_start = t
gold_char_idx = gold_chars[gold_tok_idx]
gold_tok = gold_tokens[doc_str][gold_tok_idx]
#print('tok = %s, gold_tok = %s' % (tok, gold_tok))
if not annos_to_keep[t]:
continue # already marked as skippable
if char_counter < gold_char_idx and \
(last_accepted_tok + tok) in gold_tokens[doc_str][gold_tok_idx-1]:
print('Correcting misaligned annotations (split word in worker data): %i, %s' % (t, tok))
skip_sentence = True
last_accepted_tok += tok
annos_to_keep[last_accepted_idx] = False # skip the previous ones until the end
# where we remove a line, assume that the last annotation in the removed line really belongs to the
# line before...
# new_data[worker_str].iat[t - 1] = new_data[worker_str].iat[t]
# assume that the first annotation was actually correct -- I don't think we want this because the
# first token was sometimes erroneously applied to only a part of the string.
#new_data[worker_str].iat[t] = new_data[worker_str].iat[last_accepted_idx]
new_data['text'].iat[t] = last_accepted_tok
new_data['doc_start'].iat[t] = new_data['doc_start'].iat[last_accepted_idx]
last_accepted_idx = t
char_counter += len(tok)
elif tok not in gold_tok or (tok == '' and gold_tok != ''):
print('Correcting misaligned annotations (spurious text in worker data): %i, %s vs. %s' % (t, tok, gold_tok))
skip_sentence = True
annos_to_keep[t] = False # skip this spurious token
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
elif tok == gold_tok[:len(tok)]: # needs to match the first characters in the string, not just be there somewhere
gold_tok_idx += 1
if tok != gold_tok:
skip_sentence = True
while char_counter > gold_char_idx:
print('error in text alignment between worker and gold!')
len_to_skip = gold_chars[gold_tok_idx - 1] - gold_chars[gold_tok_idx - 2]
# move the gold counter along to the next token because gold is behind
gold_tok_idx += 1
gold_chars[gold_tok_idx:] -= len_to_skip
gold_char_idx = gold_chars[gold_tok_idx]
gold_char_idxs[doc_str] = gold_chars
last_accepted_tok = tok
last_accepted_idx = t
char_counter += len(tok)
else:
skip_sentence = True
annos_to_keep[t] = False
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
# no more text in this document, but the last sentence must be skipped
if skip_imperfect_matches and skip_sentence:
# annos_to_keep[sentence_start:t+1] = False
new_data[worker_str].iloc[sentence_start:t+1] = -1
new_data = new_data[annos_to_keep]
new_data[worker_str] = _map_ner_str_to_labels(new_data[worker_str])
new_data['doc_id'] = doc_str
new_data['tok_idx'] = np.arange(new_data.shape[0])
# add to data from this worker
if worker_data is None:
worker_data = new_data
else:
worker_data = pd.concat([worker_data, new_data])
return worker_data
def _load_rodrigues_annotations_all_workers(annotation_data_path, gold_data, skip_dirty=False):
worker_dirs = os.listdir(annotation_data_path)
data = None
annotator_cols = np.array([], dtype=str)
char_idx_word_starts = {}
chars = {}
char_counter = 0
for t, tok in enumerate(gold_data['text']):
if gold_data['doc_id'].iloc[t] not in char_idx_word_starts:
char_counter = 0
starts = []
toks = []
char_idx_word_starts[gold_data['doc_id'].iloc[t]] = starts
chars[gold_data['doc_id'].iloc[t]] = toks
starts.append(char_counter)
toks.append(tok)
char_counter += len(tok)
for widx, dir in enumerate(worker_dirs):
if dir.startswith("."):
continue
worker_str = dir
annotator_cols = np.append(annotator_cols, worker_str)
dir = os.path.join(annotation_data_path, dir)
print('Processing dir for worker %s (%i of %i)' % (worker_str, widx, len(worker_dirs)))
worker_data = _load_rodrigues_annotations(dir, worker_str,
char_idx_word_starts, chars, skip_dirty)
print("Loaded a dataset of size %s" % str(worker_data.shape))
# now need to join this to other workers' data
if data is None:
data = worker_data
else:
data = data.merge(worker_data, on=['doc_id', 'tok_idx', 'text', 'doc_start'], how='outer', sort=True, validate='1:1')
return data, annotator_cols
def IOB_to_IOB2(seq):
# test with and without this to see if we can reproduce the MV values from Nguyen et al with NER data.
# It seems to make little difference.
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in I_labels:
typeidx = np.argwhere(I_labels == label)[0][0]
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != label):
# we have I preceded by O. This needs to be changed to a B.
seq[i] = B_labels[typeidx]
return seq
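# Illustrative sketch (not part of the original module): with the integer scheme used here
# (1=O, 0=I-ORG, 2=B-ORG, 3=I-PER, 4=B-PER), an I-tag that does not continue a chunk of the
# same type is promoted to the corresponding B-tag. Note that the array is modified in place.
def _example_iob_to_iob2():
    seq = np.array([1, 0, 0, 1, 3])  # O, I-ORG, I-ORG, O, I-PER
    return IOB_to_IOB2(seq)          # -> array([1, 2, 0, 1, 4]): O, B-ORG, I-ORG, O, B-PER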
def IOB2_to_IOB(seq):
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in B_labels:
typeidx = np.argwhere(B_labels == label)[0][0]
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != I_labels[typeidx]):
# this B does not continue a chunk of the same type, so under IOB(1) it becomes an I.
seq[i] = I_labels[typeidx]
return seq
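# Illustrative sketch (not part of the original module): converting back to IOB(1) turns a
# B-tag that does not follow a chunk of the same type into an I-tag, reversing the example
# given for IOB_to_IOB2 above. The array is again modified in place.
def _example_iob2_to_iob():
    seq = np.array([1, 2, 0, 1, 4])  # O, B-ORG, I-ORG, O, B-PER
    return IOB2_to_IOB(seq)          # -> array([1, 0, 0, 1, 3]): O, I-ORG, I-ORG, O, I-PER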
def load_ner_data(regen_data_files, skip_sen_with_dirty_data=False):
# In Nguyen et al 2017, the original data has been separated out for task 1, aggregation of crowd labels. In this
# task, the original training data is further split into val and test -- to make our results comparable with Nguyen
# et al, we need to test on the test split for task 1, but train our model on both.
# To make them comparable with Rodrigues et al. 2014, we need to test on all data (check this in their paper).
# Task 2 is for prediction on a test set given a model trained on the training set and optimised on the validation
# set. It would be ideal to show both these results...
savepath = '../../data/bayesian_sequence_combination/data/ner/' # location to save our csv files to
if not os.path.isdir(savepath):
os.mkdir(savepath)
# within each of these folders below is an mturk_train_data folder, containing crowd labels, and a ground_truth
# folder. Rodrigues et al. have assigned document IDs that allow us to match up the annotations from each worker.
# Nguyen et al. have split the training set into the val/test folders for task 1. Data is otherwise the same as in
# the Rodrigues folder under mturk/extracted_data.
task1_val_path = '../../data/bayesian_sequence_combination/data/crf-ma-NER-task1/val/'
task1_test_path = '../../data/bayesian_sequence_combination/data/crf-ma-NER-task1/test/'
# These are just two files that we use for text features + ground truth labels.
task2_val_path = '../../data/bayesian_sequence_combination/data/English NER/eng.testa'
task2_test_path = '../../data/bayesian_sequence_combination/data/English NER/eng.testb'
if regen_data_files or not os.path.isfile(savepath + '/task1_val_annos.csv'):
# Steps to load data (all steps need to map annotations to consecutive integer labels).
# 1. Create an annos.csv file containing all the annotations in task1_val_path and task1_test_path.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(task1_val_path + 'ground_truth/', 'gold')
# load the validation data
data, annotator_cols = _load_rodrigues_annotations_all_workers(task1_val_path + 'mturk_train_data/',
gold_data, skip_sen_with_dirty_data)
# 2. Create ground truth CSV for task1_val_path (for tuning the LSTM)
# merge gold with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'text'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annotations per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annotations
if len(np.unique(counts)) > 1:
print('Validation data: we have some misaligned labels.')
print(counts)
if np.any(counts.values == 0):
print('Removing document %s with no annotations.' % doc)
# remove any lines with no annotations
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_val_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the text in same order
data.to_csv(savepath + '/task1_val_text.csv', columns=['text'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_val_gt.csv', columns=['gold'], header=False, index=False)
# 3. Load worker annotations for test set.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(task1_test_path + 'ground_truth/', 'gold')
# load the test data
data, annotator_cols = _load_rodrigues_annotations_all_workers(task1_test_path + 'mturk_train_data/',
gold_data, skip_sen_with_dirty_data)
# 4. Create ground truth CSV for task1_test_path
# merge with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'text'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annotations per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annotations
if len(np.unique(counts)) > 1:
print('Test data: we have some misaligned labels.')
print(counts)
if np.any(counts.values == 0):
print('Removing document %s with no annotations.' % doc)
# remove any lines with no annotations
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_test_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the text in same order
data.to_csv(savepath + '/task1_test_text.csv', columns=['text'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_test_gt.csv', columns=['gold'], header=False, index=False)
# 5. Create a file containing only the words for the task 2 validation set, i.e. like annos.csv with no annotations.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_val_path but blank out the task_1 labels
# (for tuning the LSTM for task 2)
import csv
eng_val = pd.read_csv(task2_val_path, delimiter=' ', usecols=[0,3], names=['text', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_val.shape[0])
docstart_token = eng_val['text'][0]
doc_starts[1:] = (eng_val['text'] == docstart_token)[:-1]
eng_val['doc_start'] = doc_starts
eng_val['tok_idx'] = eng_val.index
eng_val = eng_val[eng_val['text'] != docstart_token] # remove all the docstart labels
eng_val['gold'] = _map_ner_str_to_labels(eng_val['gold'])
eng_val['gold'] = IOB_to_IOB2(eng_val['gold'].values)
eng_val.to_csv(savepath + '/task2_val_gt.csv', columns=['gold'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_text.csv', columns=['text'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 6. Create a file containing only the words for the task 2 test set, i.e. like annos.csv with no annotations.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_test_path but blank out the task_1 labels/
eng_test = pd.read_csv(task2_test_path, delimiter=' ', usecols=[0,3], names=['text', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_test.shape[0])
docstart_token = eng_test['text'][0]
doc_starts[1:] = (eng_test['text'] == docstart_token)[:-1]
eng_test['doc_start'] = doc_starts
eng_test['tok_idx'] = eng_test.index
eng_test = eng_test[eng_test['text'] != docstart_token] # remove all the docstart labels
eng_test['gold'] = _map_ner_str_to_labels(eng_test['gold'])
eng_test['gold'] = IOB_to_IOB2(eng_test['gold'].values)
eng_test.to_csv(savepath + '/task2_test_gt.csv', columns=['gold'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_text.csv', columns=['text'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 7. Reload the data for the current run...
print('loading annos for task1 test...')
annos = pd.read_csv(savepath + '/task1_test_annos.csv', skip_blank_lines=False)
print('loading text data for task1 test...')
text = pd.read_csv(savepath + '/task1_test_text.csv', skip_blank_lines=False, header=None)
print('loading doc_starts for task1 test...')
doc_start = pd.read_csv(savepath + '/task1_test_doc_start.csv', skip_blank_lines=False, header=None)
"""
Produces a tsv file to study all the nii files and perform the quality check.
"""
import os
from os import path
from pathlib import Path
import nibabel as nib
import numpy as np
import pandas as pd
from clinica.utils.inputs import RemoteFileStructure, fetch_file
def extract_metrics(caps_dir, output_dir, group_label):
if not path.exists(output_dir):
os.makedirs(output_dir)
# Load eyes segmentation
home = str(Path.home())
cache_clinicadl = path.join(home, ".cache", "clinicadl", "segmentation")
url_aramis = "https://aramislab.paris.inria.fr/files/data/template/"
FILE1 = RemoteFileStructure(
filename="eyes_segmentation.nii.gz",
url=url_aramis,
checksum="56f699c06cafc62ad8bb5b41b188c7c412d684d810a11d6f4cbb441c0ce944ee",
)
if not (path.exists(cache_clinicadl)):
os.makedirs(cache_clinicadl)
segmentation_file = path.join(cache_clinicadl, FILE1.filename)
if not (path.exists(segmentation_file)):
try:
segmentation_file = fetch_file(FILE1, cache_clinicadl)
except IOError as err:
raise IOError("Unable to download required eyes segmentation for QC:", err)
segmentation_nii = nib.load(segmentation_file)
segmentation_np = segmentation_nii.get_fdata()
# Get the GM template
template_path = path.join(
caps_dir,
"groups",
f"group-{group_label}",
"t1",
f"group-{group_label}_template.nii.gz",
)
template_nii = nib.load(template_path)
template_np = template_nii.get_fdata()
template_np = np.sum(template_np, axis=3)
template_segmentation_np = template_np * segmentation_np
# Get the data
filename = path.join(output_dir, "QC_metrics.tsv")
columns = [
"participant_id",
"session_id",
"max_intensity",
"non_zero_percentage",
"frontal_similarity",
]
results_df = pd.DataFrame()
subjects = os.listdir(path.join(caps_dir, "subjects"))
subjects = [subject for subject in subjects if subject[:4:] == "sub-"]
for subject in subjects:
subject_path = path.join(caps_dir, "subjects", subject)
sessions = os.listdir(subject_path)
sessions = [session for session in sessions if session[:4:] == "ses-"]
for session in sessions:
image_path = path.join(
subject_path,
session,
"t1",
"spm",
"segmentation",
"normalized_space",
subject
+ "_"
+ session
+ "_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii.gz",
)
if path.exists(image_path):
# GM analysis
image_nii = nib.load(image_path)
image_np = image_nii.get_fdata()
image_segmentation_np = image_np * segmentation_np
eyes_nmi_value = nmi(
occlusion1=template_segmentation_np,
occlusion2=image_segmentation_np,
)
non_zero_percentage = np.count_nonzero(image_np) / image_np.size
row = [
[
subject,
session,
np.max(image_np),
non_zero_percentage,
eyes_nmi_value,
]
]
row_df = pd.DataFrame(row, columns=columns)
results_df = pd.concat([results_df, row_df])
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
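# Illustrative sketch of how the expected flags above are combined (the bit values here are
# hypothetical stand-ins, not the real DESCRIPTION_MASK_MAPPING constants): each failure
# reason sets one bit, so several reasons can be OR-ed into a single quality_flag integer and
# tested individually with a bitwise AND.
def _example_flag_combination():
    NIGHTTIME = 0b0001        # hypothetical bit
    LIMITS_EXCEEDED = 0b0010  # hypothetical bit
    VERSION_FLAG = 0b1000     # hypothetical version bit
    flag = NIGHTTIME | LIMITS_EXCEEDED | VERSION_FLAG
    return bool(flag & NIGHTTIME), bool(flag & LIMITS_EXCEEDED)  # -> (True, True)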
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
__all__ = (
"meter_data_from_csv",
"meter_data_from_json",
"meter_data_to_csv",
"temperature_data_from_csv",
"temperature_data_from_json",
"temperature_data_to_csv",
)
def meter_data_from_csv(
filepath_or_buffer,
tz=None,
start_col="start",
value_col="value",
gzipped=False,
freq=None,
**kwargs
):
""" Load meter data from a CSV file.
Default format::
start,value
2017-01-01T00:00:00+00:00,0.31
2017-01-02T00:00:00+00:00,0.4
2017-01-03T00:00:00+00:00,0.58
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
start_col : :any:`str`, optional, default ``'start'``
Date period start column.
value_col : :any:`str`, optional, default ``'value'``
Value column, can be in any unit.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.DataFrame.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``.
"""
read_csv_kwargs = {
"usecols": [start_col, value_col],
"dtype": {value_col: np.float64},
"parse_dates": [start_col],
"index_col": start_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs)
df.index = pd.to_datetime(df.index, utc=True)
# for pandas<0.24, which doesn't localize even with utc=True
if df.index.tz is None:
df.index = df.index.tz_localize("UTC") # pragma: no cover
if tz is not None:
df = df.tz_convert(tz)
if freq == "hourly":
df = df.resample("H").sum(min_count=1)
elif freq == "daily":
df = df.resample("D").sum(min_count=1)
return df
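# Illustrative usage sketch (not part of the original module): loading a tiny in-memory CSV
# with the default column names and resampling it to daily totals.
def _example_meter_data_from_csv():
    from io import StringIO
    csv_buffer = StringIO(
        "start,value\n"
        "2017-01-01T00:00:00+00:00,0.31\n"
        "2017-01-01T12:00:00+00:00,0.40\n"
        "2017-01-02T00:00:00+00:00,0.58\n"
    )
    df = meter_data_from_csv(csv_buffer, freq="daily")
    return df  # two daily rows: 0.71 for 2017-01-01 and 0.58 for 2017-01-02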
def temperature_data_from_csv(
filepath_or_buffer,
tz=None,
date_col="dt",
temp_col="tempF",
gzipped=False,
freq=None,
**kwargs
):
""" Load temperature data from a CSV file.
Default format::
dt,tempF
2017-01-01T00:00:00+00:00,21
2017-01-01T01:00:00+00:00,22.5
2017-01-01T02:00:00+00:00,23.5
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
date_col : :any:`str`, optional, default ``'dt'``
Date period start column.
temp_col : :any:`str`, optional, default ``'tempF'``
Temperature column.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.Series.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``.
"""
read_csv_kwargs = {
"usecols": [date_col, temp_col],
"dtype": {temp_col: np.float64},
"parse_dates": [date_col],
"index_col": date_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List, Union, Tuple
from macrosynergy.management.simulate_quantamental_data import make_qdf
from macrosynergy.management.shape_dfs import reduce_df
class NaivePnL:
"""Computes and collects illustrative PnLs with limited signal options and
disregarding transaction costs
:param <pd.Dataframe> df: standardized data frame with the following necessary
columns: 'cid', 'xcat', 'real_date' and 'value'.
:param <str> ret: return category.
:param <List[str]> sigs: signal categories.
:param <List[str]> cids: cross sections to be considered. Default is all in the
dataframe.
:param <str> start: earliest date in ISO format. Default is None and earliest date
in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date in df
is used.
:param <dict> blacklist: cross sections with date ranges that should be excluded
from the dataframe.
"""
def __init__(self, df: pd.DataFrame, ret: str, sigs: List[str],
cids: List[str] = None,
start: str = None, end: str = None,
blacklist: dict = None):
self.ret = ret
self.sigs = sigs
xcats = [ret] + sigs
cols = ['cid', 'xcat', 'real_date', 'value']
self.df, self.xcats, self.cids = reduce_df(df[cols], xcats, cids, start, end,
blacklist, out_all=True)
self.df['real_date'] = pd.to_datetime(self.df['real_date'])
self.pnl_names = [] # list for PnL names
self.black = blacklist
def make_pnl(self, sig: str, sig_op: str = 'zn_score_pan', pnl_name: str = None,
rebal_freq: str = 'daily', rebal_slip = 0, vol_scale: float = None,
min_obs: int = 252, iis: bool = True,
neutral: str = 'zero', thresh: float = None):
# Todo: implement the four 'pass-through' arguments to make_zn_score()
"""Calculate daily PnL and add to the main dataframe of the class instance
:param <str> sig: name of signal that is the basis for positioning. The signal
is assumed to be recorded at the end of the day prior to position taking.
:param <str> sig_op: signal transformation options; must be one of
'zn_score_pan', 'zn_score_cs', or 'binary'.
Default 'zn_score_pan' transforms raw signals into z-scores around zero value
based on the whole panel.
Option 'zn_score_cs' transforms signals to z-scores around zero based on
cross-section alone.
Option 'binary' transforms signals into uniform long/shorts (1/-1) across all
sections.
N.B.: zn-score here means standardized score with zero being the natural
neutral level and standardization through division by mean absolute value.
:param <str> pnl_name: name of the PnL to be generated and stored.
Default is none, i.e. a default name is given.
Previously calculated PnLs in the class will be overwritten. This means that
if a set of PnLs is to be compared they require custom names.
:param <str> rebal_freq: rebalancing frequency for positions according to signal
must be one of 'daily' (default), 'weekly' or 'monthly'.
:param <int> rebal_slip: rebalancing slippage in days. Default is 0. A value of 1 means
that it takes one day to rebalance the position and that the new position
produces PnL from the second day after the signal has been recorded.
:param <float> vol_scale: ex-post scaling of PnL to the annualized volatility given.
This is for comparative visualization and not out-of-sample. Default is None.
:param <int> min_obs: the minimum number of observations required to calculate
zn_scores. Default is 252.
# Todo: implement in function
:param <bool> iis: if True (default) zn-scores are also calculated for the initial
sample period defined by min-obs, on an in-sample basis, to avoid losing history.
# Todo: implement in function
:param <str> neutral: method to determine neutral level. Default is 'zero'.
Alternatives are 'mean' and "median".
# Todo: implement in function
:param <float> thresh: threshold value beyond which scores are winsorized,
i.e. contained at that threshold. Therefore, the threshold is the maximum absolute
score value that the function is allowed to produce. The minimum threshold is 1
standard deviation.
# Todo: implement in function
"""
assert sig in self.sigs
assert sig_op in ['zn_score_pan', 'zn_score_cs', 'binary']
assert rebal_freq in ['daily', 'weekly', 'monthly']
dfx = self.df[self.df['xcat'].isin([self.ret, sig])]
dfw = dfx.pivot(index=['cid', 'real_date'], columns='xcat', values='value')
if sig_op == 'zn_score_pan':
# Todo: below is in-sample; use make_zn_score() for oos calculation
# Todo: pass through min_obs, iss, neutral, thresh
sda = dfw[sig].abs().mean()
dfw['psig'] = dfw[sig] / sda
elif sig_op == 'zn_score_cs': # zn-score based on
# Todo: below is in-sample; use make_zn_score() for oos calculation
# Todo: pass through min_obs, iss, neutral, thresh
zn_score = lambda x: x / np.nanmean(np.abs(x))
dfw['psig'] = dfw[sig].groupby(level=0).apply(zn_score)
elif sig_op == 'binary':
dfw['psig'] = np.sign(dfw[sig])
# Signal for the following day explains the lag mechanism.
dfw['psig'] = dfw['psig'].groupby(level=0).shift(1) # lag explanatory 1 period
dfw.reset_index(inplace=True)
if rebal_freq != 'daily':
dfw['year'] = dfw['real_date'].dt.year
if rebal_freq == 'monthly':
dfw['month'] = dfw['real_date'].dt.month
rebal_dates = dfw.groupby(['cid', 'year', 'month'])['real_date'].\
min() # rebalancing days are first of month
if rebal_freq == 'weekly':
dfw['week'] = dfw['real_date'].dt.week
rebal_dates = dfw.groupby(['cid', 'year', 'week'])['real_date'].\
min() # rebalancing days are first of week
dfw['sig'] = np.nan
dfw.loc[dfw['real_date'].isin(rebal_dates), 'sig'] = \
dfw.loc[dfw['real_date'].isin(rebal_dates), 'psig']
dfw['sig'] = dfw['sig'].fillna(method='ffill').shift(rebal_slip)
dfw['value'] = dfw[self.ret] * dfw['sig']
df_pnl = dfw.loc[:, ['cid', 'real_date', 'value']] # cross-section PnLs
df_pnl_all = df_pnl.groupby(['real_date']).sum() # global PnL as sum
df_pnl_all = df_pnl_all[df_pnl_all['value'].cumsum() != 0] # trim early zeros
df_pnl_all['cid'] = 'ALL'
df_pnl_all = df_pnl_all.reset_index()[df_pnl.columns] # columns as in df_pnl...
df_pnl = df_pnl.append(df_pnl_all) #... and append
if vol_scale is not None:
leverage = vol_scale * (df_pnl_all['value'].std() * np.sqrt(261))**(-1)
df_pnl['value'] = df_pnl['value'] * leverage
pnn = ('PNL_' + sig) if pnl_name is None else pnl_name # set PnL name
df_pnl['xcat'] = pnn
if pnn in self.pnl_names:
self.df = self.df[~(self.df['xcat'] == pnn)] # remove any PnL with same name
else:
self.pnl_names = self.pnl_names + [pnn]
self.df = self.df.append(df_pnl[self.df.columns]).reset_index(drop=True)
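# Illustrative arithmetic for the 'zn_score_pan' option above (a sketch, not part of the
# original class): with panel signal values [2.0, -1.0, 1.0] the mean absolute value is
# (2 + 1 + 1) / 3 = 4/3, so the transformed signals are [1.5, -0.75, 0.75], i.e. each raw
# value divided by 4/3.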
def plot_pnls(self, pnl_cats: List[str], pnl_cids: List[str] = ['ALL'],
start: str = None, end: str = None, figsize: Tuple = (10, 6)):
"""Plot line chart of cumulative PnLs, single PnL, multiple PnL types per
cross section, or multiple cross sections per PnL type.
:param <List[str]> pnl_cats: list of PnL categories that should be plotted.
:param <List[str]> pnl_cids: list of cross sections to be plotted;
default is 'ALL' (global PnL).
Note: one can only have multiple PnL categories or multiple cross sections,
not both.
:param <str> start: earliest date in ISO format. Default is None and earliest
date in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date
in df is used.
:param <Tuple> figsize: tuple of plot width and height. Default is (10,6).
"""
if pnl_cats is None:
pnl_cats = self.pnl_names
assert (len(pnl_cats) == 1) | (len(pnl_cids) == 1)
dfx = reduce_df(self.df, pnl_cats, pnl_cids, start, end, self.black,
out_all=False)
sns.set_theme(style='whitegrid', palette='colorblind',
rc={'figure.figsize': figsize})
if len(pnl_cids) == 1:
dfx['cum_value'] = dfx.groupby('xcat').cumsum()
ax = sns.lineplot(data=dfx, x='real_date', y='cum_value', hue='xcat',
estimator=None, lw=1)
leg = ax.axes.get_legend()
if len(pnl_cats) > 1:
leg.set_title('PnL categories for ' + pnl_cids[0])
else:
leg.set_title('PnL category for ' + pnl_cids[0])
else:
dfx['cum_value'] = dfx.groupby('cid').cumsum()
ax = sns.lineplot(data=dfx, x='real_date', y='cum_value', hue='cid',
estimator=None, lw=1)
leg = ax.axes.get_legend()
leg.set_title('Cross sections')
plt.title('Cumulative naive PnL', fontsize=16)
plt.xlabel('')
plt.ylabel('% of risk capital, no compounding')
plt.axhline(y=0, color='black', linestyle='--', lw=1)
plt.show()
def evaluate_pnls(self, pnl_cats: List[str], pnl_cids: List[str] = ['ALL'],
start: str = None, end: str = None):
"""Small table of key PnL statistics
:param <List[str]> pnl_cats: list of PnL categories that should be plotted.
:param <List[str]> pnl_cids: list of cross sections to be plotted; default is
'ALL' (global PnL).
Note: one can only have multiple PnL categories or multiple cross sections,
not both.
:param <str> start: earliest date in ISO format. Default is None and earliest
date in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date
in df is used.
:return: standardized dataframe with key PnL performance statistics
"""
if pnl_cats is None:
pnl_cats = self.pnl_names
assert (len(pnl_cats) == 1) | (len(pnl_cids) == 1)
dfx = reduce_df(self.df, pnl_cats, pnl_cids, start, end, self.black,
out_all=False)
groups = 'xcat' if len(pnl_cids) == 1 else 'cid'
stats = ['Return (pct ar)', 'St. Dev. (pct ar)', 'Sharpe ratio', 'Sortino ratio',
'Max 21-day draw', 'Max 6-month draw', 'Traded months']
dfw = dfx.pivot(index='real_date', columns=groups, values='value')
df = pd.DataFrame(columns=dfw.columns, index=stats)
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
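# Example usage sketch (mirrors how the statistical reduction tests call this helper; the
# frame below is a small hand-made stand-in rather than the pytest fixture):
def _example_assert_stat_op_calc():
    frame = DataFrame({'A': [1.0, 2.0, np.nan], 'B': [4.0, 5.0, 6.0]})
    assert_stat_op_calc('sum', np.sum, frame)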
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame with boolean columns and some missing values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
import datetime as dtm
import itertools
import pandas as pd
import numpy as np
from sklearn.metrics import r2_score
from sklearn.base import clone
import sugartime.core as core
class Patient:
"""
Object containing data for an
individual patient.
"""
def __init__(self):
self.carbs_per_insulin = 8 # this modulates the grid search in self.find_optimal_bolus
self.target_range = (80, 140)
def load_example_data(self):
"""
Loads the raw example patient dataset.
"""
data = core.load_and_clean_example_data()
self.data = core.feature_engineering(data)
def load_synthetic_data(self):
"""
Loads a synthetic dataset created with the simglucose package
(https://github.com/jxx123/simglucose).
"""
self.data = core.load_and_clean_synthetic_data()
def load_device_data(self, clarity_filename, tandem_filename):
"""
Loads and cleans a novel data set from a clarity continuous
glucose monitor and a tandem glucose pump.
"""
self.data = core.load_and_clean_data(
clarity_filename,
tandem_filename,
)
def load_data(self, X, y):
"""
Loads a novel data set.
Should be an N x 3 numpy array, where N is the # of
observations.
Output:
Pandas dataframe with a datetime index and the following columns:
* estimated_glu
* carb_grams
* all_insulin
"""
# create datetime index for dataframe
t = dtm.datetime(2020, 11, 3, 12, 5)
ind = pd.date_range(
start=t,
end=t + dtm.timedelta(minutes=5 * (len(y)-1)),
freq='5T')
# make dataframe
df = pd.DataFrame(
np.concatenate((y, X), axis=1),
columns=['estimated_glu','carb_grams','all_insulin'],
index=ind
)
self.data = df
def split_data(self, target_name, feature_names, split=[0.75]):
"""
Split the data into training and testing (and maybe validation) sets.
Saves the splits to the patient object.
Inputs:
* target_name (str): name of target variable
* feature_names (str): name of feature variables
* split (list): one- or two-element list. First element marks the end
of the training set. Second element marks the end of the validation set.
Testing set is whatever is left.
"""
if len(split) > 1:
Xtrain, ytrain, Xval, yval, Xtest, ytest = core.split_data(
self.data,
target_name=target_name,
feature_names=feature_names,
split=split,
)
self.Xval = Xval
self.yval = yval
else:
Xtrain, ytrain, Xtest, ytest = core.split_data(
self.data,
target_name=target_name,
feature_names=feature_names,
split=split,
)
self.Xtrain = Xtrain
self.ytrain = ytrain
self.Xtest = Xtest
self.ytest = ytest
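# A minimal usage sketch for Patient (the shapes, values, and split fractions
# below are illustrative assumptions, not part of the original API):
def _example_patient_setup():
    """Load raw arrays into a Patient and split them for modeling."""
    X = np.zeros((288, 2))        # exogenous features: carb_grams, all_insulin
    y = np.full((288, 1), 110.0)  # target: estimated_glu
    patient = Patient()
    patient.load_data(X, y)
    patient.split_data(
        target_name="estimated_glu",
        feature_names=["carb_grams", "all_insulin"],
        split=[0.6, 0.2],         # train / validation; the rest is the test set
    )
    return patient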
class MultiOutModel:
"""
A multioutput time series model that consists of multiple individual
scikit-learn regression estimators. Each estimator is trained on a
different time shift (t+N) of the target variable. Each estimator
can be different (e.g., random forest for t+1, support vector machine
for t+2, etc.), and can have unique hyperparameters for both the base
estimator and the time series model design.
Takes the following params:
* patient (obj): a unique patient dataset
* horizon (int): how far into the future the model will attempt to
forecast.
"""
def __init__(self, patient, horizon):
self.horizon = horizon
self.steps = list(range(horizon))
self.patient = patient
def add_step(self, step, multi_out_step):
"""
Assigns a new regression estimator to a designated time step in the
full multioutput model.
Takes the following:
* step (int): the step in the multioutput model that is being defined
* estimator (obj): a MultiOutStep object containing the estimator for
the current output time step.
"""
self.steps[step] = multi_out_step
def fit(
self,
X,
y,
estimator,
auto_order,
exog_order,
exog_delay
):
"""
Fits a multioutput model.
Takes the following:
* X (dataframe): design matrix for training data
* y (dataframe): target variable for training data
* estimator (dict): dict of estimators to fit. Key is the estimator
name (str), while value is the estimator itself.
* auto_order (int): autoregressive order of endogenous variable
* exog_order (list): order of exogenous variables
* exog_delay (list): delay of exogenous variables
Stores the fitted model(s) in the MultiOutModel object.
"""
# impose lags from design params
horizon = self.horizon
features, target = core.add_lags(
X, y, auto_order, exog_order, exog_delay)
features, target = core.horizon_transform(
features, target, horizon=horizon)
# loop through all the time steps
for i in range(horizon):
for est in estimator.keys():
cmdl = MultiOutStep(
clone(estimator[est]),
est,
auto_order,
exog_order,
exog_delay,
horizon,
i)
cmdl.estimator = cmdl.fit(features, target)
self.add_step(i, cmdl)
def grid_search(self, X, y, Xval, yval, estimators, design_params):
"""
Performs a brute force grid search over the supplied estimators and
design parameters, keeping, for each forecast step, the combination
with the highest validation R^2.
Takes the following:
* X (dataframe): design matrix for training data
* y (dataframe): target variable for training data
* Xval (dataframe): design matrix for validation data
* yval (dataframe): target variable for validation data
* estimators (dict): dict containing all of the estimators over which
to perform grid search. Keys are the estimator names (str), while
values are the estimators themselves.
* design_params (list): list of tuples of all of the
permutations of desired design params
Stores the best fitting model(s) in the MultiOutModel object.
"""
# model = MultiOutModel(horizon)
horizon = self.horizon
# loop through all the time steps
for i in range(horizon):
best_r2 = False
# loop through all the design params
for idp, (ao, eo, ed) in enumerate(design_params):
# create design matrix and target matrix
features, target = core.add_lags(X, y, ao, eo, ed)
features, target = core.horizon_transform(
features, target, horizon=horizon
)
# loop through all the models and perform hyperparameter search
for est in estimators.keys():
cmdl = MultiOutStep(
clone(estimators[est]), est, ao, eo, ed, horizon, i
)
cmdl.estimator = cmdl.fit(features, target)
r2 = cmdl.model_performance(Xval, yval)
cmdl.r2 = r2[0]
# keep the model with the highest r_squared
if not best_r2:
self.add_step(i, cmdl)
best_r2 = r2
elif r2 > best_r2:
self.add_step(i, cmdl)
best_r2 = r2
# print out the best model for each step
print(
"Best model for step {} is {}({},{},{}): r2 = {}".format(
i,
self.steps[i].name,
self.steps[i].auto_order,
self.steps[i].exog_order,
self.steps[i].exog_delay,
self.steps[i].r2,
)
)
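# Illustrative inputs for grid_search (the estimator choices, the design grid,
# and the `model`/`patient` names are assumptions, not taken from the original
# project):
#
#   from sklearn.linear_model import Ridge
#   from sklearn.ensemble import RandomForestRegressor
#   estimators = {"ridge": Ridge(), "rf": RandomForestRegressor()}
#   design_params = list(itertools.product(
#       [4, 8],            # auto_order candidates
#       [[4, 4]],          # exog_order, one entry per exogenous variable
#       [[0, 0]],          # exog_delay
#   ))
#   model.grid_search(patient.Xtrain, patient.ytrain,
#                     patient.Xval, patient.yval,
#                     estimators, design_params)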
def multioutput_forecast(self, X, y):
"""
Performs a multioutput forecast.
Assumes the forecast should start at the end of the supplied
X and y data.
Takes the following:
* X (dataframe): design matrix
* y (dataframe): target variable
Returns:
* numpy array with the forecasted data.
"""
ypred = []
for step in self.steps:
features, target = core.add_lags(
X, y, step.auto_order, step.exog_order, step.exog_delay
)
ypred.append(step.predict(features[-1, :].reshape(-1, 1).T)[0])
return ypred
def dynamic_forecast(self, X, y, start_time, inserts):
"""
Performs dynamic forecasting using future
exogenous variable information (provided by the inserts parameter).
Uses only the t+1 model from the overall multioutput model.
Takes the following:
* X (dataframe): design matrix
* y (dataframe): target variable
* start_time (datetime): start time of the forecast
* inserts (dict): contains dict of dicts. First level key is the
name of the exogenous variable, with another dict
as the value. This second level dict has the
insertion datetime as the key, and the amount
of the insertion (i.e., grams of carbs, or amount
of insulin bolus)
Returns:
* dataframe with the forecasted values
"""
# set some datetime variables
insert_times = [j for k1 in inserts.keys() for j in inserts[k1].keys()]
max_time = max(insert_times) + dtm.timedelta(minutes=5 * self.horizon)
cur_time = start_time - dtm.timedelta(minutes=5)
keep_dt = pd.date_range(start=start_time, end=max_time, freq="5T")
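# The `inserts` argument of dynamic_forecast is a dict of dicts: the first key
# is the exogenous variable name, the second the insertion time. A hypothetical
# example (times, amounts, and the `model`/`patient` names are illustrative
# assumptions):
#
#   inserts = {
#       "carb_grams": {dtm.datetime(2020, 11, 3, 18, 0): 45},
#       "all_insulin": {dtm.datetime(2020, 11, 3, 18, 5): 6.0},
#   }
#   forecast = model.dynamic_forecast(
#       patient.Xtest, patient.ytest,
#       start_time=dtm.datetime(2020, 11, 3, 17, 30),
#       inserts=inserts,
#   )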
import argparse
import torch
import numpy as np
import pandas as pd
import pickle as pkl
from tqdm import tqdm
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split, KFold
from dataset_graph import construct_dataset, mol_collate_func
from transformer_graph import make_model
from utils import ScheduledOptim, get_options, get_loss, cal_loss, evaluate, scaffold_split
from collections import defaultdict
def model_train(model, train_dataset, valid_dataset, model_params, train_params, dataset_name, fold):
# build data loader
train_loader = DataLoader(dataset=train_dataset, batch_size=train_params['batch_size'], collate_fn=mol_collate_func,
shuffle=True, drop_last=True, num_workers=4, pin_memory=True)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=train_params['batch_size'], collate_fn=mol_collate_func,
shuffle=True, drop_last=True, num_workers=4, pin_memory=True)
# build loss function
criterion = get_loss(train_params['loss_function'])
# build optimizer
optimizer = ScheduledOptim(torch.optim.Adam(model.parameters(), lr=0),
train_params['warmup_factor'], model_params['d_model'],
train_params['total_warmup_steps'])
best_valid_metric = float('inf') if train_params['task'] == 'regression' else float('-inf')
best_epoch = -1
best_valid_result, best_valid_embedding = None, None
for epoch in range(train_params['total_epochs']):
# train
train_loss = list()
model.train()
for batch in tqdm(train_loader):
smile_list, adjacency_matrix, node_features, edge_features, y_true = batch
adjacency_matrix = adjacency_matrix.to(train_params['device']) # (batch, max_length, max_length)
node_features = node_features.to(train_params['device']) # (batch, max_length, d_node)
edge_features = edge_features.to(train_params['device']) # (batch, max_length, max_length, d_edge)
y_true = y_true.to(train_params['device']) # (batch, task_numbers)
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0 # (batch, max_length)
# (batch, task_numbers)
y_pred, _ = model(node_features, batch_mask, adjacency_matrix, edge_features)
loss = cal_loss(y_true, y_pred, train_params['loss_function'], criterion,
train_params['mean'], train_params['std'], train_params['device'])
optimizer.zero_grad()
loss.backward()
optimizer.step_and_update_lr()
train_loss.append(loss.detach().item())
# valid
model.eval()
with torch.no_grad():
valid_true, valid_pred, valid_smile, valid_embedding = list(), list(), list(), list()
for batch in tqdm(valid_loader):
smile_list, adjacency_matrix, node_features, edge_features, y_true = batch
adjacency_matrix = adjacency_matrix.to(train_params['device']) # (batch, max_length, max_length)
node_features = node_features.to(train_params['device']) # (batch, max_length, d_node)
edge_features = edge_features.to(train_params['device']) # (batch, max_length, max_length, d_edge)
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0 # (batch, max_length)
# (batch, task_numbers)
y_pred, y_embedding = model(node_features, batch_mask, adjacency_matrix, edge_features)
y_true = y_true.numpy() # (batch, task_numbers)
y_pred = y_pred.detach().cpu().numpy() # (batch, task_numbers)
y_embedding = y_embedding.detach().cpu().numpy()
valid_true.append(y_true)
valid_pred.append(y_pred)
valid_smile.append(smile_list)
valid_embedding.append(y_embedding)
valid_true, valid_pred = np.concatenate(valid_true, axis=0), np.concatenate(valid_pred, axis=0)
valid_smile, valid_embedding = np.concatenate(valid_smile, axis=0), np.concatenate(valid_embedding, axis=0)
valid_result = evaluate(valid_true, valid_pred, valid_smile,
requirement=['sample', train_params['loss_function'], train_params['metric']],
data_mean=train_params['mean'], data_std=train_params['std'], data_task=train_params['task'])
# save and print message in graph regression
if train_params['task'] == 'regression':
if valid_result[train_params['metric']] < best_valid_metric:
best_valid_metric = valid_result[train_params['metric']]
best_epoch = epoch + 1
best_valid_result = valid_result
best_valid_embedding = valid_embedding
torch.save({'state_dict': model.state_dict(),
'best_epoch': best_epoch,
f'best_valid_{train_params["metric"]}': best_valid_metric},
f'./Model/{dataset_name}/best_model_{dataset_name}_fold_{fold}.pt')
print("Epoch {}, learning rate {:.6f}, "
"train {}: {:.4f}, "
"valid {}: {:.4f}, "
"best epoch {}, best valid {}: {:.4f}"
.format(epoch + 1, optimizer.view_lr(),
train_params['loss_function'], np.mean(train_loss),
train_params['loss_function'], valid_result[train_params['loss_function']],
best_epoch, train_params['metric'], best_valid_metric
))
# save and print message in graph classification
else:
if valid_result[train_params['metric']] > best_valid_metric:
best_valid_metric = valid_result[train_params['metric']]
best_epoch = epoch + 1
best_valid_result = valid_result
best_valid_embedding = valid_embedding
torch.save({'state_dict': model.state_dict(),
'best_epoch': best_epoch,
f'best_valid_{train_params["metric"]}': best_valid_metric},
f'./Model/{dataset_name}/best_model_{dataset_name}_fold_{fold}.pt')
print("Epoch {}, learning rate {:.6f}, "
"train {}: {:.4f}, "
"valid {}: {:.4f}, "
"valid {}: {:.4f}, "
"best epoch {}, best valid {}: {:.4f}"
.format(epoch + 1, optimizer.view_lr(),
train_params['loss_function'], np.mean(train_loss),
train_params['loss_function'], valid_result[train_params['loss_function']],
train_params['metric'], valid_result[train_params['metric']],
best_epoch, train_params['metric'], best_valid_metric
))
# early stop
if abs(best_epoch - epoch) >= 20:
print("=" * 20 + ' early stop ' + "=" * 20)
break
return best_valid_result, best_valid_embedding
def model_test(checkpoint, test_dataset, model_params, train_params):
# build loader
test_loader = DataLoader(dataset=test_dataset, batch_size=train_params['batch_size'], collate_fn=mol_collate_func,
shuffle=False, drop_last=True, num_workers=4, pin_memory=True)
# build model
model = make_model(**model_params)
model.to(train_params['device'])
model.load_state_dict(checkpoint['state_dict'])
# test
model.eval()
with torch.no_grad():
test_true, test_pred, test_smile, test_embedding = list(), list(), list(), list()
for batch in tqdm(test_loader):
smile_list, adjacency_matrix, node_features, edge_features, y_true = batch
adjacency_matrix = adjacency_matrix.to(train_params['device']) # (batch, max_length, max_length)
node_features = node_features.to(train_params['device']) # (batch, max_length, d_node)
edge_features = edge_features.to(train_params['device']) # (batch, max_length, max_length, d_edge)
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0 # (batch, max_length)
# (batch, task_numbers)
y_pred, y_embedding = model(node_features, batch_mask, adjacency_matrix, edge_features)
y_true = y_true.numpy() # (batch, task_numbers)
y_pred = y_pred.detach().cpu().numpy() # (batch, task_numbers)
y_embedding = y_embedding.detach().cpu().numpy()
test_true.append(y_true)
test_pred.append(y_pred)
test_smile.append(smile_list)
test_embedding.append(y_embedding)
test_true, test_pred = np.concatenate(test_true, axis=0), np.concatenate(test_pred, axis=0)
test_smile, test_embedding = np.concatenate(test_smile, axis=0), np.concatenate(test_embedding, axis=0)
test_result = evaluate(test_true, test_pred, test_smile,
requirement=['sample', train_params['loss_function'], train_params['metric']],
data_mean=train_params['mean'], data_std=train_params['std'], data_task=train_params['task'])
print("test {}: {:.4f}".format(train_params['metric'], test_result[train_params['metric']]))
return test_result, test_embedding
if __name__ == '__main__':
# init args
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, help="random seeds", default=np.random.randint(10000))
parser.add_argument("--gpu", type=str, help='gpu', default=-1)
parser.add_argument("--fold", type=int, help='the number of k-fold', default=5)
parser.add_argument("--dataset", type=str, help='choose a dataset', default='esol')
parser.add_argument("--split", type=str, help="choose the split type", default='random',
choices=['random', 'scaffold', 'cv'])
args = parser.parse_args()
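# Example invocation (the script file name is an assumption):
#   python train_graph.py --dataset esol --split scaffold --fold 5 --gpu 0 --seed 42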
# load options
model_params, train_params = get_options(args.dataset)
# init device and seed
print(f"Seed: {args.seed}")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
train_params['device'] = torch.device(f'cuda:{args.gpu}')
torch.cuda.manual_seed(args.seed)
else:
train_params['device'] = torch.device('cpu')
# load data
if train_params['task'] == 'regression':
with open(f'./Data/{args.dataset}/preprocess/{args.dataset}.pickle', 'rb') as f:
[data_mol, data_label, data_mean, data_std] = pkl.load(f)
else:
with open(f'./Data/{args.dataset}/preprocess/{args.dataset}.pickle', 'rb') as f:
[data_mol, data_label] = pkl.load(f)
# calculate the padding
model_params['max_length'] = max([data.GetNumAtoms() for data in data_mol])
print(f"Max padding length is: {model_params['max_length']}")
# construct dataset
print('=' * 20 + ' construct dataset ' + '=' * 20)
dataset = construct_dataset(data_mol, data_label, model_params['d_atom'], model_params['d_edge'], model_params['max_length'])
total_metrics = defaultdict(list)
# split dataset
if args.split == 'scaffold':
# we run the scaffold split 5 times for different random seed, which means different train/valid/test
for idx in range(args.fold):
print('=' * 20 + f' train on fold {idx + 1} ' + '=' * 20)
# get dataset
train_index, valid_index, test_index = scaffold_split(data_mol, frac=[0.8, 0.1, 0.1], balanced=True,
include_chirality=False, ramdom_state=args.seed + idx)
train_dataset, valid_dataset, test_dataset = dataset[train_index], dataset[valid_index], dataset[test_index]
# calculate total warmup steps
train_params['total_warmup_steps'] = \
int(len(train_dataset) / train_params['batch_size']) * train_params['total_warmup_epochs']
print('train warmup step is: {}'.format(train_params['total_warmup_steps']))
if train_params['task'] == 'regression':
train_params['mean'] = np.mean(np.array(data_label)[train_index])
train_params['std'] = np.std(np.array(data_label)[train_index])
else:
train_params['mean'], train_params['std'] = 0, 1
# define a model
model = make_model(**model_params)
model = model.to(train_params['device'])
# train and valid
print(f"train size: {len(train_dataset)}, valid size: {len(valid_dataset)}, test size: {len(test_dataset)}")
best_valid_result, _ = model_train(model, train_dataset, valid_dataset, model_params, train_params, args.dataset, idx + 1)
best_valid_csv = pd.DataFrame.from_dict({'smile': best_valid_result['smile'], 'actual': best_valid_result['label'], 'predict': best_valid_result['prediction']})
#!/usr/bin/env python
import datetime
import json
import logging
import os
import traceback
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List
import yaml
from pandas import DataFrame, Int32Dtype, concat, isna, read_csv
# ROOT directory
ROOT = Path(os.path.dirname(__file__))
# Used to fill unknown districts
UNKNOWN_DISTRICT_ID = "9999999"
def table_rename(data: DataFrame, column_adapter: Dict[str, str]) -> DataFrame:
"""Rename all columns of a dataframe and drop the columns not in the adapter."""
data = data.rename(columns=column_adapter)
data = data.drop(columns=[col for col in data.columns if col not in column_adapter.values()])
return data
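# A minimal sketch of how table_rename is intended to be used (the column names
# below are illustrative assumptions):
def _example_table_rename() -> DataFrame:
    data = DataFrame({"Fecha": ["2020-05-01"], "Casos": ["10"], "Extra": ["x"]})
    adapter = {"Fecha": "date", "Casos": "new_confirmed"}
    # "Extra" is dropped because it is not a value of the adapter
    return table_rename(data, adapter)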
def load_config(config_path: str) -> Dict[str, Any]:
"""Load a YAML configuration file given its path."""
with open(config_path, "r") as fh:
config_yaml = yaml.safe_load(fh)
return config_yaml
def nullable_method_call(func: Callable, *args, print_exc: bool = True, **kwargs) -> Any:
"""Return the output of calling the provided `func`, default to `None` in case of failure."""
try:
return func(*args, **kwargs)
except:
if print_exc:
traceback.print_exc()
return None
def convert_dtype(schema: Dict[str, str], data: DataFrame) -> DataFrame:
"""Convert all columns in `data` to the appropriate dtype according to `schema`."""
df = DataFrame(index=data.index)
for column_name, dtype in schema.items():
if column_name not in data.columns:
continue
elif dtype == "str":
df[column_name] = data[column_name]
elif dtype == "float":
apply_func = partial(nullable_method_call, float, print_exc=False)
df[column_name] = data[column_name].apply(apply_func).astype(float)
elif dtype == "int":
apply_func = partial(nullable_method_call, int, print_exc=False)
df[column_name] = data[column_name].apply(apply_func).astype(Int32Dtype())
else:
raise TypeError(f"Unknown dtype {dtype}")
return df
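# Hedged usage sketch for convert_dtype (the schema and values are illustrative):
def _example_convert_dtype() -> DataFrame:
    schema = {"date": "str", "new_confirmed": "int", "new_tested": "float"}
    data = DataFrame({"date": ["2020-05-01"],
                      "new_confirmed": ["10"],
                      "new_tested": ["12.5"]})
    # "new_confirmed" becomes a nullable Int32 column, "new_tested" a float
    return convert_dtype(schema, data)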
def parse_district_id(district_id: str) -> str:
"""Ensure that `district_id` is a 7-digit string."""
try:
return f"{int(district_id):07d}"
except:
logging.error(f"Unknown district {district_id}")
return None
def read_data(schema: Dict[str, str], state: str, url: str) -> DataFrame:
"""Read all CSV data from `url` into a dataframe and use `schema` to determine dtype."""
data = read_csv(url, dtype=str, skiprows=1)
'''
Title: Git Data Commit
Description: This script collects data from the COVID-19 Hub feature layers hosted on ArcGIS Online onto the local machine and
then runs Git commands to commit the data in CSV format to this repository.
'''
# Import the required libraries
import pandas as pd
# from arcgis.features import GeoAccessor, GeoSeriesAccessor ==> not really using it right now
from arcgis import GIS
# initialise the GIS object
gis = GIS()
# retrieve the Infections Times Series Layer item
infections = gis.content.get("42ec33c9361d49b585a23d780207726d")
infections_layer = infections.layers[0]
# retrieve the Vaccination Progress Times Series
vaccination = gis.content.get("0bad4380917a48da8f4f12028709c443")
vaccination_layer = vaccination.layers[2]
# retrieve the Provincial Records
provincial = gis.content.get("122efe4c5ab54ff9a9e75cc1908d48f4")
provincial_layer = provincial.layers[0]
# retrieve the Provincial Times Series Records
prov_time_series = gis.content.get("20703dd3a24f45f08ea37034285d3492")
prov_time_series_layer = prov_time_series.layers[0]
# create Spatial Dataframe objects
infections_df = pd.DataFrame.spatial.from_layer(infections_layer)
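# A hedged sketch of the remaining workflow described in the header (CSV export
# followed by a Git commit). The file paths, commit message, and use of
# subprocess are assumptions, not part of the original script:
#
#   infections_df.to_csv("data/infections_time_series.csv", index=False)
#   import subprocess
#   subprocess.run(["git", "add", "data/"], check=True)
#   subprocess.run(["git", "commit", "-m", "Update COVID-19 hub data"], check=True)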
import re
import pandas as pd
import numpy as np
from gensim import corpora, models, similarities
from difflib import SequenceMatcher
from build_tfidf import split
def ratio(w1, w2):
'''
Calculate the matching ratio between 2 words.
Only account for word pairs with at least 90% similarity
'''
m = SequenceMatcher(None, w1, w2)
r = m.ratio()
if r < 0.9: r = 0.0
return r
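# Quick illustration of the 90% cutoff in ratio() (word pairs chosen arbitrarily):
#   ratio('angle', 'angled')   -> ~0.91, kept as-is
#   ratio('angle', 'brackets') -> below 0.9, clamped to 0.0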
def build_features(data, tfidf, dictionary):
'''
Generate features:
1. Cosine similarity between tf-idf vectors of query vs. title
2. Cosine similarity between tf-idf vectors of query vs. description
3. Cosine similarity between tf-idf vectors of query vs. attribute text
4. Sum of word match ratios between query vs. title
5. Sum of word match ratios between query vs. description
6. Sum of word match ratios between query vs. attribute text
7. Query word count
'''
result = []
for loc in xrange(len(data)):
rowdata = data.loc[loc, ["product_title", "product_description", "attr_value", "search_term"]]
rowbow = [[str(text)] if isinstance(text, float) else split(text) for text in rowdata]
# query match level
titleMatch = descMatch = attrMatch = 0
for q in rowbow[3]:
titleMatch = titleMatch + np.sum(map(lambda w: ratio(q, w), rowbow[0]))
descMatch = descMatch + np.sum(map(lambda w: ratio(q, w), rowbow[1]))
attrMatch = attrMatch + np.sum(map(lambda w: ratio(q, w), rowbow[2]))
# get tfidf vectors
rowdata = [tfidf[dictionary.doc2bow(text)] for text in rowbow]
# prepare to get similarities
index = similarities.SparseMatrixSimilarity(rowdata[:3], num_features=len(dictionary))
# append everything to the result
result.append(np.concatenate((index[rowdata[3]], [titleMatch, descMatch, attrMatch, len(rowbow[3])]), axis=0).tolist())
# end loop
return np.array(result)
def main():
# load data
df_desc = pd.read_csv('data/product_descriptions.csv', encoding="ISO-8859-1")
df_attr = pd.read_csv('data/attributes_combined.csv', encoding="ISO-8859-1")
df_train = pd.read_csv('data/train.csv', encoding="ISO-8859-1")
df_train = pd.merge(df_train, df_desc, how='left', on='product_uid')
df_train = pd.merge(df_train, df_attr, how='left', on='product_uid')
df_test = pd.read_csv('data/test.csv', encoding="ISO-8859-1")
df_test = pd.merge(df_test, df_desc, how='left', on='product_uid')
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
    assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
# -*- coding: utf-8 -*-
from statsmodels.compat.pandas import Appender, Substitution, to_numpy
from collections.abc import Iterable
import datetime as dt
from types import SimpleNamespace
import warnings
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde, norm
from statsmodels.tsa.base.prediction import PredictionResults
import statsmodels.base.wrapper as wrap
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly, cache_writable
from statsmodels.tools.docstring import Docstring, remove_parameters
from statsmodels.tools.validation import (
array_like,
bool_like,
int_like,
string_like,
)
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tsa.base import tsa_model
from statsmodels.tsa.deterministic import (
DeterministicProcess,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.tsatools import (
freq_to_period,
lagmat,
)
__all__ = ["AR", "AutoReg"]
AR_DEPRECATION_WARN = """
statsmodels.tsa.AR has been deprecated in favor of statsmodels.tsa.AutoReg and
statsmodels.tsa.SARIMAX.
AutoReg adds the ability to specify exogenous variables, include time trends,
and add seasonal dummies. The AutoReg API differs from AR since the model is
treated as immutable, and so the entire specification including the lag
length must be specified when creating the model. This change is too
substantial to incorporate into the existing AR api. The function
ar_select_order performs lag length selection for AutoReg models.
AutoReg only estimates parameters using conditional MLE (OLS). Use SARIMAX to
estimate ARX and related models using full MLE via the Kalman Filter.
To silence this warning and continue using AR until it is removed, use:
import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.ar_model.AR', FutureWarning)
"""
REPEATED_FIT_ERROR = """
Model has been fit using maxlag={0}, method={1}, ic={2}, trend={3}. These
cannot be changed in subsequent calls to `fit`. Instead, use a new instance of
AR.
"""
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x ** 2, axis=axis)
def _ar_predict_out_of_sample(y, params, k_ar, k_trend, steps, start=0):
    mu = params[:k_trend] if k_trend else 0  # only have to worry about the constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(k_ar + steps) # this is one too big but does not matter
if start:
endog[:k_ar] = y[start - k_ar : start]
else:
endog[:k_ar] = y[-k_ar:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i : i + k_ar])
forecast[i] = fcast
endog[i + k_ar] = fcast
return forecast
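# Illustrative sketch (not part of the original module): exercising the
# recursive forecast helper above for an AR(2) with a constant. The series
# and parameter values below are made up purely for demonstration.
def _demo_ar_predict_out_of_sample():
    y = np.array([1.0, 1.2, 0.9, 1.1, 1.05])
    params = np.array([0.2, 0.5, 0.3])  # [constant, lag-1 coef, lag-2 coef]
    # Seed the recursion with the last k_ar observations, then roll forward,
    # feeding each forecast back in as a lagged regressor for the next step.
    return _ar_predict_out_of_sample(y, params, k_ar=2, k_trend=1, steps=3)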
class AutoReg(tsa_model.TimeSeriesModel):
"""
Autoregressive AR-X(p) model.
Estimate an AR-X model using Conditional Maximum Likelihood (OLS).
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The dependent variable.
lags : {int, list[int]}
The number of lags to include in the model if an integer or the
list of lag indices to include. For example, [1, 4] will only
include lags 1 and 4 while lags=4 will include lags 1, 2, 3, and 4.
trend : {'n', 'c', 't', 'ct'}
The trend to include in the model:
* 'n' - No trend.
* 'c' - Constant only.
* 't' - Time trend only.
* 'ct' - Constant and time trend.
seasonal : bool
Flag indicating whether to include seasonal dummies in the model. If
seasonal is True and trend includes 'c', then the first period
is excluded from the seasonal terms.
exog : array_like, optional
Exogenous variables to include in the model. Must have the same number
of observations as endog and should be aligned so that endog[i] is
regressed on exog[i].
hold_back : {None, int}
Initial observations to exclude from the estimation sample. If None,
then hold_back is equal to the maximum lag in the model. Set to a
non-zero value to produce comparable models with different lag
length. For example, to compare the fit of a model with lags=3 and
lags=1, set hold_back=3 which ensures that both models are estimated
using observations 3,...,nobs. hold_back must be >= the maximum lag in
the model.
period : {None, int}
The period of the data. Only used if seasonal is True. This parameter
can be omitted if using a pandas object for endog that contains a
recognized frequency.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none'.
deterministic : DeterministicProcess
A deterministic process. If provided, trend and seasonal are ignored.
A warning is raised if trend is not "n" and seasonal is not False.
old_names : bool
Flag indicating whether to use the v0.11 names or the v0.12+ names.
.. deprecated:: 0.13
old_names is deprecated and will be removed after 0.14 is
released. You must update any code reliant on the old variable
names to use the new names.
See Also
--------
statsmodels.tsa.statespace.sarimax.SARIMAX
Estimation of SARIMAX models using exact likelihood and the
Kalman Filter.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.tsa.ar_model import AutoReg
>>> data = sm.datasets.sunspots.load_pandas().data['SUNACTIVITY']
>>> out = 'AIC: {0:0.3f}, HQIC: {1:0.3f}, BIC: {2:0.3f}'
Start by fitting an unrestricted Seasonal AR model
>>> res = AutoReg(data, lags = [1, 11, 12]).fit()
>>> print(out.format(res.aic, res.hqic, res.bic))
AIC: 5.945, HQIC: 5.970, BIC: 6.007
    An alternative uses seasonal dummies
>>> res = AutoReg(data, lags=1, seasonal=True, period=11).fit()
>>> print(out.format(res.aic, res.hqic, res.bic))
AIC: 6.017, HQIC: 6.080, BIC: 6.175
Finally, both the seasonal AR structure and dummies can be included
>>> res = AutoReg(data, lags=[1, 11, 12], seasonal=True, period=11).fit()
>>> print(out.format(res.aic, res.hqic, res.bic))
AIC: 5.884, HQIC: 5.959, BIC: 6.071
"""
def __init__(
self,
endog,
lags,
trend="c",
seasonal=False,
exog=None,
hold_back=None,
period=None,
missing="none",
*,
deterministic=None,
old_names=False
):
super(AutoReg, self).__init__(endog, exog, None, None, missing=missing)
self._trend = string_like(
trend, "trend", options=("n", "c", "t", "ct")
)
self._seasonal = bool_like(seasonal, "seasonal")
self._period = int_like(period, "period", optional=True)
if self._period is None and self._seasonal:
if self.data.freq:
self._period = freq_to_period(self._index_freq)
else:
err = (
"freq cannot be inferred from endog and model includes"
" seasonal terms. The number of periods must be "
"explicitly set when the endog's index does not "
"contain a frequency."
)
raise ValueError(err)
terms = [TimeTrend.from_string(self._trend)]
if seasonal:
terms.append(Seasonality(self._period))
if hasattr(self.data.orig_endog, "index"):
index = self.data.orig_endog.index
else:
index = np.arange(self.data.endog.shape[0])
self._user_deterministic = False
if deterministic is not None:
if not isinstance(deterministic, DeterministicProcess):
raise TypeError("deterministic must be a DeterministicProcess")
self._deterministics = deterministic
self._user_deterministic = True
else:
self._deterministics = DeterministicProcess(
index, additional_terms=terms
)
self._lags = lags
self._exog_names = []
self._k_ar = 0
self._hold_back = int_like(hold_back, "hold_back", optional=True)
self._old_names = bool_like(old_names, "old_names", optional=False)
if deterministic is not None and (
self._trend != "n" or self._seasonal
):
warnings.warn(
'When using deterministic, trend must be "n" and '
"seasonal must be False.",
RuntimeWarning,
)
if self._old_names:
warnings.warn(
"old_names will be removed after the 0.14 release. You should "
"stop setting this parameter and use the new names.",
FutureWarning,
)
self._check_lags()
self._setup_regressors()
self.nobs = self._y.shape[0]
self.data.xnames = self.exog_names
@property
def ar_lags(self):
"""The autoregressive lags included in the model"""
return self._lags
@property
def hold_back(self):
"""The number of initial obs. excluded from the estimation sample."""
return self._hold_back
@property
def seasonal(self):
"""Flag indicating that the model contains a seasonal component."""
return self._seasonal
@property
def df_model(self):
"""The model degrees of freedom."""
return self._x.shape[1]
@property
def exog_names(self):
"""Names of exogenous variables included in model"""
return self._exog_names
def initialize(self):
"""Initialize the model (no-op)."""
pass
def _check_lags(self):
lags = self._lags
if isinstance(lags, Iterable):
lags = np.array(sorted([int_like(lag, "lags") for lag in lags]))
self._lags = lags
if np.any(lags < 1) or np.unique(lags).shape[0] != lags.shape[0]:
raise ValueError(
"All values in lags must be positive and " "distinct."
)
self._maxlag = np.max(lags)
else:
self._maxlag = int_like(lags, "lags")
if self._maxlag < 0:
raise ValueError("lags must be a positive scalar.")
self._lags = np.arange(1, self._maxlag + 1)
if self._hold_back is None:
self._hold_back = self._maxlag
if self._hold_back < self._maxlag:
            raise ValueError(
                "hold_back must be >= lags if lags is an int or "
                "max(lags) if lags is array_like."
)
def _setup_regressors(self):
maxlag = self._maxlag
hold_back = self._hold_back
exog_names = []
endog_names = self.endog_names
x, y = lagmat(self.endog, maxlag, original="sep")
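        # lagmat(..., original="sep") returns the lag matrix and the
        # contemporaneous (unlagged) endog values as separate arrays.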
exog_names.extend(
[endog_names + ".L{0}".format(lag) for lag in self._lags]
)
if len(self._lags) < maxlag:
x = x[:, self._lags - 1]
self._k_ar = x.shape[1]
deterministic = self._deterministics.in_sample()
if deterministic.shape[1]:
x = np.c_[to_numpy(deterministic), x]
if self._old_names:
deterministic_names = []
if "c" in self._trend:
deterministic_names.append("intercept")
if "t" in self._trend:
deterministic_names.append("trend")
if self._seasonal:
period = self._period
names = ["seasonal.{0}".format(i) for i in range(period)]
if "c" in self._trend:
names = names[1:]
deterministic_names.extend(names)
else:
deterministic_names = list(deterministic.columns)
exog_names = deterministic_names + exog_names
if self.exog is not None:
x = np.c_[x, self.exog]
exog_names.extend(self.data.param_names)
y = y[hold_back:]
x = x[hold_back:]
if y.shape[0] < x.shape[1]:
reg = x.shape[1]
period = self._period
trend = 0 if self._trend == "n" else len(self._trend)
seas = 0 if not self._seasonal else period - ("c" in self._trend)
lags = self._lags.shape[0]
nobs = y.shape[0]
raise ValueError(
"The model specification cannot be estimated. "
"The model contains {0} regressors ({1} trend, "
"{2} seasonal, {3} lags) but after adjustment "
"for hold_back and creation of the lags, there "
"are only {4} data points available to estimate "
"parameters.".format(reg, trend, seas, lags, nobs)
)
self._y, self._x = y, x
self._exog_names = exog_names
def fit(self, cov_type="nonrobust", cov_kwds=None, use_t=False):
"""
Estimate the model parameters.
Parameters
----------
cov_type : str
The covariance estimator to use. The most common choices are listed
below. Supports all covariance estimators that are available
in ``OLS.fit``.
* 'nonrobust' - The class OLS covariance estimator that assumes
homoskedasticity.
* 'HC0', 'HC1', 'HC2', 'HC3' - Variants of White's
(or Eiker-Huber-White) covariance estimator. `HC0` is the
standard implementation. The other make corrections to improve
the finite sample performance of the heteroskedasticity robust
covariance estimator.
* 'HAC' - Heteroskedasticity-autocorrelation robust covariance
estimation. Supports cov_kwds.
- `maxlags` integer (required) : number of lags to use.
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett.
- `use_correction` bool (optional) : If true, use small sample
correction.
cov_kwds : dict, optional
A dictionary of keyword arguments to pass to the covariance
estimator. `nonrobust` and `HC#` do not support cov_kwds.
use_t : bool, optional
A flag indicating that inference should use the Student's t
distribution that accounts for model degree of freedom. If False,
uses the normal distribution. If None, defers the choice to
the cov_type. It also removes degree of freedom corrections from
the covariance estimator when cov_type is 'nonrobust'.
Returns
-------
AutoRegResults
Estimation results.
See Also
--------
statsmodels.regression.linear_model.OLS
Ordinary Least Squares estimation.
statsmodels.regression.linear_model.RegressionResults
See ``get_robustcov_results`` for a detailed list of available
covariance estimators and options.
Notes
-----
Use ``OLS`` to estimate model parameters and to estimate parameter
covariance.
"""
# TODO: Determine correction for degree-of-freedom
# Special case parameterless model
if self._x.shape[1] == 0:
return AutoRegResultsWrapper(
AutoRegResults(self, np.empty(0), np.empty((0, 0)))
)
ols_mod = OLS(self._y, self._x)
ols_res = ols_mod.fit(
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t
)
cov_params = ols_res.cov_params()
use_t = ols_res.use_t
if cov_type == "nonrobust" and not use_t:
nobs = self._y.shape[0]
k = self._x.shape[1]
scale = nobs / (nobs - k)
cov_params /= scale
res = AutoRegResults(
self, ols_res.params, cov_params, ols_res.normalized_cov_params
)
return AutoRegResultsWrapper(res)
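    # Hypothetical usage of the covariance options documented above
    # (``y`` stands in for any 1-d endogenous series):
    #
    #   res_hc0 = AutoReg(y, lags=2).fit(cov_type="HC0")
    #   res_hac = AutoReg(y, lags=2).fit(cov_type="HAC", cov_kwds={"maxlags": 4})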
def _resid(self, params):
params = array_like(params, "params", ndim=2)
resid = self._y - self._x @ params
return resid.squeeze()
def loglike(self, params):
"""
Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Returns
-------
float
The log-likelihood value.
"""
nobs = self.nobs
resid = self._resid(params)
ssr = resid @ resid
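        # Gaussian log-likelihood concentrated over the error variance,
        # i.e. evaluated at sigma2_hat = ssr / nobs.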
llf = -(nobs / 2) * (np.log(2 * np.pi) + np.log(ssr / nobs) + 1)
return llf
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The score vector evaluated at the parameters.
"""
resid = self._resid(params)
return self._x.T @ resid
def information(self, params):
"""
Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The information matrix.
"""
resid = self._resid(params)
sigma2 = resid @ resid / self.nobs
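        # Fisher information of the mean parameters in a Gaussian linear
        # model: X'X scaled by the inverse error variance.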
        return (self._x.T @ self._x) / sigma2
def hessian(self, params):
"""
The Hessian matrix of the model.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The hessian evaluated at the parameters.
"""
return -self.information(params)
def _setup_oos_forecast(self, add_forecasts, exog_oos):
x = np.zeros((add_forecasts, self._x.shape[1]))
oos_exog = self._deterministics.out_of_sample(steps=add_forecasts)
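        # Deterministic terms (constant/trend/seasonal dummies) evaluated over
        # the forecast horizon occupy the leading columns of x.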
n_deterministic = oos_exog.shape[1]
x[:, :n_deterministic] = to_numpy(oos_exog)
# skip the AR columns
loc = n_deterministic + len(self._lags)
if self.exog is not None:
x[:, loc:] = exog_oos[:add_forecasts]
return x
def _wrap_prediction(self, prediction, start, end):
n_values = end - start
if not isinstance(self.data.orig_endog, (pd.Series, pd.DataFrame)):
return prediction[-n_values:]
index = self._index
if end > self.endog.shape[0]:
freq = getattr(index, "freq", None)
if freq:
if isinstance(index, pd.PeriodIndex):
index = pd.period_range(index[0], freq=freq, periods=end)
else:
                    index = pd.date_range(index[0], freq=freq, periods=end)
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
        self.bool_series = Series(arr, index=self.bool_index, name="a")
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.testing._utils import NUMERIC_TYPES, assert_eq
from cudf.utils.dtypes import np_dtypes_to_pandas_dtypes
def test_can_cast_safely_same_kind():
# 'i' -> 'i'
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="int64")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**31], dtype="int64")._column
assert not data.can_cast_safely(to_dtype)
# 'u' -> 'u'
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("uint64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint64")._column
to_dtype = np.dtype("uint32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**33], dtype="uint64")._column
assert not data.can_cast_safely(to_dtype)
# 'f' -> 'f'
data = cudf.Series([np.inf, 1.0], dtype="float64")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series(
[np.finfo("float32").max * 2, 1.0], dtype="float64"
)._column
to_dtype = np.dtype("float32")
assert not data.can_cast_safely(to_dtype)
def test_can_cast_safely_mixed_kind():
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
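    # (float32 carries a 24-bit significand, so 2**24 + 1 rounds to 2**24)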
data = cudf.Series([1, 2, 2**24 + 1], dtype="int32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="uint32")._column
assert not data.can_cast_safely(to_dtype)
to_dtype = np.dtype("float64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1.0, 2.0, 3.0], dtype="float32")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
# not integer float
data = cudf.Series([1.0, 2.0, 3.5], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([10.0, 11.0, 2000.0], dtype="float64")._column
assert data.can_cast_safely(to_dtype)
# float out of int range
data = cudf.Series([1.0, 2.0, 1.0 * (2**31)], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
# negative signed integers casting to unsigned integers
data = cudf.Series([-1, 0, 1], dtype="int32")._column
to_dtype = np.dtype("uint32")
assert not data.can_cast_safely(to_dtype)
def test_to_pandas_nullable_integer():
gsr_not_null = cudf.Series([1, 2, 3])
gsr_has_null = cudf.Series([1, 2, None])
psr_not_null = pd.Series([1, 2, 3], dtype="int64")
psr_has_null = pd.Series([1, 2, None], dtype="Int64")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_to_pandas_nullable_bool():
gsr_not_null = cudf.Series([True, False, True])
gsr_has_null = cudf.Series([True, False, None])
psr_not_null = pd.Series([True, False, True], dtype="bool")
psr_has_null = pd.Series([True, False, None], dtype="boolean")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_can_cast_safely_has_nulls():
data = cudf.Series([1, 2, 3, None], dtype="float32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3.1, None], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
@pytest.mark.parametrize(
"data",
[
[1, 2, 3],
(1.0, 2.0, 3.0),
[float("nan"), None],
np.array([1, 2.0, -3, float("nan")]),
pd.Series(["123", "2.0"]),
pd.Series(["1.0", "2.", "-.3", "1e6"]),
pd.Series(
["1", "2", "3"],
dtype=pd.CategoricalDtype(categories=["1", "2", "3"]),
),
pd.Series(
["1.0", "2.0", "3.0"],
dtype=pd.CategoricalDtype(categories=["1.0", "2.0", "3.0"]),
),
# Categories with nulls
pd.Series([1, 2, 3], dtype=pd.CategoricalDtype(categories=[1, 2])),
pd.Series(
[5.0, 6.0], dtype=pd.CategoricalDtype(categories=[5.0, 6.0])
),
pd.Series(
["2020-08-01 08:00:00", "1960-08-01 08:00:00"],
dtype=np.dtype("<M8[ns]"),
),
pd.Series(
[pd.Timedelta(days=1, seconds=1), pd.Timedelta("-3 seconds 4ms")],
dtype=np.dtype("<m8[ns]"),
),
[
"inf",
"-inf",
"+inf",
"infinity",
"-infinity",
"+infinity",
"inFInity",
],
],
)
def test_to_numeric_basic_1d(data):
expected = pd.to_numeric(data)
got = cudf.to_numeric(data)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1, 2**11],
[1, 2**33],
[1, 2**63],
[np.iinfo(np.int64).max, np.iinfo(np.int64).min],
],
)
@pytest.mark.parametrize(
"downcast", ["integer", "signed", "unsigned", "float"]
)
def test_to_numeric_downcast_int(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**11],
[-1.0, -(2.0**11)],
[1.0, 2.0**33],
[-1.0, -(2.0**33)],
[1.0, 2.0**65],
[-1.0, -(2.0**65)],
[1.0, float("inf")],
[1.0, float("-inf")],
[1.0, float("nan")],
[1.0, 2.0, 3.0, 4.0],
[1.0, 1.5, 2.6, 3.4],
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**129],
[1.0, 2.0**257],
[1.0, 1.79e308],
[-1.0, -(2.0**129)],
[-1.0, -(2.0**257)],
[-1.0, -1.79e308],
],
)
@pytest.mark.parametrize("downcast", ["signed", "integer", "unsigned"])
def test_to_numeric_downcast_large_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**129],
[1.0, 2.0**257],
[1.0, 1.79e308],
[-1.0, -(2.0**129)],
[-1.0, -(2.0**257)],
[-1.0, -1.79e308],
],
)
@pytest.mark.parametrize("downcast", ["float"])
def test_to_numeric_downcast_large_float_pd_bug(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
# Pandas bug: https://github.com/pandas-dev/pandas/issues/19729
with pytest.raises(AssertionError, match="Series are different"):
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
["1", "2", "3"],
[str(np.iinfo(np.int64).max), str(np.iinfo(np.int64).min)],
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_int(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[""], # pure empty strings
["10.0", "11.0", "2e3"],
["1.0", "2e3"],
["1", "10", "1.0", "2e3"], # int-float mixed
["1", "10", "1.0", "2e3", "2e+3", "2e-3"],
["1", "10", "1.0", "2e3", "", ""], # mixed empty strings
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
if downcast in {"signed", "integer", "unsigned"}:
with pytest.warns(
UserWarning,
match="Downcasting from float to int "
"will be limited by float32 precision.",
):
got = cudf.to_numeric(gs, downcast=downcast)
else:
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
["2e128", "-2e128"],
[
"1.79769313486231e308",
"-1.79769313486231e308",
], # 2 digits relaxed from np.finfo(np.float64).min/max
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_large_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
if downcast == "float":
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
# Pandas bug: https://github.com/pandas-dev/pandas/issues/19729
with pytest.raises(AssertionError, match="Series are different"):
assert_eq(expected, got)
else:
expected = pd.Series([np.inf, -np.inf])
with pytest.warns(
UserWarning,
match="Downcasting from float to int "
"will be limited by float32 precision.",
):
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
        pd.Series(["1", "a", "3"]),
import numpy as np
import pandas as pd
import dask
from dask.delayed import tokenize
from ... import delayed
from .. import methods
from .io import from_delayed, from_pandas
def read_sql_table(
table,
uri,
index_col,
divisions=None,
npartitions=None,
limits=None,
columns=None,
bytes_per_chunk="256 MiB",
head_rows=5,
schema=None,
meta=None,
engine_kwargs=None,
**kwargs,
):
"""
Create dataframe from an SQL table.
    If neither divisions nor npartitions is given, the memory footprint of the
    first few rows will be determined, and partitions of size ~256 MiB will
    be used.
Parameters
----------
table : string or sqlalchemy expression
Select columns from here.
uri : string
Full sqlalchemy URI for the database connection
index_col : string
Column which becomes the index, and defines the partitioning. Should
        be an indexed column in the SQL server, and any orderable type. If the
        type is number or time, then partition boundaries can be inferred from
        npartitions or bytes_per_chunk; otherwise you must supply explicit
``divisions=``.
``index_col`` could be a function to return a value, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
``index_col=sql.func.abs(sql.column("value")).label("abs(value)")``, or
``index_col=cast(sql.column("id"),types.BigInteger).label("id")`` to convert
the textfield ``id`` to ``BigInteger``.
        Note that ``sql``, ``cast``, and ``types`` come from the ``sqlalchemy`` module.
Labeling columns created by functions or arithmetic operations is
required.
divisions: sequence
Values of the index column to split the table by. If given, this will
override npartitions and bytes_per_chunk. The divisions are the value
boundaries of the index column used to define the partitions. For
example, ``divisions=list('acegikmoqsuwz')`` could be used to partition
        a string column lexicographically into 12 partitions, with the implicit
assumption that each partition contains similar numbers of records.
npartitions : int
Number of partitions, if divisions is not given. Will split the values
of the index column linearly between limits, if given, or the column
        max/min. The index column must be numeric or time for this to work.
limits: 2-tuple or None
Manually give upper and lower range of values for use with npartitions;
if None, first fetches max/min from the DB. Upper limit, if
given, is inclusive.
columns : list of strings or None
Which columns to select; if None, gets all; can include sqlalchemy
functions, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
Labeling columns created by functions or arithmetic operations is
recommended.
bytes_per_chunk : str, int
        If both divisions and npartitions are None, this is the target size of
each partition, in bytes
head_rows : int
How many rows to load for inferring the data-types, unless passing meta
meta : empty DataFrame or None
If provided, do not attempt to infer dtypes, but use these, coercing
all chunks on load
schema : str or None
If using a table name, pass this to sqlalchemy to select which DB
schema to use within the URI connection
engine_kwargs : dict or None
Specific db engine parameters for sqlalchemy
kwargs : dict
Additional parameters to pass to `pd.read_sql()`
Returns
-------
dask.dataframe
Examples
--------
>>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
... npartitions=10, index_col='id') # doctest: +SKIP
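    A sketch of the same call partitioned by explicit ``divisions`` (the
    boundary values below are hypothetical):

    >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
    ...                        divisions=[0, 25000, 50000, 75000, 100000],
    ...                        index_col='id')  # doctest: +SKIP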
"""
import sqlalchemy as sa
from sqlalchemy import sql
from sqlalchemy.sql import elements
if index_col is None:
raise ValueError("Must specify index column to partition on")
engine_kwargs = {} if engine_kwargs is None else engine_kwargs
engine = sa.create_engine(uri, **engine_kwargs)
m = sa.MetaData()
if isinstance(table, str):
table = sa.Table(table, m, autoload=True, autoload_with=engine, schema=schema)
index = table.columns[index_col] if isinstance(index_col, str) else index_col
if not isinstance(index_col, (str, elements.Label)):
raise ValueError(
"Use label when passing an SQLAlchemy instance as the index (%s)" % index
)
if divisions and npartitions:
raise TypeError("Must supply either divisions or npartitions, not both")
columns = (
[(table.columns[c] if isinstance(c, str) else c) for c in columns]
if columns
else list(table.columns)
)
if index not in columns:
columns.append(index)
if isinstance(index_col, str):
kwargs["index_col"] = index_col
else:
# function names get pandas auto-named
kwargs["index_col"] = index_col.name
if head_rows > 0:
# derive metadata from first few rows
q = sql.select(columns).limit(head_rows).select_from(table)
        head = pd.read_sql(q, engine, **kwargs)
#! /usr/bin/env python3
import os
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from nltk import word_tokenize, pos_tag
from collections import Counter, defaultdict
from tqdm import tqdm
def visualize_class_balance(data_path):
train_fileid = os.listdir(data_path + '/sampled_train')
train_fileid = map(os.path.splitext, train_fileid)
train_fileid = [id_ for (id_, _) in train_fileid]
    metadata = pd.read_csv(data_path + '/annotations_metadata.csv')
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
        ser = Series(dti)
# -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
import os
import numpy as np
from numpy import nan
import pytest
import pandas._libs.parsers as parser
from pandas._libs.parsers import TextReader
import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, map
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
from pandas.io.parsers import TextFileReader, read_csv
class TestTextReader(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath('io', 'parser', 'data')
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f)
reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
def test_skip_bad_lines(self, capsys):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
msg = (r"Error tokenizing data\. C error: Expected 3 fields in"
" line 4, saw 4")
with pytest.raises(parser.ParserError, match=msg):
reader.read()
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object),
1: np.array(['b', 'e', 'h', 'm'], dtype=object),
2: np.array(['c', 'f', 'i', 'n'], dtype=object)}
assert_array_dicts_equal(result, expected)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
captured = capsys.readouterr()
assert 'Skipping line 4' in captured.err
assert 'Skipping line 6' in captured.err
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
        reader = TextReader(StringIO(data), delimiter=',', header=2)
"""
Description: Processes model results for visualization
Uses methods:
- :func:`hists`: Processes a model histories for each scenario into results histories by comparing the states over time in each scenario with the states in the nominal scenario.
- :func:`hist`: Compares model history with the nominal model history over time to make a history of degradation.
- :func:`fxnhist`: Compares the history of function states in mdlhist over time.
- :func:`flowhist`: Compares the history of flow states in mdlhist over time.
- :func:`modephases`: Identifies the phases of operation for the system based on a mdlhist with a history of its modes
- :func:`graphflows`: Extracts non-nominal flows by comparing the a results graph with a nominal results graph.
- :func:`resultsgraph`: Makes a dict history of results graphs given a dict history of the nominal and faulty graphs
- :func:`resultsgraphs`: Makes a dict history of results graphs given a dict history of the nominal and faulty graphs
- :func:`totalcost`: Calculates the total cost of a set of given end classifications
- :func:`state_probabilities`: Calculates the probabilities of given end-state classifications given an endclasses dictionary
- :func:`bootstrap_confidence_interval`: Convenience wrapper for scipy.bootstrap.
- :func:`overall_diff`: Calculates the difference between the nominal and fault scenarios for a set of nested endclasses
- :func:`end_diff`: Calculates the difference between the nominal and fault scenarios for a set of endclasses
- :func:`percent`: Calculates the percentage of a given indicator variable being True in endclasses
- :func:`average`: Calculates the average value of a given metric in endclasses
- :func:`expected`: Calculates the expected value of a given metric in endclasses using the rate variable in endclasses
- :func:`rate`: Calculates the rate of a given indicator variable being True in endclasses using the rate variable in endclasses
Also used for graph heatmaps, which use the results history to map results history statistics onto the graph, returning a dictionary with structure {fxn/flow: value}:
- :func:`heatmaps`: Makes a dict of heatmaps given a results history and a history of the differences between nominal and faulty models.
- :func:`degtime_heatmap`: Makes a heatmap dictionary of degraded time for functions given a result history
- :func:`degtime_heatmaps`: Makes a dict of heatmap dictionaries of degraded time for functions given results histories
- :func:`avg_degtime_heatmap`: Makes a heatmap dictionary of the average degraded time over a list of scenarios in the dict of results histories.
- :func:`exp_degtime_heatmap`: Makes a heatmap dictionary of the expected degraded time over a list of scenarios in the dict of results histories based on the rates in endclasses.
- :func:`fault_heatmap`: Makes a heatmap dictionary of faults given a results history.
- :func:`fault_heatmaps`: Makes a dict of heatmap dictionaries of resulting faults given a results history.
- :func:`faults_heatmap`: Makes a heatmap dictionary of the average resulting faults over all scenarios
- :func:`exp_faults_heatmap`: Makes a heatmap dictionary of the expected resulting faults over all scenarios
"""
#File Name: resultdisp/process.py
#Author: <NAME>
#Created: November 2019 (Refactored April 2020)
import copy
import networkx as nx
import numpy as np
import pandas as pd
from ordered_set import OrderedSet
from fmdtools.faultsim.propagate import cut_mdlhist
from scipy.stats import bootstrap
def hists(mdlhists, returndiff=True):
"""
Processes a model histories for each scenario into results histories by comparing the states over time in each scenario with the states in the nominal scenario.
Parameters
----------
mdlhists : dict
A dictionary of model histories for each scenario (e.g. from run_list or run_approach)
returndiff : bool, optional
Whether to return diffs, a dict of the differences between the values of the states in the nominal scenario and fault scenario. The default is True.
Returns
-------
reshists : dict
A dictionary of the results histories of each scenario over time.
diffs : dict
The difference between the nominal and fault scenario states (if returndiff is true--otherwise returns empty)
summaries : dict
A dict with all degraded functions and degraded flows resulting from the fault scenarios.
"""
reshists={}
diffs={}
summaries={}
nomhist = mdlhists.pop('nominal')
for scenname, history in mdlhists.items():
reshists[scenname], diffs[scenname], summaries[scenname] = hist(history, nomhist=nomhist, returndiff=returndiff)
mdlhists['nominal']=nomhist
return reshists, diffs, summaries
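# Illustrative sketch (hypothetical names): ``mdlhists`` would come from
# fmdtools.faultsim.propagate (e.g. run_list or run_approach) and must include
# a 'nominal' entry alongside the fault scenarios.
#
#   reshists, diffs, summaries = hists(mdlhists)
#   summaries['some_fault_scenario']['degraded functions']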
def typehist(mdl, reshist):
"""
Summarizes results history reshist over model classes
Parameters
----------
mdl : Model
Model used in the simulation
reshist : Dict
Results history from rd.process.hist(mdlhist)
Returns
-------
typehist : Dict
Results history of flow types/function classes with structure:
{'functions':{'status':[],'faults':{fxn1:[], fxn2:[]},'numfaults':[]}, 'flows':[], 'flowvals'{'flow1':[], 'flow2':[]}}
"""
typehist = {'flows':{}, 'flowvals':{}, 'functions':{}, 'time':reshist['time']}
for flowtype in mdl.flowtypes():
flows = mdl.flows_of_type(flowtype)
typehist['flows'][flowtype] = np.prod([reshist['flows'][flow] for flow in flows], axis=0)
typehist['flowvals'][flowtype] = flows
for fxnclass in mdl.fxnclasses():
fxns = mdl.fxns_of_class(fxnclass)
typehist['functions'][fxnclass] = dict.fromkeys(['status', 'numfaults', 'faults'])
typehist['functions'][fxnclass]['status'] = np.prod([reshist['functions'][fxn]['status'] for fxn in fxns], axis=0)
typehist['functions'][fxnclass]['numfaults'] = np.sum([reshist['functions'][fxn]['numfaults'] for fxn in fxns], axis=0)
typehist['functions'][fxnclass]['faults'] = {fxn:reshist['functions'][fxn]['numfaults'] for fxn in fxns}
return typehist
def hist(mdlhist, nomhist={}, returndiff=True):
"""
Compares model history with the nominal model history over time to make a history of degradation.
Parameters
----------
mdlhist : dict
the model fault history or a dict of both the nominal and fault histories {'nominal':nomhist, 'faulty':mdlhist}
nomhist : dict, optional
The model history in the nominal scenario (if not provided in mdlhist) The default is {}.
returndiff : bool, optional
Whether to return diffs, a dict of the differences between the values of the states in the nominal scenario and fault scenario. The default is True.
Returns
-------
reshist : dict
The results history over time.
diff : dict
The difference between the nominal and fault scenario states (if returndiff is true--otherwise returns empty)
summary : dict
A dict with all degraded functions and degraded flows.
"""
if nomhist: mdlhist={'nominal':nomhist, 'faulty':mdlhist}
if len(mdlhist['faulty']['time']) != len(mdlhist['nominal']['time']):
print("Faulty and nominal scenarios have different simulation times--cutting comparison to shared range.")
mdlhist['nominal'] = cut_mdlhist(mdlhist['nominal'], len(mdlhist['faulty']['time'])-1)
mdlhist['faulty'] = cut_mdlhist(mdlhist['faulty'], len(mdlhist['nominal']['time'])-1)
reshist = {}
reshist['time'] = mdlhist['nominal']['time']
reshist['flowvals'], reshist['flows'], degflows, numdegflows, flowdiff = flowhist(mdlhist, returndiff=returndiff)
reshist['functions'], numfaults, degfxns, numdegfxns, fxndiff = fxnhist(mdlhist, returndiff=returndiff)
reshist['stats'] = {'degraded flows': numdegflows, 'degraded functions': numdegfxns, 'total faults': numfaults}
summary = {'degraded functions': degfxns, 'degraded flows': degflows}
diff = {**fxndiff, **flowdiff}
return reshist, diff, summary
def flowhist(mdlhist, returndiff=True):
""" Compares the history of flow states in mdlhist over time."""
flowshist = {}
summhist = {}
degflows = []
diff = {}
for flowname in mdlhist['nominal']['flows']:
flowshist[flowname]={}
diff[flowname]={}
for att in mdlhist['nominal']['flows'][flowname]:
faulty = mdlhist['faulty']['flows'][flowname][att]
nominal = mdlhist['nominal']['flows'][flowname][att]
flowshist[flowname][att] = 1* (faulty == nominal)
if returndiff: get_diff(faulty, nominal, att, diff[flowname])
summhist[flowname] = np.prod(np.array(list(flowshist[flowname].values())), axis = 0)
if 0 in summhist[flowname]: degflows+=[flowname]
numdegflows = len(summhist) - np.sum(np.array(list(summhist.values())), axis=0)
return flowshist, summhist, degflows, numdegflows, diff
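# Illustrative sketch (hypothetical attribute histories): inside flowhist(), each
# attribute is marked 1 where the faulty value equals the nominal value, and the
# per-attribute marks are multiplied together to give one status history per flow.
def _demo_flow_status():
    import numpy as np
    nominal = {'rate': np.array([1.0, 1.0, 1.0]), 'effort': np.array([1.0, 1.0, 1.0])}
    faulty = {'rate': np.array([1.0, 0.5, 1.0]), 'effort': np.array([1.0, 1.0, 1.0])}
    att_status = {att: 1 * (faulty[att] == nominal[att]) for att in nominal}
    return np.prod(np.array(list(att_status.values())), axis=0)  # -> array([1, 0, 1])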
def fxnhist(mdlhist, returndiff=True):
""" Compares the history of function states in mdlhist over time."""
fxnshist = {}
faulthist = {}
deghist = {}
degfxns = []
diff = {}
for fxnname in mdlhist['nominal']['functions']:
fhist = copy.copy(mdlhist['faulty']['functions'][fxnname])
if any(fhist.get('faults', [])): del fhist['faults']
fxnshist[fxnname] = {}
diff[fxnname]={}
for state in fhist:
if type(fhist[state])==dict:
fxnshist[fxnname][state] = {}
diff[fxnname][state]={}
for substate in fhist[state]:
if substate!='faults':
get_diff_fxnhist(mdlhist['faulty']['functions'][fxnname][state][substate], mdlhist['nominal']['functions'][fxnname][state][substate], \
diff[fxnname][state], fxnshist[fxnname][state], substate)
if {'faults', 't_loc','mode'}.intersection(fhist[state]):
fxnshist[fxnname][state]['faults']= mdlhist['faulty']['functions'][fxnname][state].get('faults', np.zeros(len(mdlhist['faulty']['time'])))
fxnshist[fxnname][state]['numfaults'] = get_fault_hist(fxnshist[fxnname][state]['faults'], fxnname)
fxnshist[fxnname][state]['status'] = get_status(len(mdlhist['faulty']['time']),fxnshist[fxnname][state])
else:
get_diff_fxnhist(mdlhist['faulty']['functions'][fxnname][state], mdlhist['nominal']['functions'][fxnname][state], \
diff[fxnname], fxnshist[fxnname], state)
fxnshist[fxnname]['faults']=mdlhist['faulty']['functions'][fxnname].get('faults', np.zeros(len(mdlhist['faulty']['time'])))
fxnshist[fxnname]['numfaults'] = get_fault_hist(fxnshist[fxnname]['faults'], fxnname)
fxnshist[fxnname]['status'] = get_status(len(mdlhist['faulty']['time']),fxnshist[fxnname])
faulthist[fxnname]=fxnshist[fxnname]['numfaults']
deghist[fxnname] = fxnshist[fxnname]['status']
if 0 in deghist[fxnname] or any(0 < faulthist[fxnname]): degfxns+=[fxnname]
numfaults = np.sum(np.array(list(faulthist.values())), axis=0)
numdegfxns = len(deghist) - np.sum(np.array(list(deghist.values())), axis=0)
return fxnshist, numfaults, degfxns, numdegfxns, diff
def get_status(timelen, fhist=[]):
stat=np.prod(np.array(list([i for j,i in fhist.items() if (type(i)!=dict and j not in ['faults', 'numfaults'])])), axis = 0)
#if not stat: stat = np.ones(timelen, dtype=int)
return stat * (1 - 1*(fhist.get('numfaults', 0)>0))
def get_diff_fxnhist(faulty, nominal, diff, fxnhist, state):
fxnhist[state] = 1* (faulty == nominal)
get_diff(faulty, nominal, state, diff)
def get_diff(faulty, nominal, state, diff):
if state=='mode' or faulty.dtype.type==np.str_:
diff[state] = [int(nominal[i]!=faulty[i]) for i,f in enumerate(nominal)]
elif faulty.dtype.type==np.bool_:
diff[state] = 1*nominal - 1*faulty
else: diff[state] = nominal - faulty
def get_fault_hist(faults, fxnname):
if type(faults)==dict: return np.sum([fhist for fhist in faults.values()], axis=0)
elif type(faults[0])==np.float64: return faults
elif type(faults[0])==np.str_: return np.array([int(f!='nom') for f in faults])
else: raise Exception("Invalid data type in "+fxnname+" hist: "+str(type(faults)))
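# Illustrative sketch (hypothetical fault data): the three input formats accepted
# by get_fault_hist()--a dict of per-fault indicator arrays, a numeric array, or
# an array of mode strings where anything other than 'nom' counts as a fault.
def _demo_fault_hist():
    import numpy as np
    from_dict = get_fault_hist({'short': np.array([0, 1, 1]),
                                'break': np.array([0, 0, 1])}, 'fxn')      # -> array([0, 1, 2])
    from_floats = get_fault_hist(np.array([0.0, 1.0, 0.0]), 'fxn')         # -> returned unchanged
    from_modes = get_fault_hist(np.array(['nom', 'short', 'nom']), 'fxn')  # -> array([0, 1, 0])
    return from_dict, from_floats, from_modes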
def modephases(mdlhist):
"""
Identifies the phases of operation for the system based on its modes.
Parameters
----------
mdlhist : dict
Model history from the nominal run
Returns
-------
phases : dict
Dictionary of distinct phases that the system functions pass through, of the form:
{'fxn':{'phase1':[beg, end], phase2:[beg, end]}}
where each phase is defined by its corresponding mode in the modelhist
(numbered mode, mode1, mode2... for multiple modes)
modephases : dict
Dictionary of phases that the system passes through, of the form: {'fxn':{'mode1':{'phase1', 'phase2'}}}
"""
modephases={}
phases={}
for fxn in mdlhist["functions"].keys():
modehist = mdlhist["functions"][fxn].get('mode', [])
if len(modehist)!=0:
modes = OrderedSet(modehist)
modephases[fxn]=dict.fromkeys(modes)
phases_unsorted = dict()
for mode in modes:
modeinds = [ind for ind,m in enumerate(modehist) if m==mode]
startind = modeinds[0]
phasenum = 0; phaseid=mode
modephases[fxn][mode] = set()
for i, ind in enumerate(modeinds):
if ind+1 not in modeinds:
phases_unsorted [phaseid] =[startind, ind]
modephases[fxn][mode].add(phaseid)
if i!=len(modeinds)-1:
startind = modeinds[i+1]
phasenum+=1; phaseid=mode+str(phasenum)
phases[fxn] = dict(sorted(phases_unsorted.items(), key = lambda item: item[1][0]))
return phases, modephases
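# Illustrative sketch (toy mode history, not from a model run): how contiguous
# runs of the same mode become separately-numbered phases, mirroring the
# segmentation done in modephases() above.
def _demo_phase_segmentation():
    modehist = ['standby', 'standby', 'on', 'on', 'standby']
    phases_unsorted = {}
    for mode in set(modehist):
        modeinds = [ind for ind, m in enumerate(modehist) if m == mode]
        startind, phasenum, phaseid = modeinds[0], 0, mode
        for i, ind in enumerate(modeinds):
            if ind + 1 not in modeinds:
                phases_unsorted[phaseid] = [startind, ind]
                if i != len(modeinds) - 1:
                    startind = modeinds[i + 1]
                    phasenum += 1
                    phaseid = mode + str(phasenum)
    return dict(sorted(phases_unsorted.items(), key=lambda item: item[1][0]))
    # -> {'standby': [0, 1], 'on': [2, 3], 'standby1': [4, 4]}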
def graphflows(g, nomg, gtype='bipartite'):
"""
Extracts non-nominal flows by comparing a results graph with a nominal results graph.
Parameters
----------
g : networkx graph
The graph in the given fault scenario
nomg : networkx graph
The graph in the nominal scenario
gtype : str, optional
The type of graph to return ('normal' or 'bipartite') The default is 'bipartite'.
Returns
-------
endflows : dict
A dictionary of degraded flows.
"""
endflows=dict()
if gtype=='normal':
for edge in g.edges:
flows=g.get_edge_data(edge[0],edge[1])
nomflows=nomg.get_edge_data(edge[0],edge[1])
for flow in flows:
if flows[flow]!=nomflows[flow]:
endflows[flow]={}
vals=flows[flow]
for val in vals:
if vals[val]!=nomflows[flow][val]: endflows[flow][val]=flows[flow][val]
elif gtype=='bipartite':
for node in g.nodes:
if g.nodes[node]['bipartite']==1: #only flow states
if g.nodes[node]['states']!=nomg.nodes[node]['states']:
endflows[node]={}
vals=g.nodes[node]['states']
for val in vals:
if vals[val]!=nomg.nodes[node]['states'][val]: endflows[node][val]=vals[val]
return endflows
def resultsgraph(g, nomg, gtype='bipartite'):
"""
Makes a graph of nominal/non-nominal states by comparing the nominal graph states with the non-nominal graph states
Parameters
----------
g : networkx Graph
graph for the fault scenario where the functions are nodes and flows are edges and with 'faults' and 'states' attributes
nomg : networkx Graph
graph for the nominal scenario where the functions are nodes and flows are edges and with 'faults' and 'states' attributes
gtype : 'normal' or 'bipartite'
whether the graph is a normal multigraph or a bipartite graph. The default is 'bipartite'
Returns
-------
rg : networkx graph
copy of g with 'status' attributes added for faulty/degraded functions/flows
"""
rg=g.copy()
if gtype=='normal':
for edge in g.edges:
for flow in list(g.edges[edge].keys()):
if g.edges[edge][flow]!=nomg.edges[edge][flow]: status='Degraded'
else: status='Nominal'
rg.edges[edge][flow]={'values':g.edges[edge][flow],'status':status}
for node in g.nodes:
if g.nodes[node]['modes'].difference(['nom']): status='Faulty'
elif g.nodes[node]['states']!=nomg.nodes[node]['states']: status='Degraded'
else: status='Nominal'
rg.nodes[node]['status']=status
elif gtype=='bipartite' or gtype=='component':
for node in g.nodes:
if g.nodes[node]['bipartite']==0 or g.nodes[node].get('iscomponent', False): #condition only checked for functions
if g.nodes[node].get('modes', {'nom'}).difference(['nom']): status='Faulty'
elif g.nodes[node]['states']!=nomg.nodes[node]['states']: status='Degraded'
else: status='Nominal'
elif g.nodes[node]['states']!=nomg.nodes[node]['states']: status='Degraded'
else: status='Nominal'
rg.nodes[node]['status']=status
elif gtype=='typegraph':
for node in g.nodes:
if g.nodes[node]['level']==2:
if any({fxn for fxn, m in g.nodes[node]['modes'].items() if m not in [{'nom'},{}]}): status='Faulty'
elif g.nodes[node]['states']!=nomg.nodes[node]['states']: status='Degraded'
else: status='Nominal'
elif g.nodes[node]['level']==3:
if g.nodes[node]['states']!=nomg.nodes[node]['states']: status='Degraded'
else: status='Nominal'
else: status='Nominal'
rg.nodes[node]['status']=status
return rg
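# Illustrative sketch (hypothetical two-node bipartite graph; assumes networkx is
# available, as the docstrings above indicate): resultsgraph() only adds a
# 'status' attribute per node, so a minimal nominal/faulty comparison looks like this.
def _demo_resultsgraph():
    import networkx as nx
    nomg = nx.Graph()
    nomg.add_node('move_ee', bipartite=0, modes=set(), states={'on': 1})
    nomg.add_node('ee_1', bipartite=1, states={'rate': 1.0})
    g = nomg.copy()
    g.nodes['move_ee']['modes'] = {'short'}    # inject a fault in the function
    g.nodes['ee_1']['states'] = {'rate': 0.5}  # degrade the flow
    rg = resultsgraph(g, nomg, gtype='bipartite')
    return {n: rg.nodes[n]['status'] for n in rg.nodes}
    # -> {'move_ee': 'Faulty', 'ee_1': 'Degraded'}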
def resultsgraphs(ghist, nomghist, gtype='bipartite'):
"""
Makes a dict history of results graphs given a dict history of the nominal and faulty graphs
Parameters
----------
ghist : dict
dict history of the faulty graph
nomghist : dict
dict history of the nominal graph
gtype : str, optional
Type of graph provided/returned (bipartite, component, or normal). The default is 'bipartite'.
Returns
-------
rghist : dict
dict history of results graphs
"""
rghist = dict.fromkeys(ghist.keys())
for i,rg in rghist.items():
rghist[i] = resultsgraph(ghist[i],nomghist[i], gtype=gtype)
return rghist
##HEATMAP FUNCTIONS
def heatmaps(reshist, diff):
"""
Makes a dict of heatmaps given a results history and a history of the differences between nominal and faulty models.
Parameters
----------
reshist : dict
The model results history (e.g. from compare_functionhist)
diff : dict
The differences (e.g. from compare_functionhist(s))
Returns
-------
heatmaps : dict
A dict of heatmaps based on the results history, including:
- degtime, the time the function/flow was degraded
- maxdeg, the maximum degradation experienced by the function
- intdeg, the integral of degradation of the function over the time interval
- maxfaults, the maximum number of faults in the function
- intdiff, the integral of the differences between function/flow states of the nominal and faulty model over time.
- maxdiff, the maximum difference between function/flow states of the nominal and faulty model over time.
"""
heatmaps = {'degtime':{},'maxdeg':{}, 'intdeg':{}, 'maxfaults':{}, 'intdiff':{}, 'maxdiff':{}}
len_time = len(reshist['time'])
for fxnname in reshist['functions'].keys():
heatmaps['degtime'][fxnname]=1.0-sum(reshist['functions'][fxnname]['status'])/len_time
heatmaps['maxfaults'][fxnname] = max(reshist['functions'][fxnname]['numfaults'])
if diff[fxnname]:
fxndiff =np.zeros(len(reshist['functions'][fxnname]['status']))
for valname in diff[fxnname].keys():
fxndiff = fxndiff + diff[fxnname][valname]
heatmaps['intdiff'][fxnname] = sum(fxndiff) /( len_time * len(diff[fxnname].keys()))
heatmaps['maxdiff'][fxnname] = max(fxndiff) /( len_time * len(diff[fxnname].keys()))
for flowname in reshist['flows'].keys():
heatmaps['degtime'][flowname]=1.0 - sum(reshist['flows'][flowname])/len_time
degraded=np.zeros(len(reshist['flows'][flowname]))
flowdiff=np.zeros(len(reshist['flows'][flowname]))
for valname in reshist['flowvals'][flowname].keys():
degraded = degraded + reshist['flowvals'][flowname][valname]
flowdiff = flowdiff + diff[flowname][valname]
heatmaps['maxdeg'][flowname] = max(degraded)
heatmaps['intdeg'][flowname] = sum(degraded)/len_time
heatmaps['maxdiff'][flowname] = max(flowdiff) /( len_time * len(diff[flowname].keys()))
heatmaps['intdiff'][flowname] = sum(flowdiff) /( len_time * len(diff[flowname].keys()))
return heatmaps
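# Illustrative sketch (hypothetical status history): the 'degtime' entries in
# heatmaps() are simply the fraction of timesteps spent off-nominal.
def _demo_degtime():
    import numpy as np
    status = np.array([1, 1, 0, 0, 1])      # 1 = nominal, 0 = degraded
    return 1.0 - sum(status) / len(status)  # -> 0.4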
def degtime_heatmap(reshist):
""" Makes a heatmap dictionary of degraded time for functions given a result history"""
len_time = len(reshist['time'])
degtimemap={}
for fxnname in reshist['functions'].keys():
degtimemap[fxnname]=1.0-sum(reshist['functions'][fxnname]['status'])/len_time
for flowname in reshist['flows'].keys():
degtimemap[flowname]=1.0 - sum(reshist['flows'][flowname])/len_time
return degtimemap
def degtime_heatmaps(reshists):
""" Makes a dict of heatmap dictionaries of degraded time for functions given results histories"""
degtimemaps=dict.fromkeys(reshists.keys())
for reshist in reshists:
degtimemaps[reshist]=degtime_heatmap(reshists[reshist])
return degtimemaps
def avg_degtime_heatmap(reshists):
""" Makes a heatmap dictionary of the average degraded heat time over a list of scenarios in the dict of results histories."""
degtimetable = pd.DataFrame(degtime_heatmaps(reshists)).transpose()
return degtimetable.mean().to_dict()
def exp_degtime_heatmap(reshists, endclasses):
""" Makes a heatmap dictionary of the expected degraded heat time over a list of scenarios in the dict of results histories based on the rates in endclasses."""
if 'nominal' in {*endclasses, *reshists}:
if 'nominal' not in reshists: endclasses=endclasses.copy(); endclasses.pop('nominal')
elif 'nominal' not in endclasses: reshists=reshists.copy(); reshists.pop('nominal')
degtimetable = pd.DataFrame(degtime_heatmaps(reshists))
rates = list(pd.DataFrame(endclasses).transpose()['rate'])
expdegtimetable = degtimetable.multiply(rates).transpose()
return expdegtimetable.sum().to_dict()
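# Illustrative sketch (hypothetical scenarios and rates): the expected heatmaps
# weight each scenario's per-function/flow value by that scenario's rate from
# endclasses and sum over scenarios, as done with pandas above.
def _demo_expected_degtime():
    import pandas as pd
    degtimes = {'scen1': {'fxn_a': 0.2, 'flow_b': 0.0},
                'scen2': {'fxn_a': 0.5, 'flow_b': 0.1}}
    rates = [1e-3, 2e-3]            # scenario rates, in scenario order
    table = pd.DataFrame(degtimes)  # columns are scenarios, rows are functions/flows
    return table.multiply(rates).transpose().sum().to_dict()
    # -> approximately {'fxn_a': 0.0012, 'flow_b': 0.0002}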
def fault_heatmap(reshist):
""" Makes a heatmap dictionary of faults given a results history."""
heatmap={}
for fxnname in reshist['functions'].keys():
heatmap[fxnname] = max(reshist['functions'][fxnname]['numfaults'])
return heatmap
def fault_heatmaps(reshists):
""" Makes dict of heatmaps dictionaries of resulting faults given a results history."""
faulttimemaps=dict.fromkeys(reshists.keys())
for reshist in reshists:
faulttimemaps[reshist]=fault_heatmap(reshists[reshist])
return faulttimemaps
def faults_heatmap(reshists):
"""Makes a heatmap dictionary of the average resulting faults over all scenarios"""
faulttable = pd.DataFrame(fault_heatmaps(reshists)).transpose()
return faulttable.mean().to_dict()
def exp_faults_heatmap(reshists, endclasses):
"""Makes a heatmap dictionary of the expected resulting faults over all scenarios"""
if 'nominal' in {*endclasses, *reshists}:
if 'nominal' not in reshists: endclasses=endclasses.copy(); endclasses.pop('nominal')
elif 'nominal' not in endclasses: reshists=reshists.copy(); reshists.pop('nominal')
faulttable = pd.DataFrame(fault_heatmaps(reshists))
rates = list(pd.DataFrame(endclasses).transpose()['rate'])
import sys
import os
import numpy as np
import pandas as pd
import dill
import torch
def devide_by_steps(data):
# find first/last frame
min_frame = min([x['frame']["id"][0] for x in data])
max_frame = max([max(x['frame']["id"]) for x in data])
#
new_data = []
for n in range(min_frame, max_frame+1):
frame = []
for ped in data:
if n in ped.values[:,1]:
frame.append(ped.values[ped.values[:,1]==n])
print("frame "+ str(n)+" from " + str(max_frame))
new_data.append(frame)
return new_data
def postproccess(dataset):
arr = []
for f in dataset:
arr.append(devide_by_steps(f))
print("dataset proccessed")
# tarr = torch.tensor(arr)
return arr
def maybe_makedirs(path_to_create):
"""This function will create a directory, unless it exists already,
at which point the function will return.
The exception handling is necessary as it prevents a race condition
from occurring.
Inputs:
path_to_create - A string path to a directory you'd like created.
"""
try:
os.makedirs(path_to_create)
except OSError:
if not os.path.isdir(path_to_create):
raise
def derivative_of(x, dt=1, radian=False):
if radian:
x = make_continuous_copy(x)
if x[~np.isnan(x)].shape[-1] < 2:
return np.zeros_like(x)
dx = np.full_like(x, np.nan)
dx[~np.isnan(x)] = np.gradient(x[~np.isnan(x)], dt)
return dx
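# Illustrative sketch (synthetic positions): how positions sampled every dt
# seconds are turned into velocities and accelerations with derivative_of().
def _demo_derivative_of():
    x = np.array([0.0, 0.4, 0.8, 1.2])  # metres, sampled every 0.4 s
    vx = derivative_of(x, dt=0.4)       # -> array([1., 1., 1., 1.])
    ax = derivative_of(vx, dt=0.4)      # -> array([0., 0., 0., 0.])
    return vx, ax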
dt = 0.4
maybe_makedirs('../processed')
data_columns = pd.MultiIndex.from_product([['position', 'velocity', 'acceleration'], ['x', 'y']])
data_columns = data_columns.insert(0,('frame','id'))
data_columns = data_columns.insert(0,('ped','id'))
for desired_source in ['eth', 'hotel', 'univ', 'zara1', 'zara2']:
for data_class in ['train', 'val', 'test']:
data_dict_path = os.path.join('./processed', '_'.join([desired_source, data_class]) + '.pkl')
processed_data_class = []
for subdir, dirs, files in os.walk(os.path.join('trajnet', desired_source, data_class)):
for file in files:
if not file.endswith('.txt'):
continue
input_data_dict = dict()
full_data_path = os.path.join(subdir, file)
print('At', full_data_path)
data = pd.read_csv(full_data_path, sep='\t', index_col=False, header=None)
data.columns = ['frame_id', 'track_id', 'pos_x', 'pos_y']
data['frame_id'] = pd.to_numeric(data['frame_id'], downcast='integer')
import urllib.request
import xmltodict, json
#import pygrib
import numpy as np
import pandas as pd
from datetime import datetime
import time
# Query to extract parameter forecasts for one particular place (point)
#
# http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9044-283f62a8c734/wfs?
# request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::multipointcoverage
# &place=valencia
# ¶meters="GeopHeight, Temperature, Pressure, Humidity, WindDirection, WindSpeedMS,
# WindUMS, WindVMS, MaximumWind, DewPoint, Precipitation1h, PrecipitationAmount"
#
def extract_forecasts_place(fmi_addr, my_api_key, data_format, parameters, place ):
request = "getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::" + data_format
query_parameters = ""
for it in range(len(parameters)-1):
query_parameters += parameters[it] + ","
query_parameters += parameters[len(parameters)-1]
query = fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request + "&" + "place=" + place + "&" + "parameters=" + query_parameters
print(query, "\n")
with urllib.request.urlopen(query) as fd:
query = xmltodict.parse(fd.read())
return(query)
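# Illustrative sketch (hypothetical API key and parameter list): the WFS query
# URL that extract_forecasts_place() assembles, shown without issuing the request.
def _demo_point_query_url():
    fmi_addr = "http://data.fmi.fi/fmi-apikey/"
    my_api_key = "<your-api-key>"
    data_format = "multipointcoverage"
    parameters = ["Temperature", "WindSpeedMS"]
    request = "getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::" + data_format
    return (fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request +
            "&" + "place=valencia" + "&" + "parameters=" + ",".join(parameters))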
#---------------------------------------------------------------------------------------
# Query to extract parameter forecasts for a Region Of Interest (grid defined by bbox)
#
# Query made for FMI:
# http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9044-283f62a8c734/wfs?
# request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::grid
# & crs=EPSG::4326
# & bbox=-0.439453, 39.192884, -0.201874, 39.426647
# & parameters=Temperature,Humidity,WindDirection, WindSpeedMS
#
def extract_forecasts_grid(fmi_addr, my_api_key, query_request, data_format, coord_sys, bbox, parameters):
# data_format = grid
request = query_request + data_format
# coordinate system e.g. coord_sys = EPSG::4326
query_crs = coord_sys
# bbox = [-0.439453, 39.192884, -0.201874, 39.426647] --- region of Valencia
query_box = ""
for j in range(len(bbox)-1):
query_box += str(bbox[j]) + ","
query_box += str(bbox[len(bbox)-1])
query_parameters = ""
for it in range(len(parameters) - 1):
query_parameters += parameters[it] + ","
query_parameters += parameters[len(parameters)-1]
query = fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request + "&" + \
"crs=" + query_crs + "&" + "bbox=" + query_box + "&" + "parameters=" + query_parameters
print("Query made for FMI: \n{}\n".format(query))
with urllib.request.urlopen(query) as fd:
response = xmltodict.parse(fd.read())
return(response)
#-----------------------------------------------------------------------------
# Query to extract values from a grib file in data.frame (dset)
# Columns names of data.frame are:
# ['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value']
#
def extract_gribs(dataDICT):
# gml:fileReference holds the FTP path to the grib2 file we need to download
FTPurl = dataDICT['wfs:FeatureCollection']['wfs:member'][1]['omso:GridSeriesObservation']['om:result']['gmlcov:RectifiedGridCoverage']['gml:rangeSet']['gml:File']['gml:fileReference']
print("Query for downloading grb file with the values asked: \n{}\n".format(FTPurl))
# Create the grib2 file
result = urllib.request.urlopen(FTPurl)
with open('gribtest.grib2', 'b+w') as f:
f.write(result.read())
gribfile = 'gribtest.grib2' # Grib filename
grb = pygrib.open(gribfile)
# Dictionary mapping each parameter to its unit
paremeters_units = {
"Mean sea level pressure": "Pa", "Orography": "meters", "2 metre temperature": "°C",
"2 metre relative humidity": "%",
"Mean wind direction": "degrees",
"10 metre wind speed": "m s**-1",
"10 metre U wind component": "m s**-1",
"10 metre V wind component": "m s**-1",
"surface precipitation amount, rain, convective": "kg m**-2", "2 metre dewpoint temperature": "°C"}
# Create a data frame to keep all the measurements from the grib file
dset = pd.DataFrame(columns=['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value'])
for g in grb:
str_g = str(g) # casting to str
col1, col2, *_ = str_g.split(":") # split the message columns
# Temporary data.frame
temp_ds = pd.DataFrame(columns=['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value'])
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading and preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import zipfile
from PIL import Image
import numpy as np
import pandas as pd
from six.moves import urllib
from sklearn import preprocessing
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras import backend
from tensorflow.compat.v1.keras import datasets
from sklearn.model_selection import train_test_split
import dvrl_utils
def load_tabular_data(data_name, dict_no, noise_rate):
"""Loads Adult Income and Blog Feedback datasets.
This module loads the two tabular datasets and saves train.csv, valid.csv and
test.csv files under data_files directory.
UCI Adult data link: https://archive.ics.uci.edu/ml/datasets/Adult
UCI Blog data link: https://archive.ics.uci.edu/ml/datasets/BlogFeedback
If noise_rate > 0.0, adds noise on the datasets.
Then, saves train.csv, valid.csv, test.csv on './data_files/' directory
Args:
data_name: 'adult', 'blog', or 'cali'
dict_no: training and validation set numbers
noise_rate: label corruption ratio
Returns:
noise_idx: indices of noisy samples
"""
# Loads datasets from links
uci_base_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
# Adult Income dataset
if data_name == 'adult':
train_url = uci_base_url + 'adult/adult.data'
test_url = uci_base_url + 'adult/adult.test'
data_train = pd.read_csv(train_url, header=None)
data_test = pd.read_csv(test_url, skiprows=1, header=None)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['Age', 'WorkClass', 'fnlwgt', 'Education', 'EducationNum',
'MaritalStatus', 'Occupation', 'Relationship', 'Race',
'Gender', 'CapitalGain', 'CapitalLoss', 'HoursPerWeek',
'NativeCountry', 'Income']
# Creates binary labels
df['Income'] = df['Income'].map({' <=50K': 0, ' >50K': 1,
' <=50K.': 0, ' >50K.': 1})
# Changes string to float
df.Age = df.Age.astype(float)
df.fnlwgt = df.fnlwgt.astype(float)
df.EducationNum = df.EducationNum.astype(float)
df.EducationNum = df.EducationNum.astype(float)
df.CapitalGain = df.CapitalGain.astype(float)
df.CapitalLoss = df.CapitalLoss.astype(float)
# One-hot encoding
df = pd.get_dummies(df, columns=['WorkClass', 'Education', 'MaritalStatus',
'Occupation', 'Relationship',
'Race', 'Gender', 'NativeCountry'])
# Sets label name as Y
df = df.rename(columns={'Income': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Blog Feedback dataset
elif data_name == 'blog':
resp = urllib.request.urlopen(uci_base_url + '00304/BlogFeedback.zip')
zip_file = zipfile.ZipFile(io.BytesIO(resp.read()))
# Loads train dataset
train_file_name = 'blogData_train.csv'
data_train = pd.read_csv(zip_file.open(train_file_name), header=None)
# Loads test dataset
data_test = []
for i in range(29):
if i < 9:
file_name = 'blogData_test-2012.02.0'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.02.'+ str(i+1) + '.00_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
if i == 0:
data_test = temp_data
else:
data_test = pd.concat((data_test, temp_data), axis=0)
for i in range(31):
if i < 9:
file_name = 'blogData_test-2012.03.0'+ str(i+1) + '.00_00.csv'
elif i < 25:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.01_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
data_test = pd.concat((data_test, temp_data), axis=0)
df = pd.concat((data_train, data_test), axis=0)
# Removes rows with missing data
df = df.dropna()
# Sets label and named as Y
df.columns = df.columns.astype(str)
df['280'] = 1*(df['280'] > 0)
df = df.rename(columns={'280': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# load california housing dataset (./data_files/california_housing_train.csv
# and ./data_files/california_housing_test.csv)
elif data_name == 'cali':
train_url = './data_files/california_housing_train.csv'
test_url = './data_files/california_housing_test.csv'
data_train = pd.read_csv(train_url, header=0)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# header declares fewer columns than the second data row supplies
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four lines (comments included); header=1 then
# takes the second remaining line ('A,B,C') as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
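# 'day_first' (rather than 'dayfirst') is not a valid parse_date keyword, so the read below should raise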
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# not truly a bytes test, since the parser only ever sees decoded text here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
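# force the pure-Python parser engine for every read in this test class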
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# not truly a bytes test, since the parser only ever sees decoded text here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
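# hex fields (pgn, src, dst) are parsed base-16; the 'data' column is reduced to its payload byte count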
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
### Twitter Data Tools
## <NAME>
## Created: 8/15/2018
## Updated: 8/23/2018
import os
import re
import sys
import math
import nltk
import errno
import tarfile
import unidecode
import numpy as np
import pandas as pd
import subprocess as sb
def get_id_sets(data):
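# Split the tweet IDs in the processed stream dict into retweet, reply,
# replies, and normal sets, and attach a binary membership vector per ID
# under the 'set-code' key.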
parent = list(data['tweet']['tweet_id']['parent'].keys())
retweet = list(data['tweet']['tweet_id']['retweet'].keys())
reply = list(data['tweet']['tweet_id']['reply'].keys())
replies = []
for i in reply:
replies.extend(data['tweet']['tweet_id']['reply'][i])
replies = np.unique(replies)
normal = list(set(parent) - set(retweet) - set(reply) - set(replies))
sets_ids = [retweet,reply,replies,normal]
sets_label = ['retweet','reply','replies','normal']
tweet_id_sets = {}
code = {}
for i, j in enumerate(sets_label):
tweet_id_sets[j] = sets_ids[i]
for k in sets_ids[i]:
if k not in code.keys():
code[k] = np.zeros(len(sets_label),dtype=int)
code[k][i] = 1
else:
code[k][i] = 1
tweet_id_sets['set-code'] = code
tweet_id_sets['set-label'] = sets_label
return tweet_id_sets
def read(STDVID,date_range):
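# Locate the gzipped tarball of processed tweets for this stream ID and date
# range (trying two candidate directories), extract and load the .npy payload,
# remove the temporary file, and return the data plus its tweet-ID sets.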
try:
directory = '/twitter-tweets-stream/'+STDVID+'-processed/'
filename = 'keywordStream'+STDVID+'Tweets_'+date_range+'_processed.npy.tar.gz'
filename_tar = directory+filename
filename_tar_directory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+filename_tar
file0_tar = tarfile.open(filename_tar_directory)
file0_tar.extract(filename.replace('.tar.gz',''))
out = np.load(filename.replace('.tar.gz',''),allow_pickle=True).item()
os.remove(filename.replace('.tar.gz',''))
file0_tar.close()
except FileNotFoundError:
try:
directory = '/raw-data/twitter-tweets-stream/'+STDVID+'-processed/'
filename = 'keywordStream'+STDVID+'Tweets_'+date_range+'_processed.npy.tar.gz'
filename_tar = directory+filename
filename_tar_directory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+filename_tar
file0_tar = tarfile.open(filename_tar_directory)
file0_tar.extract(filename.replace('.tar.gz',''))
out = np.load(filename.replace('.tar.gz',''),allow_pickle=True).item()
os.remove(filename.replace('.tar.gz',''))
file0_tar.close()
except:
print('Error: The '+STDVID+'-processed directory cannot be found or the dataset does not exist anywhere.')
print('Please refer to the README.md file.')
try:
sys.exit()
except SystemExit:
sys.exit()
# tweet id sets
tweet_id_sets = get_id_sets(out)
return out, tweet_id_sets
def tweet_id_categories(table,dtype=str):
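# Return the table indices flagged as retweets (RTT), replied-to tweets (RPT),
# replies (TRP), and normal tweets (NRT); dtype controls whether the flags are
# compared as strings or as numbers.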
tweet_ids = {}
if dtype != str:
tweet_ids['retweets'] = table.index[table['RTT'] == 1].tolist()
tweet_ids['replied'] = table.index[table['RPT'] == 1].tolist()
tweet_ids['replies'] = table.index[table['TRP'] == 1].tolist()
tweet_ids['normal'] = table.index[table['NRT'] == 1].tolist()
else:
tweet_ids['retweets'] = table.index[table['RTT'] == '1'].tolist()
tweet_ids['replied'] = table.index[table['RPT'] == '1'].tolist()
tweet_ids['replies'] = table.index[table['TRP'] == '1'].tolist()
tweet_ids['normal'] = table.index[table['NRT'] == '1'].tolist()
return tweet_ids
def read_csv(stream,date_range,dtype=str,subset=False,which_set='subset',which_set_params=[]):
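# Load the tabulated tweet and user tables for a stream/date range from the
# gzipped CSVs, trying the local '-tabulated' directory first and then the
# raw-data location.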
out_T = None
out_U = None
if subset == False:
try:
directory_1 = stream+'-tabulated/'
pathway_T = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+directory_1+stream+'-processed-'+date_range+'_tabulated-tweet.csv.gz'
pathway_U = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+directory_1+stream+'-processed-'+date_range+'_tabulated-user.csv.gz'
out_T = pd.read_csv(pathway_T,compression='gzip',sep=',',index_col=0,header=0,dtype=str)
out_U = pd.read_csv(pathway_U,compression='gzip',sep=',',index_col=0,header=0,dtype=str)
except FileNotFoundError:
try:
directory_1 = '/raw-data/twitter-tweets-stream/'+stream+'-tabulated/'
pathway_T = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+directory_1+stream+'-processed-'+date_range+'_tabulated-tweet.csv.gz'
pathway_U = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+directory_1+stream+'-processed-'+date_range+'_tabulated-user.csv.gz'
out_T = pd.read_csv(pathway_T,compression='gzip',sep=',',index_col=0,header=0,dtype=str)
out_U = pd.read_csv(pathway_U,compression='gzip',sep=',',index_col=0,header=0,dtype=str)
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from ..data_structures.sarray import SArray
from ..data_structures.sframe import SFrame
from ..data_structures.sarray import load_sarray
from .._cython.cy_flexible_type import GMT
from . import util
import pandas as pd
import numpy as np
import unittest
import random
import datetime as dt
import copy
import os
import math
import shutil
import array
import time
import warnings
import functools
import tempfile
import sys
import six
class SArrayTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.bool_data = [x % 2 == 0 for x in range(10)]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0)),None]
self.datetime_data2 = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111).replace(tzinfo=GMT(0.0)),None]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["abc", "def", "hello", "world", "pika", "chu", "hello", "world"]
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.np_array_data = [np.array(x) for x in self.vec_data]
self.empty_np_array_data = [np.array([])]
self.np_matrix_data = [np.matrix(x) for x in self.vec_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def __test_equal(self, _sarray, _data, _type):
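# check dtype, length, and element-wise equality against the expected data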
self.assertEqual(_sarray.dtype, _type)
self.assertEqual(len(_sarray), len(_data))
sarray_contents = list(_sarray.head(len(_sarray)))
if _type == np.ndarray:
# Special case for np.ndarray elements, which assertSequenceEqual
# does not handle.
np.testing.assert_array_equal(sarray_contents, _data)
else:
# Use unittest methods when possible for better consistency.
self.assertSequenceEqual(sarray_contents, _data)
def __test_almost_equal(self, _sarray, _data, _type):
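# element-wise approximate comparison, descending into list/array elements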
self.assertEqual(_sarray.dtype, _type)
self.assertEqual(len(_sarray), len(_data))
l = list(_sarray)
for i in range(len(l)):
if type(l[i]) in (list, array.array):
for j in range(len(l[i])):
self.assertAlmostEqual(l[i][j], _data[i][j])
else:
self.assertAlmostEqual(l[i], _data[i])
def __test_creation_raw(self, data, dtype, expected):
s = SArray(data, dtype)
self.__test_equal(s, expected, dtype)
def __test_creation_pd(self, data, dtype, expected):
s = SArray(pd.Series(data), dtype)
self.__test_equal(s, expected, dtype)
def __test_creation(self, data, dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
self.__test_creation_raw(data, dtype, expected)
self.__test_creation_pd(data, dtype, expected)
def __test_creation_type_inference_raw(self, data, expected_dtype, expected):
s = SArray(data)
self.__test_equal(s, expected, expected_dtype)
def __test_creation_type_inference_pd(self, data, expected_dtype, expected):
s = SArray(pd.Series(data))
self.__test_equal(s, expected, expected_dtype)
def __test_creation_type_inference(self, data, expected_dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
self.__test_creation_type_inference_raw(data, expected_dtype, expected)
self.__test_creation_type_inference_pd(data, expected_dtype, expected)
def test_creation(self):
self.__test_creation(self.int_data, int, self.int_data)
self.__test_creation(self.int_data, float, [float(x) for x in self.int_data])
self.__test_creation(self.int_data, str, [str(x) for x in self.int_data])
self.__test_creation(self.float_data, float, self.float_data)
self.assertRaises(TypeError, self.__test_creation, [self.float_data, int])
self.__test_creation(self.string_data, str, self.string_data)
self.assertRaises(TypeError, self.__test_creation, [self.string_data, int])
self.assertRaises(TypeError, self.__test_creation, [self.string_data, float])
expected_output = [chr(x) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(SArray(self.url, str), expected_output, str)
self.__test_creation(self.vec_data, array.array, self.vec_data)
self.__test_creation(self.np_array_data, np.ndarray, self.np_array_data)
self.__test_creation(self.empty_np_array_data, np.ndarray,
self.empty_np_array_data)
self.__test_creation(self.np_matrix_data, np.ndarray, self.np_matrix_data)
self.__test_creation(self.list_data, list, self.list_data)
self.__test_creation(self.dict_data, dict, self.dict_data)
# test with map/filter type
self.__test_creation_raw(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_raw(map(lambda x: x * 10, self.int_data),
float,
[float(x) * 10 for x in self.int_data])
self.__test_creation_raw(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
self.__test_creation_raw(filter(lambda x: x < 5, self.int_data),
int,
list(filter(lambda x: x < 5, self.int_data)))
self.__test_creation_raw(filter(lambda x: x > 5, self.float_data),
float,
list(filter(lambda x: x > 5, self.float_data)))
self.__test_creation_raw(filter(lambda x: len(x) > 3, self.string_data),
str,
list(filter(lambda x: len(x) > 3, self.string_data)))
self.__test_creation_pd(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_pd(map(lambda x: x * 10, self.int_data),
float,
[float(x) * 10 for x in self.int_data])
self.__test_creation_pd(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
# test with type inference
self.__test_creation_type_inference(self.int_data, int, self.int_data)
self.__test_creation_type_inference(self.float_data, float, self.float_data)
self.__test_creation_type_inference(self.bool_data, int, [int(x) for x in self.bool_data])
self.__test_creation_type_inference(self.string_data, str, self.string_data)
self.__test_creation_type_inference(self.vec_data, array.array, self.vec_data)
self.__test_creation_type_inference(self.np_array_data, np.ndarray,
self.np_array_data)
self.__test_creation_type_inference(self.empty_np_array_data,
np.ndarray,
self.empty_np_array_data)
self.__test_creation_type_inference(self.np_matrix_data, np.ndarray,
self.np_matrix_data)
self.__test_creation_type_inference([np.bool_(True),np.bool_(False)],int,[1,0])
self.__test_creation((1,2,3,4), int, [1,2,3,4])
self.__test_creation_type_inference_raw(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_type_inference_raw(map(lambda x: x * 10, self.float_data),
float,
[x * 10 for x in self.float_data])
self.__test_creation_type_inference_raw(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
self.__test_creation_type_inference_pd(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_type_inference_pd(map(lambda x: x * 10, self.float_data),
float,
[float(x) * 10 for x in self.float_data])
self.__test_creation_type_inference_pd(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
self.__test_creation_type_inference_raw(filter(lambda x: x < 5, self.int_data),
int,
list(filter(lambda x: x < 5, self.int_data)))
self.__test_creation_type_inference_raw(filter(lambda x: x > 5, self.float_data),
float,
list(filter(lambda x: x > 5, self.float_data)))
self.__test_creation_type_inference_raw(filter(lambda x: len(x) > 3, self.string_data),
str,
list(filter(lambda x: len(x) > 3, self.string_data)))
# generators
def __generator_parrot(data):
for ii in data:
yield ii
self.__test_creation_raw(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_raw(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_raw(__generator_parrot(self.string_data), str, self.string_data)
self.__test_creation_pd(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_pd(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_pd(__generator_parrot(self.string_data), str, self.string_data)
self.__test_creation_type_inference_raw(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_type_inference_raw(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_type_inference_raw(__generator_parrot(self.string_data), str, self.string_data)
self.__test_creation_type_inference_pd(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_type_inference_pd(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_type_inference_pd(__generator_parrot(self.string_data), str, self.string_data)
# Test numpy types, which are not compatible with the pd.Series path in
# __test_creation and __test_creation_type_inference
self.__test_equal(SArray(np.array(self.vec_data), array.array),
self.vec_data, array.array)
self.__test_equal(SArray(np.matrix(self.vec_data), array.array),
self.vec_data, array.array)
self.__test_equal(SArray(np.array(self.vec_data)),
self.vec_data, array.array)
self.__test_equal(SArray(np.matrix(self.vec_data)),
self.vec_data, array.array)
# Test python 3
self.__test_equal(SArray(filter(lambda x: True, self.int_data)), self.int_data, int)
self.__test_equal(SArray(map(lambda x: x, self.int_data)), self.int_data, int)
def test_list_with_none_creation(self):
tlist=[[2,3,4],[5,6],[4,5,10,None]]
g=SArray(tlist)
self.assertEqual(len(g), len(tlist))
for i in range(len(tlist)):
self.assertEqual(g[i], tlist[i])
def test_list_with_array_creation(self):
import array
t = array.array('d',[1.1,2,3,4,5.5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype, float)
glist = list(g)
for i in range(len(glist)):
self.assertAlmostEqual(glist[i], t[i])
t = array.array('i',[1,2,3,4,5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype, int)
glist = list(g)
for i in range(len(glist)):
self.assertEqual(glist[i], t[i])
def test_in(self):
sint = SArray(self.int_data, int)
self.assertTrue(5 in sint)
self.assertFalse(20 in sint)
sstr = SArray(self.string_data, str)
self.assertTrue("abc" in sstr)
self.assertFalse("zzzzzz" in sstr)
self.assertFalse("" in sstr)
self.__test_equal(sstr.contains("ll"), ["ll" in i for i in self.string_data], int)
self.__test_equal(sstr.contains("a"), ["a" in i for i in self.string_data], int)
svec = SArray([[1.0,2.0],[2.0,3.0],[3.0,4.0],[4.0,5.0]], array.array)
self.__test_equal(svec.contains(1.0), [1,0,0,0], int)
self.__test_equal(svec.contains(0.0), [0,0,0,0], int)
self.__test_equal(svec.contains(2), [1,1,0,0], int)
slist = SArray([[1,"22"],[2,"33"],[3,"44"],[4,None]], list)
self.__test_equal(slist.contains(1.0), [1,0,0,0], int)
self.__test_equal(slist.contains(3), [0,0,1,0], int)
self.__test_equal(slist.contains("33"), [0,1,0,0], int)
self.__test_equal(slist.contains("3"), [0,0,0,0], int)
self.__test_equal(slist.contains(None), [0,0,0,1], int)
sdict = SArray([{1:"2"},{2:"3"},{3:"4"},{"4":"5"}], dict)
self.__test_equal(sdict.contains(1.0), [1,0,0,0], int)
self.__test_equal(sdict.contains(3), [0,0,1,0], int)
self.__test_equal(sdict.contains("4"), [0,0,0,1], int)
self.__test_equal(sdict.contains("3"), [0,0,0,0], int)
self.__test_equal(SArray(['ab','bc','cd']).is_in('abc'), [1,1,0], int)
self.__test_equal(SArray(['a','b','c']).is_in(['a','b']), [1,1,0], int)
self.__test_equal(SArray([1,2,3]).is_in(array.array('d',[1.0,2.0])), [1,1,0], int)
self.__test_equal(SArray([1,2,None]).is_in([1, None]), [1,0,1], int)
self.__test_equal(SArray([1,2,None]).is_in([1]), [1,0,0], int)
def test_save_load(self):
# Check top level load function
with util.TempDirectory() as f:
sa = SArray(self.float_data)
sa.save(f)
sa2 = load_sarray(f)
self.__test_equal(sa2, self.float_data, float)
# Make sure these files don't exist before testing
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
sint = SArray(self.int_data, int)
sflt = SArray([float(x) for x in self.int_data], float)
sstr = SArray([str(x) for x in self.int_data], str)
svec = SArray(self.vec_data, array.array)
slist = SArray(self.list_data, list)
sdict = SArray(self.dict_data, dict)
sint.save('intarr.sidx')
sflt.save('fltarr.sidx')
sstr.save('strarr.sidx')
svec.save('vecarr.sidx')
slist.save('listarr.sidx')
sdict.save('dictarr.sidx')
sint2 = SArray('intarr.sidx')
sflt2 = SArray('fltarr.sidx')
sstr2 = SArray('strarr.sidx')
svec2 = SArray('vecarr.sidx')
slist2 = SArray('listarr.sidx')
sdict2 = SArray('dictarr.sidx')
self.assertRaises(IOError, lambda: SArray('__no_such_file__.sidx'))
self.__test_equal(sint2, self.int_data, int)
self.__test_equal(sflt2, [float(x) for x in self.int_data], float)
self.__test_equal(sstr2, [str(x) for x in self.int_data], str)
self.__test_equal(svec2, self.vec_data, array.array)
self.__test_equal(slist2, self.list_data, list)
self.__test_equal(sdict2, self.dict_data, dict)
#cleanup
del sint2
del sflt2
del sstr2
del svec2
del slist2
del sdict2
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
def test_save_load_text(self):
self._remove_single_file('txt_int_arr.txt')
sint = SArray(self.int_data, int)
sint.save('txt_int_arr.txt')
self.assertTrue(os.path.exists('txt_int_arr.txt'))
f = open('txt_int_arr.txt')
lines = f.readlines()
for i in range(len(sint)):
self.assertEqual(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr.txt')
self._remove_single_file('txt_int_arr')
sint.save('txt_int_arr', format='text')
self.assertTrue(os.path.exists('txt_int_arr'))
f = open('txt_int_arr')
lines = f.readlines()
for i in range(len(sint)):
self.assertEqual(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr')
def _remove_single_file(self, filename):
try:
os.remove(filename)
except:
pass
def _remove_sarray_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
shutil.rmtree(f)
def test_transform(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
# Test randomness across segments; a randomized SArray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000), int)
vec = list(sa_random.head(len(sa_random)))
self.assertFalse(all([x == vec[0] for x in vec]))
# test transform with missing values
sa = SArray([1,2,3,None,4,5])
sa1 = sa.apply(lambda x : x + 1)
self.__test_equal(sa1, [2,3,4,None,5,6], int)
def test_transform_with_multiple_lambda(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
sa2_int = sa_int.apply(lambda val: val + 1, int)
expected_output = [x for x in range(ord('a') + 1, ord('a') + 26 + 1)]
self.__test_equal(sa2_int, expected_output, int)
def test_transform_with_exception(self):
sa_char = SArray(['a' for i in range(10000)], str)
# # type mismatch exception
self.assertRaises(TypeError, lambda: sa_char.apply(lambda char: char, int).head(1))
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0, float))
def test_transform_with_type_inference(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char))
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
sa_bool = sa_char.apply(lambda char: ord(char) > ord('c'))
expected_output = [int(x > ord('c')) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_bool, expected_output, int)
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0))
# Test randomness across segments; a randomized SArray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000))
vec = list(sa_random.head(len(sa_random)))
self.assertFalse(all([x == vec[0] for x in vec]))
def test_transform_on_lists(self):
sa_int = SArray(self.int_data, int)
sa_vec2 = sa_int.apply(lambda x: [x, x+1, str(x)])
expected = [[i, i + 1, str(i)] for i in self.int_data]
self.__test_equal(sa_vec2, expected, list)
sa_int_again = sa_vec2.apply(lambda x: int(x[0]))
self.__test_equal(sa_int_again, self.int_data, int)
# transform from vector to vector
sa_vec = SArray(self.vec_data, array.array)
sa_vec2 = sa_vec.apply(lambda x: x)
self.__test_equal(sa_vec2, self.vec_data, array.array)
# transform on list
sa_list = SArray(self.list_data, list)
sa_list2 = sa_list.apply(lambda x: x)
self.__test_equal(sa_list2, self.list_data, list)
# transform dict to list
sa_dict = SArray(self.dict_data, dict)
# Python 3 doesn't return keys in same order from identical dictionaries.
sort_by_type = lambda x : str(type(x))
sa_list = sa_dict.apply(lambda x: sorted(list(x), key = sort_by_type))
self.__test_equal(sa_list, [sorted(list(x), key = sort_by_type) for x in self.dict_data], list)
def test_transform_dict(self):
# lambda accesses dict
sa_dict = SArray([{'a':1}, {1:2}, {'c': 'a'}, None], dict)
sa_bool_r = sa_dict.apply(lambda x: 'a' in x if x is not None else None, skip_na=False)
expected_output = [1, 0, 0, None]
self.__test_equal(sa_bool_r, expected_output, int)
# lambda returns dict
expected_output = [{'a':1}, {1:2}, None, {'c': 'a'}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.apply(lambda x: x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter_dict(self):
expected_output = [{'a':1}]
sa_dict = SArray(expected_output, dict)
ret = sa_dict.filter(lambda x: 'a' in x)
self.__test_equal(ret, expected_output, dict)
# try second time to make sure the lambda system still works
expected_output = [{1:2}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.filter(lambda x: 1 in x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter(self):
# test empty
s = SArray([], float)
no_change = s.filter(lambda x : x == 0)
self.assertEqual(len(no_change), 0)
# test normal case
s = SArray(self.int_data, int)
middle_of_array = s.filter(lambda x: x > 3 and x < 8)
self.assertEqual(list(middle_of_array.head(10)), [x for x in range(4,8)])
# test normal string case
s = SArray(self.string_data, str)
exp_val_list = [x for x in self.string_data if x != 'world']
# Remove all words whose second letter is not in the first half of the alphabet
second_letter = s.filter(lambda x: len(x) > 1 and (ord(x[1]) > ord('a')) and (ord(x[1]) < ord('n')))
self.assertEqual(list(second_letter.head(10)), exp_val_list)
# test not-a-lambda
def a_filter_func(x):
return ((x > 4.4) and (x < 6.8))
s = SArray(self.int_data, float)
another = s.filter(a_filter_func)
self.assertEqual(list(another.head(10)), [5.,6.])
sa = SArray(self.float_data)
# filter by self
sa2 = sa[sa]
self.assertEqual(list(sa.head(10)), list(sa2.head(10)))
# filter by zeros
sa_filter = SArray([0,0,0,0,0,0,0,0,0,0])
sa2 = sa[sa_filter]
self.assertEqual(len(sa2), 0)
# filter by wrong size
sa_filter = SArray([0,2,5])
with self.assertRaises(IndexError):
sa2 = sa[sa_filter]
def test_any_all(self):
s = SArray([0,1,2,3,4,5,6,7,8,9], int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
s = SArray([0,0,0,0,0], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray(self.string_data, str)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
s = SArray(self.int_data, int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
# test empty
s = SArray([], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), True)
s = SArray([[], []], array.array)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray([[],[1.0]], array.array)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
def test_astype(self):
# test empty
s = SArray([], int)
as_out = s.astype(float)
self.assertEqual(as_out.dtype, float)
# test float -> int
s = SArray(list(map(lambda x: x+0.2, self.float_data)), float)
as_out = s.astype(int)
self.assertEqual(list(as_out.head(10)), self.int_data)
# test int->string
s = SArray(self.int_data, int)
as_out = s.astype(str)
self.assertEqual(list(as_out.head(10)), list(map(lambda x: str(x), self.int_data)))
i_out = as_out.astype(int)
self.assertEqual(list(i_out.head(10)), list(s.head(10)))
s = SArray(self.vec_data, array.array)
with self.assertRaises(RuntimeError):
s.astype(int)
with self.assertRaises(RuntimeError):
s.astype(float)
s = SArray(["a","1","2","3"])
with self.assertRaises(RuntimeError):
s.astype(int)
self.assertEqual(list(s.astype(int,True).head(4)), [None,1,2,3])
s = SArray(["[1 2 3]","[4;5]"])
ret = list(s.astype(array.array).head(2))
self.assertEqual(ret, [array.array('d',[1,2,3]),array.array('d',[4,5])])
s = SArray(["[1,\"b\",3]","[4,5]"])
ret = list(s.astype(list).head(2))
self.assertEqual(ret, [[1,"b",3],[4,5]])
s = SArray(["{\"a\":2,\"b\":3}","{}"])
ret = list(s.astype(dict).head(2))
self.assertEqual(ret, [{"a":2,"b":3},{}])
s = SArray(["[1abc]"])
ret = list(s.astype(list).head(1))
self.assertEqual(ret, [["1abc"]])
s = SArray(["{1xyz:1a,2b:2}"])
ret = list(s.astype(dict).head(1))
self.assertEqual(ret, [{"1xyz":"1a","2b":2}])
# astype between list and array
s = SArray([array.array('d',[1.0,2.0]), array.array('d',[2.0,3.0])])
ret = list(s.astype(list))
self.assertEqual(ret, [[1.0, 2.0], [2.0,3.0]])
ret = list(s.astype(list).astype(array.array))
self.assertEqual(list(s), list(ret))
with self.assertRaises(RuntimeError):
ret = list(SArray([["a",1.0],["b",2.0]]).astype(array.array))
badcast = list(SArray([["a",1.0],["b",2.0]]).astype(array.array, undefined_on_failure=True))
self.assertEqual(badcast, [None, None])
with self.assertRaises(TypeError):
s.astype(None)
def test_clip(self):
# invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.clip(25,26)
with self.assertRaises(RuntimeError):
s.clip_lower(25)
with self.assertRaises(RuntimeError):
s.clip_upper(26)
# int w/ int, test lower and upper functions too
# int w/float, no change
s = SArray(self.int_data, int)
clip_out = s.clip(3,7).head(10)
# test that our list isn't cast to float if nothing happened
clip_out_nc = s.clip(0.2, 10.2).head(10)
lclip_out = s.clip_lower(3).head(10)
rclip_out = s.clip_upper(7).head(10)
self.assertEqual(len(clip_out), len(self.int_data))
self.assertEqual(len(lclip_out), len(self.int_data))
self.assertEqual(len(rclip_out), len(self.int_data))
for i in range(0,len(clip_out)):
if i < 2:
self.assertEqual(clip_out[i], 3)
self.assertEqual(lclip_out[i], 3)
self.assertEqual(rclip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
elif i > 6:
self.assertEqual(clip_out[i], 7)
self.assertEqual(lclip_out[i], self.int_data[i])
self.assertEqual(rclip_out[i], 7)
self.assertEqual(clip_out_nc[i], self.int_data[i])
else:
self.assertEqual(clip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
# int w/float, change
# float w/int
# float w/float
clip_out = s.clip(2.8, 7.2).head(10)
fs = SArray(self.float_data, float)
ficlip_out = fs.clip(3, 7).head(10)
ffclip_out = fs.clip(2.8, 7.2).head(10)
for i in range(0,len(clip_out)):
if i < 2:
self.assertAlmostEqual(clip_out[i], 2.8)
self.assertAlmostEqual(ffclip_out[i], 2.8)
self.assertAlmostEqual(ficlip_out[i], 3.)
elif i > 6:
self.assertAlmostEqual(clip_out[i], 7.2)
self.assertAlmostEqual(ffclip_out[i], 7.2)
self.assertAlmostEqual(ficlip_out[i], 7.)
else:
self.assertAlmostEqual(clip_out[i], self.float_data[i])
self.assertAlmostEqual(ffclip_out[i], self.float_data[i])
self.assertAlmostEqual(ficlip_out[i], self.float_data[i])
vs = SArray(self.vec_data, array.array)
clipvs = vs.clip(3, 7).head(100)
self.assertEqual(len(clipvs), len(self.vec_data))
for i in range(0, len(clipvs)):
a = clipvs[i]
b = self.vec_data[i]
self.assertEqual(len(a), len(b))
for j in range(0, len(b)):
if b[j] < 3:
b[j] = 3
elif b[j] > 7:
b[j] = 7
self.assertEqual(a, b)
def test_missing(self):
s=SArray(self.int_data, int)
self.assertEqual(s.countna(), 0)
s=SArray(self.int_data + [None], int)
self.assertEqual(s.countna(), 1)
s=SArray(self.float_data, float)
self.assertEqual(s.countna(), 0)
s=SArray(self.float_data + [None], float)
self.assertEqual(s.countna(), 1)
s=SArray(self.string_data, str)
self.assertEqual(s.countna(), 0)
s=SArray(self.string_data + [None], str)
self.assertEqual(s.countna(), 1)
s=SArray(self.vec_data, array.array)
self.assertEqual(s.countna(), 0)
s=SArray(self.vec_data + [None], array.array)
self.assertEqual(s.countna(), 1)
def test_nonzero(self):
# test empty
s = SArray([],int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test all nonzero
s = SArray(self.float_data, float)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.float_data))
# test all zero
s = SArray([0 for x in range(0,10)], int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test strings
str_list = copy.deepcopy(self.string_data)
str_list.append("")
s = SArray(str_list, str)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.string_data))
def test_std_var(self):
# test empty
s = SArray([], int)
self.assertTrue(s.std() is None)
self.assertTrue(s.var() is None)
# increasing ints
s = SArray(self.int_data, int)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# increasing floats
s = SArray(self.float_data, float)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# vary ddof
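# for the values 1..10 the sum of squared deviations is 82.5, so
# var(ddof=k) == 82.5 / (10 - k) and std(ddof=k) is its square root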
self.assertAlmostEqual(s.var(ddof=3), 11.7857143)
self.assertAlmostEqual(s.var(ddof=6), 20.625)
self.assertAlmostEqual(s.var(ddof=9), 82.5)
self.assertAlmostEqual(s.std(ddof=3), 3.4330328)
self.assertAlmostEqual(s.std(ddof=6), 4.5414755)
self.assertAlmostEqual(s.std(ddof=9), 9.08295106)
# bad ddof
with self.assertRaises(RuntimeError):
s.var(ddof=11)
with self.assertRaises(RuntimeError):
s.std(ddof=11)
# bad type
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.std()
with self.assertRaises(RuntimeError):
s.var()
# overflow test
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
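# population variance of {1, huge_int} is ((huge_int - 1) / 2) ** 2 ~= 2.13e37;
# std is its square root ~= 4.61e18, hence the float comparisons below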
self.assertAlmostEqual(s.var(), 21267647932558653957237540927630737409.0)
self.assertAlmostEqual(s.std(), 4611686018427387900.0)
def test_tail(self):
# test empty
s = SArray([], int)
self.assertEqual(len(s.tail()), 0)
# test standard tail
s = SArray([x for x in range(0,40)], int)
self.assertEqual(list(s.tail()), [x for x in range(30,40)])
# smaller amount
self.assertEqual(list(s.tail(3)), [x for x in range(37,40)])
# larger amount
self.assertEqual(list(s.tail(40)), [x for x in range(0,40)])
# too large
self.assertEqual(list(s.tail(81)), [x for x in range(0,40)])
def test_max_min_sum_mean(self):
# negative and positive
s = SArray([-2,-1,0,1,2], int)
self.assertEqual(s.max(), 2)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), 0)
self.assertAlmostEqual(s.mean(), 0.)
# test valid and invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.max()
with self.assertRaises(RuntimeError):
s.min()
with self.assertRaises(RuntimeError):
s.sum()
with self.assertRaises(RuntimeError):
s.mean()
s = SArray(self.int_data, int)
self.assertEqual(s.max(), 10)
self.assertEqual(s.min(), 1)
self.assertEqual(s.sum(), 55)
self.assertAlmostEqual(s.mean(), 5.5)
s = SArray(self.float_data, float)
self.assertEqual(s.max(), 10.)
self.assertEqual(s.min(), 1.)
self.assertEqual(s.sum(), 55.)
self.assertAlmostEqual(s.mean(), 5.5)
# test all negative
s = SArray(list(map(lambda x: x*-1, self.int_data)), int)
self.assertEqual(s.max(), -1)
self.assertEqual(s.min(), -10)
self.assertEqual(s.sum(), -55)
self.assertAlmostEqual(s.mean(), -5.5)
# test empty
s = SArray([], float)
self.assertTrue(s.max() is None)
self.assertTrue(s.min() is None)
self.assertTrue(s.mean() is None)
# test sum
t = SArray([], float).sum()
self.assertTrue(type(t) == float)
self.assertTrue(t == 0.0)
t = SArray([], int).sum()
self.assertTrue(type(t) in six.integer_types)
self.assertTrue(t == 0)
self.assertTrue(SArray([], array.array).sum() == array.array('d',[]))
# test big ints
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertEqual(s.max(), huge_int)
self.assertEqual(s.min(), 1)
# yes, we overflow
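# 1 + (2**63 - 1) wraps around to -2**63 in signed 64-bit arithmetic,
# i.e. -(huge_int + 1)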
self.assertEqual(s.sum(), (huge_int+1)*-1)
# ...but not here
self.assertAlmostEqual(s.mean(), 4611686018427387904.)
a = SArray([[1,2],[1,2],[1,2]], array.array)
self.assertEqual(a.sum(), array.array('d', [3,6]))
self.assertEqual(a.mean(), array.array('d', [1,2]))
with self.assertRaises(RuntimeError):
a.max()
with self.assertRaises(RuntimeError):
a.min()
a = SArray([[1,2],[1,2],[1,2,3]], array.array)
with self.assertRaises(RuntimeError):
a.sum()
with self.assertRaises(RuntimeError):
a.mean()
def test_max_min_sum_mean_missing(self):
# negative and positive
s = SArray([-2,0,None,None,None], int)
self.assertEqual(s.max(), 0)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), -2)
self.assertAlmostEqual(s.mean(), -1)
s = SArray([None,None,None], int)
self.assertEqual(s.max(), None)
self.assertEqual(s.min(), None)
self.assertEqual(s.sum(), 0)
self.assertEqual(s.mean(), None)
def test_python_special_functions(self):
s = SArray([], int)
self.assertEqual(len(s), 0)
self.assertEqual(str(s), '[]')
self.assertRaises(ValueError, lambda: bool(s))
# increasing ints
s = SArray(self.int_data, int)
self.assertEqual(len(s), len(self.int_data))
self.assertEqual(list(s), self.int_data)
self.assertRaises(ValueError, lambda: bool(s))
realsum = sum(self.int_data)
sum1 = sum([x for x in s])
sum2 = s.sum()
sum3 = s.apply(lambda x:x, int).sum()
self.assertEqual(sum1, realsum)
self.assertEqual(sum2, realsum)
self.assertEqual(sum3, realsum)
# abs
s=np.array(range(-10, 10))
t = SArray(s, int)
self.__test_equal(abs(t), list(abs(s)), int)
t = SArray(s, float)
self.__test_equal(abs(t), list(abs(s)), float)
t = SArray([s], array.array)
self.__test_equal(SArray(abs(t)[0]), list(abs(s)), float)
def test_scalar_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10])
t = SArray(s, int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(t - 1, list(s - 1), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / 2, list(s / 2.0), float)
self.__test_equal(t * 2, list(s * 2), int)
self.__test_equal(t ** 2, list(s ** 2), float)
self.__test_almost_equal(t ** 0.5, list(s ** 0.5), float)
self.__test_equal(((t ** 2) ** 0.5 + 1e-8).astype(int), list(s), int)
self.__test_equal(t < 5, list(s < 5), int)
self.__test_equal(t > 5, list(s > 5), int)
self.__test_equal(t <= 5, list(s <= 5), int)
self.__test_equal(t >= 5, list(s >= 5), int)
self.__test_equal(t == 5, list(s == 5), int)
self.__test_equal(t != 5, list(s != 5), int)
self.__test_equal(t % 5, list(s % 5), int)
self.__test_equal(t // 5, list(s // 5), int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(+t, list(+s), int)
self.__test_equal(-t, list(-s), int)
self.__test_equal(1.5 - t, list(1.5 - s), float)
self.__test_equal(2.0 / t, list(2.0 / s), float)
self.__test_equal(2 / t, list(2.0 / s), float)
self.__test_equal(2.5 * t, list(2.5 * s), float)
self.__test_equal(2**t, list(2**s), float)
s_neg = np.array([-1,-2,-3,5,6,7,8,9,10])
t_neg = SArray(s_neg, int)
self.__test_equal(t_neg // 5, list(s_neg // 5), int)
self.__test_equal(t_neg % 5, list(s_neg % 5), int)
s=["a","b","c"]
t = SArray(s, str)
self.__test_equal(t + "x", [i + "x" for i in s], str)
with self.assertRaises(RuntimeError):
t - 'x'
with self.assertRaises(RuntimeError):
t * 'x'
with self.assertRaises(RuntimeError):
t / 'x'
s = SArray(self.vec_data, array.array)
self.__test_equal(s + 1, [array.array('d', [float(j) + 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - 1, [array.array('d', [float(j) - 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * 2, [array.array('d', [float(j) * 2 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / 2, [array.array('d', [float(j) / 2 for j in i]) for i in self.vec_data], array.array)
s = SArray([1,2,3,4,None])
self.__test_equal(s == None, [0, 0, 0, 0, 1], int)
self.__test_equal(s != None, [1, 1, 1, 1, 0], int)
def test_modulus_operator(self):
l = [-5,-4,-3,-2,-1,0,1,2,3,4,5]
t = SArray(l, int)
self.__test_equal(t % 2, [i % 2 for i in l], int)
self.__test_equal(t % -2, [i % -2 for i in l], int)
def test_vector_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10])
s2=np.array([5,4,3,2,1,10,9,8,7,6])
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t + t2, list(s + s2), int)
self.__test_equal(t - t2, list(s - s2), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / t2, list(s.astype(float) / s2), float)
self.__test_equal(t * t2, list(s * s2), int)
self.__test_equal(t ** t2, list(s ** s2), float)
self.__test_almost_equal(t ** (1.0 / t2), list(s ** (1.0 / s2)), float)
self.__test_equal(t > t2, list(s > s2), int)
self.__test_equal(t <= t2, list(s <= s2), int)
self.__test_equal(t >= t2, list(s >= s2), int)
self.__test_equal(t == t2, list(s == s2), int)
self.__test_equal(t != t2, list(s != s2), int)
s = SArray(self.vec_data, array.array)
self.__test_almost_equal(s + s, [array.array('d', [float(j) + float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s - s, [array.array('d', [float(j) - float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s * s, [array.array('d', [float(j) * float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s / s, [array.array('d', [float(j) / float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s ** s, [array.array('d', [float(j) ** float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s // s, [array.array('d', [float(j) // float(j) for j in i]) for i in self.vec_data], array.array)
t = SArray(self.float_data, float)
self.__test_almost_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s ** t, [array.array('d', [float(j) ** i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s // t, [array.array('d', [float(j) // i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(+s, [array.array('d', [float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(-s, [array.array('d', [-float(j) for j in i]) for i in self.vec_data], array.array)
neg_float_data = [-v for v in self.float_data]
t = SArray(neg_float_data, float)
self.__test_almost_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s ** t, [array.array('d', [float(j) ** i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s // t, [array.array('d', [float(j) // i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(t // s, [array.array('d', [i[1] // float(j) for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
s = SArray([1,2,3,4,None])
self.assertTrue((s==s).all())
s = SArray([1,2,3,4,None])
self.assertFalse((s!=s).any())
def test_div_corner(self):
def try_eq_sa_val(left_val, right_val):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(right_val) is array.array:
if type(left_val) is array.array:
v2 = array.array('d', [lv // rv for lv, rv in zip(left_val, right_val)])
else:
v2 = array.array('d', [left_val // rv for rv in right_val])
else:
if type(left_val) is array.array:
v2 = array.array('d', [lv // right_val for lv in left_val])
else:
v2 = left_val // right_val
if type(v1) in six.integer_types:
self.assertTrue(type(v2) in six.integer_types)
else:
self.assertEqual(type(v1), type(v2))
self.assertEqual(v1, v2)
try_eq_sa_val(1, 2)
try_eq_sa_val(1.0, 2)
try_eq_sa_val(1, 2.0)
try_eq_sa_val(1.0, 2.0)
try_eq_sa_val(-1, 2)
try_eq_sa_val(-1.0, 2)
try_eq_sa_val(-1, 2.0)
try_eq_sa_val(-1.0, 2.0)
try_eq_sa_val([1, -1], 2)
try_eq_sa_val([1, -1], 2.0)
try_eq_sa_val(2,[3, -3])
try_eq_sa_val(2.0,[3, -3])
def test_floordiv_corner(self):
def try_eq_sa_val(left_val, right_val):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(right_val) is array.array:
if type(left_val) is array.array:
v2 = array.array('d', [lv // rv for lv, rv in zip(left_val, right_val)])
else:
v2 = array.array('d', [left_val // rv for rv in right_val])
else:
if type(left_val) is array.array:
v2 = array.array('d', [lv // right_val for lv in left_val])
else:
v2 = left_val // right_val
if type(v1) in six.integer_types:
self.assertTrue(type(v2) in six.integer_types)
else:
self.assertEqual(type(v1), type(v2))
self.assertEqual(v1, v2)
try_eq_sa_val(1, 2)
try_eq_sa_val(1.0, 2)
try_eq_sa_val(1, 2.0)
try_eq_sa_val(1.0, 2.0)
try_eq_sa_val(-1, 2)
try_eq_sa_val(-1.0, 2)
try_eq_sa_val(-1, 2.0)
try_eq_sa_val(-1.0, 2.0)
try_eq_sa_val([1, -1], 2)
try_eq_sa_val([1, -1], 2.0)
try_eq_sa_val(2,[3, -3])
try_eq_sa_val(2.0,[3, -3])
from math import isnan
def try_eq_sa_correct(left_val, right_val, correct):
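# Helper: floor-division corner cases -- integer division by zero yields
# None, while float division by zero yields +/-inf or nan.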
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(correct) is not list:
v1 = [v1]
correct = [correct]
for v, c in zip(v1, correct):
if type(v) is float and isnan(v):
assert isnan(c)
else:
self.assertEqual(type(v), type(c))
self.assertEqual(v, c)
try_eq_sa_correct(1, 0, None)
try_eq_sa_correct(0, 0, None)
try_eq_sa_correct(-1, 0, None)
try_eq_sa_correct(1.0, 0, float('inf'))
try_eq_sa_correct(0.0, 0, float('nan'))
try_eq_sa_correct(-1.0, 0, float('-inf'))
try_eq_sa_correct([1.0,0,-1], 0, [float('inf'), float('nan'), float('-inf')])
try_eq_sa_correct(1, [1.0, 0], [1., float('inf')])
try_eq_sa_correct(-1, [1.0, 0], [-1., float('-inf')])
try_eq_sa_correct(0, [1.0, 0], [0., float('nan')])
def test_logical_ops(self):
s=np.array([0,0,0,0,1,1,1,1])
s2=np.array([0,1,0,1,0,1,0,1])
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t & t2, list(((s & s2) > 0).astype(int)), int)
self.__test_equal(t | t2, list(((s | s2) > 0).astype(int)), int)
def test_logical_ops_missing_value_propagation(self):
s=[0, 0,0,None, None, None,1,1, 1]
s2=[0,None,1,0, None, 1, 0,None,1]
t = SArray(s, int)
t2 = SArray(s2, int)
and_result = [0,0,0,0,None,None,0,None,1]
or_result = [0,None,1,None,None,1,1,1,1]
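# missing values propagate with three-valued logic: 0 & None == 0,
# 1 | None == 1, and everything else involving None stays None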
self.__test_equal(t & t2, and_result, int)
self.__test_equal(t | t2, or_result, int)
def test_string_operators(self):
s=["a","b","c","d","e","f","g","h","i","j"]
s2=["e","d","c","b","a","j","i","h","g","f"]
t = SArray(s, str)
t2 = SArray(s2, str)
self.__test_equal(t + t2, ["".join(x) for x in zip(s,s2)], str)
self.__test_equal(t + "x", [x + "x" for x in s], str)
self.__test_equal(t < t2, [x < y for (x,y) in zip(s,s2)], int)
self.__test_equal(t > t2, [x > y for (x,y) in zip(s,s2)], int)
self.__test_equal(t == t2, [x == y for (x,y) in zip(s,s2)], int)
self.__test_equal(t != t2, [x != y for (x,y) in zip(s,s2)], int)
self.__test_equal(t <= t2, [x <= y for (x,y) in zip(s,s2)], int)
self.__test_equal(t >= t2, [x >= y for (x,y) in zip(s,s2)], int)
def test_vector_operator_missing_propagation(self):
t = SArray([1,2,3,4,None,6,7,8,9,None], float) # missing 4th and 9th
t2 = SArray([None,4,3,2,np.nan,10,9,8,7,6], float) # missing 0th and 4th
self.assertEqual(len((t + t2).dropna()), 7)
self.assertEqual(len((t - t2).dropna()), 7)
self.assertEqual(len((t * t2).dropna()), 7)
def test_dropna(self):
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
self.assertEqual(len(t.dropna()), 6)
self.assertEqual(list(t.dropna()), no_nas)
t2 = SArray([None,np.nan])
self.assertEqual(len(t2.dropna()), 0)
self.assertEqual(list(SArray(self.int_data).dropna()), self.int_data)
self.assertEqual(list(SArray(self.float_data).dropna()), self.float_data)
def test_fillna(self):
# fillna shouldn't fill anything
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
out = t.fillna('hello')
self.assertEqual(list(out), no_nas)
# Normal integer case (the float fill value is automatically cast to int)
t = SArray([53,23,None,np.nan,5])
self.assertEqual(list(t.fillna(-1.0)), [53,23,-1,-1,5])
# dict type
t = SArray(self.dict_data+[None])
self.assertEqual(list(t.fillna({1:'1'})), self.dict_data+[{1:'1'}])
# list type
t = SArray(self.list_data+[None])
self.assertEqual(list(t.fillna([0,0,0])), self.list_data+[[0,0,0]])
# vec type
t = SArray(self.vec_data+[None])
self.assertEqual(list(t.fillna(array.array('f',[0.0,0.0]))), self.vec_data+[array.array('f',[0.0,0.0])])
# empty sarray
t = SArray()
self.assertEqual(len(t.fillna(0)), 0)
def test_sample(self):
sa = SArray(data=self.int_data)
sa_sample = sa.sample(.5, 9)
sa_sample2 = sa.sample(.5, 9)
self.assertEqual(list(sa_sample.head()), list(sa_sample2.head()))
for i in sa_sample:
self.assertTrue(i in self.int_data)
with self.assertRaises(ValueError):
sa.sample(3)
sa_sample = SArray().sample(.5, 9)
self.assertEqual(len(sa_sample), 0)
self.assertEqual(len(SArray.from_sequence(100).sample(0.5, 1, exact=True)), 50)
self.assertEqual(len(SArray.from_sequence(100).sample(0.5, 2, exact=True)), 50)
def test_hash(self):
a = SArray([0,1,0,1,0,1,0,1], int)
b = a.hash()
zero_hash = b[0]
one_hash = b[1]
self.assertTrue((b[a] == one_hash).all())
self.assertTrue((b[1-a] == zero_hash).all())
# I can hash other stuff too
# does not throw
a.astype(str).hash().__materialize__()
a.apply(lambda x: [x], list).hash().__materialize__()
# Nones hash too!
a = SArray([None, None, None], int).hash()
self.assertTrue(a[0] is not None)
self.assertTrue((a == a[0]).all())
# different seeds give different hash values
self.assertTrue((a.hash(seed=0) != a.hash(seed=1)).all())
def test_random_integers(self):
a = SArray.random_integers(0)
self.assertEqual(len(a), 0)
a = SArray.random_integers(1000)
self.assertEqual(len(a), 1000)
def test_vector_slice(self):
d=[[1],[1,2],[1,2,3]]
g=SArray(d, array.array)
self.assertEqual(list(g.vector_slice(0).head()), [1,1,1])
self.assertEqual(list(g.vector_slice(0,2).head()), [None,array.array('d', [1,2]),array.array('d', [1,2])])
self.assertEqual(list(g.vector_slice(0,3).head()), [None,None,array.array('d', [1,2,3])])
g=SArray(self.vec_data, array.array)
self.__test_equal(g.vector_slice(0), self.float_data, float)
self.__test_equal(g.vector_slice(0, 2), self.vec_data, array.array)
def _my_element_slice(self, arr, start=None, stop=None, step=1):
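# Reference implementation: apply Python's built-in slice to each element,
# used to cross-check SArray.element_slice below.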
return arr.apply(lambda x: x[slice(start, stop, step)], arr.dtype)
def _slice_equality_test(self, arr, start=None, stop=None, step=1):
self.assertEqual(
list(arr.element_slice(start, stop, step)),
list(self._my_element_slice(arr,start,stop,step)))
def test_element_slice(self):
#string slicing
g=SArray(range(1,1000, 10)).astype(str)
self._slice_equality_test(g, 0, 2)
self._slice_equality_test(g, 0, -1, 2)
self._slice_equality_test(g, -1, -3)
self._slice_equality_test(g, -1, -2, -1)
self._slice_equality_test(g, None, None, -1)
self._slice_equality_test(g, -100, -1)
#list slicing
g=SArray(range(1,10)).apply(lambda x: list(range(x)), list)
self._slice_equality_test(g, 0, 2)
self._slice_equality_test(g, 0, -1, 2)
self._slice_equality_test(g, -1, -3)
self._slice_equality_test(g, -1, -2, -1)
self._slice_equality_test(g, None, None, -1)
self._slice_equality_test(g, -100, -1)
#array slicing
import array
g=SArray(range(1,10)).apply(lambda x: array.array('d', range(x)))
self._slice_equality_test(g, 0, 2)
self._slice_equality_test(g, 0, -1, 2)
self._slice_equality_test(g, -1, -3)
self._slice_equality_test(g, -1, -2, -1)
self._slice_equality_test(g, None, None, -1)
self._slice_equality_test(g, -100, -1)
#this should fail
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).element_slice(1)
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).astype(float).element_slice(1)
def test_lazy_eval(self):
sa = SArray(range(-10, 10))
sa = sa + 1
sa1 = sa >= 0
sa2 = sa <= 0
sa3 = sa[sa1 & sa2]
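# sa1 & sa2 keeps only elements equal to 0; range(-10, 10) + 1 contains
# exactly one zero, so a single element survives the filter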
item_count = len(sa3)
self.assertEqual(item_count, 1)
def __test_append(self, data1, data2, dtype):
sa1 = SArray(data1, dtype)
sa2 = SArray(data2, dtype)
sa3 = sa1.append(sa2)
self.__test_equal(sa3, data1 + data2, dtype)
sa3 = sa2.append(sa1)
self.__test_equal(sa3, data2 + data1, dtype)
def test_append(self):
n = len(self.int_data)
m = n // 2
self.__test_append(self.int_data[0:m], self.int_data[m:n], int)
self.__test_append(self.bool_data[0:m], self.bool_data[m:n], int)
self.__test_append(self.string_data[0:m], self.string_data[m:n], str)
self.__test_append(self.float_data[0:m], self.float_data[m:n], float)
self.__test_append(self.vec_data[0:m], self.vec_data[m:n], array.array)
self.__test_append(self.dict_data[0:m], self.dict_data[m:n], dict)
def test_append_exception(self):
val1 = [i for i in range(1, 1000)]
val2 = [str(i) for i in range(-10, 1)]
sa1 = SArray(val1, int)
sa2 = SArray(val2, str)
with self.assertRaises(RuntimeError):
sa3 = sa1.append(sa2)
def test_word_count(self):
sa = SArray(["This is someurl http://someurl!!",
"中文 应该也 行",
'Сблъсъкът между'])
expected = [{"this": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
expected2 = [{"This": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
sa1 = sa._count_words()
self.assertEqual(sa1.dtype, dict)
self.__test_equal(sa1, expected, dict)
sa1 = sa._count_words(to_lower=False)
self.assertEqual(sa1.dtype, dict)
self.__test_equal(sa1, expected2, dict)
#should fail if the input type is not string
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
sa._count_words()
def test_word_count2(self):
sa = SArray(["This is some url http://www.someurl.com!!", "Should we? Yes, we should."])
#TODO: Get some weird unicode whitespace in the Chinese and Russian tests
expected1 = [{"this": 1, "is": 1, "some": 1, "url": 1, "http://www.someurl.com!!": 1},
{"should": 1, "we?": 1, "we": 1, "yes,": 1, "should.": 1}]
expected2 = [{"this is some url http://www.someurl.com": 1},
{"should we": 1, " yes": 1, " we should.": 1}]
word_counts1 = sa._count_words()
word_counts2 = sa._count_words(delimiters=["?", "!", ","])
self.assertEqual(word_counts1.dtype, dict)
self.__test_equal(word_counts1, expected1, dict)
self.assertEqual(word_counts2.dtype, dict)
self.__test_equal(word_counts2, expected2, dict)
def test_ngram_count(self):
sa_word = SArray(["I like big dogs. They are fun. I LIKE BIG DOGS", "I like.", "I like big"])
sa_character = SArray(["Fun. is. fun","Fun is fun.","fu", "fun"])
# Testing word n-gram functionality
result = sa_word._count_ngrams(3)
result2 = sa_word._count_ngrams(2)
result3 = sa_word._count_ngrams(3,"word", to_lower=False)
result4 = sa_word._count_ngrams(2,"word", to_lower=False)
expected = [{'fun i like': 1, 'i like big': 2, 'they are fun': 1, 'big dogs they': 1, 'like big dogs': 2, 'are fun i': 1, 'dogs they are': 1}, {}, {'i like big': 1}]
expected2 = [{'i like': 2, 'dogs they': 1, 'big dogs': 2, 'are fun': 1, 'like big': 2, 'they are': 1, 'fun i': 1}, {'i like': 1}, {'i like': 1, 'like big': 1}]
expected3 = [{'I like big': 1, 'fun I LIKE': 1, 'I LIKE BIG': 1, 'LIKE BIG DOGS': 1, 'They are fun': 1, 'big dogs They': 1, 'like big dogs': 1, 'are fun I': 1, 'dogs They are': 1}, {}, {'I like big': 1}]
expected4 = [{'I like': 1, 'like big': 1, 'I LIKE': 1, 'BIG DOGS': 1, 'are fun': 1, 'LIKE BIG': 1, 'big dogs': 1, 'They are': 1, 'dogs They': 1, 'fun I': 1}, {'I like': 1}, {'I like': 1, 'like big': 1}]
self.assertEqual(result.dtype, dict)
self.__test_equal(result, expected, dict)
self.assertEqual(result2.dtype, dict)
self.__test_equal(result2, expected2, dict)
self.assertEqual(result3.dtype, dict)
self.__test_equal(result3, expected3, dict)
self.assertEqual(result4.dtype, dict)
self.__test_equal(result4, expected4, dict)
#Testing character n-gram functionality
result5 = sa_character._count_ngrams(3, "character")
result6 = sa_character._count_ngrams(2, "character")
result7 = sa_character._count_ngrams(3, "character", to_lower=False)
result8 = sa_character._count_ngrams(2, "character", to_lower=False)
result9 = sa_character._count_ngrams(3, "character", to_lower=False, ignore_space=False)
result10 = sa_character._count_ngrams(2, "character", to_lower=False, ignore_space=False)
result11 = sa_character._count_ngrams(3, "character", to_lower=True, ignore_space=False)
result12 = sa_character._count_ngrams(2, "character", to_lower=True, ignore_space=False)
expected5 = [{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {}, {'fun': 1}]
expected6 = [{'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected7 = [{'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {}, {'fun': 1}]
expected8 = [{'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected9 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {}, {'fun': 1}]
expected10 = [{' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected11 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {}, {'fun': 1}]
expected12 = [{' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
self.assertEqual(result5.dtype, dict)
self.__test_equal(result5, expected5, dict)
self.assertEqual(result6.dtype, dict)
self.__test_equal(result6, expected6, dict)
self.assertEqual(result7.dtype, dict)
self.__test_equal(result7, expected7, dict)
self.assertEqual(result8.dtype, dict)
self.__test_equal(result8, expected8, dict)
self.assertEqual(result9.dtype, dict)
self.__test_equal(result9, expected9, dict)
self.assertEqual(result10.dtype, dict)
self.__test_equal(result10, expected10, dict)
self.assertEqual(result11.dtype, dict)
self.__test_equal(result11, expected11, dict)
self.assertEqual(result12.dtype, dict)
self.__test_equal(result12, expected12, dict)
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
#should fail if the input type is not string
sa._count_ngrams()
with self.assertRaises(TypeError):
#should fail if n is not of type 'int'
sa_word._count_ngrams(1.01)
with self.assertRaises(ValueError):
#should fail with invalid method
sa_word._count_ngrams(3,"bla")
with self.assertRaises(ValueError):
#should fail with n <0
sa_word._count_ngrams(0)
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
sa_word._count_ngrams(10)
assert len(context) == 1
def test_dict_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_keys = sa.dict_keys()
self.assertEqual([set(i) for i in sa_keys], [{str(i), i} for i in self.int_data])
# na value
d = [{'a': 1}, {None: 2}, {"b": None}, None]
sa = SArray(d)
sa_keys = sa.dict_keys()
self.assertEqual(list(sa_keys), [['a'], [None], ['b'], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_keys()
# empty SArray with type
sa = SArray([], dict)
self.assertEqual(list(sa.dict_keys().head(10)), [], list)
def test_dict_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_values = sa.dict_values()
self.assertEqual(list(sa_values), [[i, float(i)] for i in self.int_data])
# na value
d = [{'a': 1}, {None: 'str'}, {"b": None}, None]
sa = SArray(d)
sa_values = sa.dict_values()
self.assertEqual(list(sa_values), [[1], ['str'], [None], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_values()
# empty SArray with type
sa = SArray([], dict)
self.assertEqual(list(sa.dict_values().head(10)), [], list)
def test_dict_trim_by_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': [1,2]}, {None: 'str'}, {"b": None, "c": 1}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_keys(['a', 'b'])
self.assertEqual(list(sa_values), [{}, {None: 'str'}, {"c": 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_keys([])
sa = SArray([], dict)
self.assertEqual(list(sa.dict_trim_by_keys([]).head(10)), [], list)
def test_dict_trim_by_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_values(5,10)
self.assertEqual(list(sa_values), [{'c':None}, {None:5}, None])
# no upper key
sa_values = sa.dict_trim_by_values(2)
self.assertEqual(list(sa_values), [{'b': 20, 'c':None}, {"b": 4, None:5}, None])
# no param
sa_values = sa.dict_trim_by_values()
self.assertEqual(list(sa_values), [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None])
# no lower key
sa_values = sa.dict_trim_by_values(upper=7)
self.assertEqual(list(sa_values), [{'a':1, 'c':None}, {"b": 4, None: 5}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_values()
sa = SArray([], dict)
self.assertEqual(list(sa.dict_trim_by_values().head(10)), [], list)
def test_dict_has_any_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_any_keys([])
self.assertEqual(list(sa_values), [0,0,None,0])
sa_values = sa.dict_has_any_keys(['a'])
self.assertEqual(list(sa_values), [1,0,None,1])
# a single value is automatically converted to a list
sa_values = sa.dict_has_any_keys("a")
self.assertEqual(list(sa_values), [1,0,None,1])
sa_values = sa.dict_has_any_keys(['a', 'b'])
self.assertEqual(list(sa_values), [1,1,None,1])
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
sa = SArray([], dict)
self.assertEqual(list(sa.dict_has_any_keys([]).head(10)), [], list)
def test_dict_has_all_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_all_keys([])
self.assertEqual(list(sa_values), [1,1,None,1])
sa_values = sa.dict_has_all_keys(['a'])
self.assertEqual(list(sa_values), [1,0,None,1])
# a single value is automatically converted to a list
sa_values = sa.dict_has_all_keys("a")
self.assertEqual(list(sa_values), [1,0,None,1])
sa_values = sa.dict_has_all_keys(['a', 'b'])
self.assertEqual(list(sa_values), [1,0,None,0])
sa_values = sa.dict_has_all_keys([None, "b"])
self.assertEqual(list(sa_values), [0,1,None,0])
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
sa = SArray([], dict)
self.assertEqual(list(sa.dict_has_all_keys([]).head(10)), [], list)
def test_save_load_cleanup_file(self):
# similarly for SArray
with util.TempDirectory() as f:
sa = SArray(range(1,1000000))
sa.save(f)
# 17 for each sarray, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# sa1 now references the on-disk file
sa1 = SArray(f)
# create another SArray and save to the same location
sa2 = SArray([str(i) for i in range(1,100000)])
sa2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# now sa1 should still be accessible
self.__test_equal(sa1, list(sa), int)
# and sa2 is correct too
sa3 = SArray(f)
self.__test_equal(sa3, list(sa2), str)
# when sa1 goes out of scope, the tmp files should be gone
sa1 = 1
time.sleep(1) # give time for the files being deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# list_to_compare must have all unique values for this to work
def __generic_unique_test(self, list_to_compare):
test = SArray(list_to_compare + list_to_compare)
self.assertEqual(sorted(list(test.unique())), sorted(list_to_compare))
def test_unique(self):
# Test empty SArray
test = SArray([])
self.assertEqual(list(test.unique()), [])
# Test one value
test = SArray([1])
self.assertEqual(list(test.unique()), [1])
# Test many of one value
test = SArray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEqual(list(test.unique()), [1])
# Test all unique values
test = SArray(self.int_data)
self.assertEqual(sorted(list(test.unique())), self.int_data)
# Test an interesting sequence
interesting_ints = [4654,4352436,5453,7556,45435,4654,5453,4654,5453,1,1,1,5,5,5,8,66,7,7,77,90,-34]
test = SArray(interesting_ints)
u = test.unique()
self.assertEqual(len(u), 13)
# We do not preserve order
self.assertEqual(sorted(list(u)), sorted(np.unique(interesting_ints)))
# Test other types
self.__generic_unique_test(self.string_data[0:6])
# only works reliably because these float values can be compared
# for exact equality
self.__generic_unique_test(self.float_data)
self.__generic_unique_test(self.list_data)
self.__generic_unique_test(self.vec_data)
with self.assertRaises(TypeError):
SArray(self.dict_data).unique()
def test_item_len(self):
# empty SArray
test = SArray([])
with self.assertRaises(TypeError):
self.assertEqual(test.item_length())
# wrong type
test = SArray([1,2,3])
with self.assertRaises(TypeError):
self.assertEqual(test.item_length())
test = SArray(['1','2','3'])
with self.assertRaises(TypeError):
self.assertEqual(test.item_length())
# vector type
test = SArray([[], [1], [1,2], [1,2,3], None])
item_length = test.item_length()
self.assertEqual(list(item_length), list([0, 1,2,3,None]))
# dict type
test = SArray([{}, {'key1': 1}, {'key2':1, 'key1':2}, None])
self.assertEqual(list(test.item_length()), list([0, 1,2,None]))
# list type
test = SArray([[], [1,2], ['str', 'str2'], None])
self.assertEqual(list(test.item_length()), list([0, 2,2,None]))
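# Hedged sketch: item_length() counts elements per row for container dtypes
# (array, list, dict) and raises TypeError for scalar dtypes, as exercised above:
#   SArray([[1, 2], None]).item_length()   # -> [2, None]
#   SArray([1, 2, 3]).item_length()        # -> TypeError (int rows have no length)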
def test_random_access(self):
t = list(range(0,100000))
s = SArray(t)
# simple slices
self.__test_equal(s[1:10000], t[1:10000], int)
self.__test_equal(s[0:10000:3], t[0:10000:3], int)
self.__test_equal(s[1:10000:3], t[1:10000:3], int)
self.__test_equal(s[2:10000:3], t[2:10000:3], int)
self.__test_equal(s[3:10000:101], t[3:10000:101], int)
# negative slices
self.__test_equal(s[-5:], t[-5:], int)
self.__test_equal(s[-1:], t[-1:], int)
self.__test_equal(s[-100:-10], t[-100:-10], int)
self.__test_equal(s[-100:-10:2], t[-100:-10:2], int)
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912], t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10], t[-10])
# A cache boundary
self.assertEqual(s[32*1024-1], t[32*1024-1])
self.assertEqual(s[32*1024], t[32*1024])
# totally different
self.assertEqual(s[19312], t[19312])
# edge case oddities
self.__test_equal(s[10:100:100], t[10:100:100], int)
self.__test_equal(s[-100:len(s):10], t[-100:len(t):10], int)
self.__test_equal(s[-1:-2], t[-1:-2], int)
self.__test_equal(s[-1:-1000:2], t[-1:-1000:2], int)
with self.assertRaises(IndexError):
s[len(s)]
# with caching abilities; these should be fast, as 32K
# elements are cached.
for i in range(0, 100000, 100):
self.assertEqual(s[i], t[i])
for i in range(0, 100000, 100):
self.assertEqual(s[-i], t[-i])
def test_sort(self):
test = SArray([1,2,3,5,1,4])
ascending = SArray([1,1,2,3,4,5])
descending = SArray([5,4,3,2,1,1])
result = test.sort()
self.assertEqual(list(result), list(ascending))
result = test.sort(ascending = False)
self.assertEqual(list(result), list(descending))
with self.assertRaises(TypeError):
SArray([[1,2], [2,3]]).sort()
def test_unicode_encode_should_not_fail(self):
g=SArray([{'a':u'\u2019'}])
g=SArray([u'123',u'\u2019'])
g=SArray(['123',u'\u2019'])
def test_from_const(self):
g = SArray.from_const('a', 100)
self.assertEqual(len(g), 100)
self.assertEqual(list(g), ['a']*100)
g = SArray.from_const(dt.datetime(2013, 5, 7, 10, 4, 10),10)
self.assertEqual(len(g), 10)
self.assertEqual(list(g), [dt.datetime(2013, 5, 7, 10, 4, 10)]*10)
g = SArray.from_const(0, 0)
self.assertEqual(len(g), 0)
g = SArray.from_const(None, 100)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, float)
g = SArray.from_const(None, 100, str)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, str)
g = SArray.from_const(0, 100, float)
self.assertEqual(list(g), [0.0] * 100)
self.assertEqual(g.dtype, float)
g = SArray.from_const(0.0, 100, int)
self.assertEqual(list(g), [0] * 100)
self.assertEqual(g.dtype, int)
g = SArray.from_const(None, 100, float)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, float)
g = SArray.from_const(None, 100, int)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, int)
g = SArray.from_const(None, 100, list)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, list)
g = SArray.from_const([1], 100, list)
self.assertEqual(list(g), [[1]] * 100)
self.assertEqual(g.dtype, list)
def test_from_sequence(self):
with self.assertRaises(TypeError):
g = SArray.from_sequence()
g = SArray.from_sequence(100)
self.assertEqual(list(g), list(range(100)))
g = SArray.from_sequence(10, 100)
self.assertEqual(list(g), list(range(10, 100)))
g = SArray.from_sequence(100, 10)
self.assertEqual(list(g), list(range(100, 10)))
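# Hedged note: SArray.from_sequence mirrors Python's range() semantics, so the
# reversed bounds above produce an empty SArray rather than an error:
#   SArray.from_sequence(5)       # -> [0, 1, 2, 3, 4]   (assumed equivalent to range(5))
#   SArray.from_sequence(2, 5)    # -> [2, 3, 4]
#   SArray.from_sequence(5, 2)    # -> []                (like range(5, 2))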
def test_datetime(self):
sa = SArray(self.datetime_data)
self.__test_equal(sa ,self.datetime_data,dt.datetime)
sa = SArray(self.datetime_data2)
self.__test_equal(sa ,self.datetime_data2,dt.datetime)
ret = sa.split_datetime(limit=['year','month','day','hour','minute',
'second','us','weekday', 'isoweekday','tmweekday'])
self.assertEqual(ret.num_columns(), 10)
self.__test_equal(ret['X.year'] , [2013, 1902, None], int)
self.__test_equal(ret['X.month'] , [5, 10, None], int)
self.__test_equal(ret['X.day'] , [7, 21, None], int)
self.__test_equal(ret['X.hour'] , [10, 10, None], int)
self.__test_equal(ret['X.minute'] , [4, 34, None], int)
self.__test_equal(ret['X.second'] , [10, 10, None], int)
self.__test_equal(ret['X.us'] , [109321, 991111, None], int)
self.__test_equal(ret['X.weekday'] , [1, 1, None], int)
self.__test_equal(ret['X.isoweekday'] , [2, 2, None], int)
self.__test_equal(ret['X.tmweekday'] , [2, 2, None], int)
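# Hedged example of the split_datetime API exercised above (column names taken
# from the expectations; the "X." prefix is assumed to be the default):
#   SArray([dt.datetime(2013, 5, 7, 10, 4, 10)]).split_datetime(limit=['year', 'hour'])
#   # -> SFrame with columns X.year == [2013] and X.hour == [10]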
def test_datetime_difference(self):
sa = SArray(self.datetime_data)
sa2 = SArray(self.datetime_data2)
res = sa2 - sa
expected = [float(x.microsecond) / 1000000.0 if x is not None else x for x in self.datetime_data2]
self.assertEqual(len(res), len(expected))
for i in range(len(res)):
if res[i] is None:
self.assertEqual(res[i], expected[i])
else:
self.assertAlmostEqual(res[i], expected[i], places=6)
def test_datetime_lambda(self):
data = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111,
tzinfo=GMT(1))]
g=SArray(data)
gstr=g.apply(lambda x:str(x))
self.__test_equal(gstr, [str(x) for x in g], str)
gident=g.apply(lambda x:x)
self.__test_equal(gident, list(g), dt.datetime)
def test_datetime_to_str(self):
sa = SArray(self.datetime_data)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None],str)
sa = SArray([None,None,None],dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[None,None,None],str)
sa = SArray(dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[],str)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.datetime_to_str)
sa = SArray()
self.assertRaises(TypeError,sa.datetime_to_str)
def test_str_to_datetime(self):
sa_string = SArray(['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None])
sa_datetime_back = sa_string.str_to_datetime()
expected = self.datetime_data
self.__test_equal(sa_datetime_back,expected,dt.datetime)
sa_string = SArray([None,None,None],str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[None,None,None],dt.datetime)
sa_string = SArray(dtype=str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[],dt.datetime)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.str_to_datetime)
sa = SArray()
self.assertRaises(TypeError,sa.str_to_datetime)
# hour without leading zero
sa = SArray(['10/30/2014 9:01'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M')
expected = [dt.datetime(2014, 10, 30, 9, 1)]
self.__test_equal(sa,expected,dt.datetime)
# without delimiters
sa = SArray(['10302014 0901', '10302014 2001'])
sa = sa.str_to_datetime('%m%d%Y %H%M')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 20, 1)]
self.__test_equal(sa,expected,dt.datetime)
# another without delimiter test
sa = SArray(['20110623T191001'])
sa = sa.str_to_datetime("%Y%m%dT%H%M%S%F%q")
expected = [dt.datetime(2011, 6, 23, 19, 10, 1)]
self.__test_equal(sa,expected,dt.datetime)
# am pm
sa = SArray(['10/30/2014 9:01am', '10/30/2014 9:01pm'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%p')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
sa = SArray(['10/30/2014 9:01AM', '10/30/2014 9:01PM'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%P')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
# failure 13pm
sa = SArray(['10/30/2014 13:01pm'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %H:%M%p')
# failure hour 13 when %l should only have up to hour 12
sa = SArray(['10/30/2014 13:01'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %l:%M')
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %L:%M')
sa = SArray(['2013-05-07T10:04:10',
'1902-10-21T10:34:10UTC+05:45'])
expected = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(5.75))]
self.__test_equal(sa.str_to_datetime() ,expected,dt.datetime)
def test_apply_with_partial(self):
sa = SArray([1, 2, 3, 4, 5])
def concat_fn(character, number):
return '%s%d' % (character, number)
my_partial_fn = functools.partial(concat_fn, 'x')
sa_transformed = sa.apply(my_partial_fn)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sa = SArray([1, 2, 3, 4, 5])
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, number):
return '%s%d' % (self.character, number)
concatenator = Concatenator('x')
sa_transformed = sa.apply(concatenator)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_argmax_argmin(self):
sa = SArray([1,4,-1,10,3,5,8])
index = [sa.argmax(),sa.argmin()]
expected = [3,2]
self.assertEqual(index,expected)
sa = SArray([1,4.3,-1.4,0,3,5.6,8.9])
index = [sa.argmax(),sa.argmin()]
expected = [6,2]
self.assertEqual(index,expected)
#empty case
sa = SArray([])
index = [sa.argmax(),sa.argmin()]
expected = [None,None]
self.assertEqual(index,expected)
# non-numeric type
sa = SArray(["434","43"])
with self.assertRaises(TypeError):
sa.argmax()
with self.assertRaises(TypeError):
sa.argmin()
def test_apply_with_recursion(self):
sa = SArray(range(1000))
sastr = sa.astype(str)
rets = sa.apply(lambda x:sastr[x])
self.assertEqual(list(rets), list(sastr))
def test_save_sarray(self):
'''Saving a lazily evaluated SArray should not materialize it to the target folder
'''
data = SArray(range(1000))
data = data[data > 50]
#lazy and good
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
print(data)
def test_to_numpy(self):
X = SArray(range(100))
import numpy as np
import numpy.testing as nptest
Y = np.array(range(100))
nptest.assert_array_equal(X.to_numpy(), Y)
X = X.astype(str)
Y = np.array([str(i) for i in range(100)])
nptest.assert_array_equal(X.to_numpy(), Y)
def test_rolling_mean(self):
data = SArray(range(1000))
neg_data = SArray(range(-100,100,2))
### Small backward window including current
res = data.rolling_mean(-3,0)
expected = [None for i in range(3)] + [i + .5 for i in range(1,998)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_mean(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_mean(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=3)
expected[2] = 1.0
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=2)
expected[1] = 0.5
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=1)
expected[0] = 0.0
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_mean(-3,0,min_observations=-1)
res = neg_data.rolling_mean(-3,0)
expected = [None for i in range(3)] + [float(i) for i in range(-97,96,2)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = neg_data.astype(float).rolling_mean(-3,0)
self.__test_equal(res,expected,float)
# Test vector input
res = SArray(self.vec_data).rolling_mean(-3,0)
expected = [None for i in range(3)] + [array.array('d',[i+.5, i+1.5]) for i in range(2,9)]
self.__test_equal(res,expected,array.array)
### Small forward window including current
res = data.rolling_mean(0,4)
expected = [float(i) for i in range(2,998)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(0,4)
expected = [float(i) for i in range(-96,95,2)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### Small backward window not including current
res = data.rolling_mean(-5,-1)
expected = [None for i in range(5)] + [float(i) for i in range(2,997)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-5,-1)
expected = [None for i in range(5)] + [float(i) for i in range(-96,94,2)]
self.__test_equal(res,expected,float)
### Small forward window not including current
res = data.rolling_mean(1,5)
expected = [float(i) for i in range(3,998)] + [None for i in range(5)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(1,5)
expected = [float(i) for i in range(-94,96,2)] + [None for i in range(5)]
self.__test_equal(res,expected,float)
### "Centered" rolling aggregate
res = data.rolling_mean(-2,2)
expected = [None for i in range(2)] + [float(i) for i in range(2,998)] + [None for i in range(2)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-2,2)
expected = [None for i in range(2)] + [float(i) for i in range(-96,96,2)] + [None for i in range(2)]
self.__test_equal(res,expected,float)
### Lopsided rolling aggregate
res = data.rolling_mean(-2,1)
expected = [None for i in range(2)] + [i + .5 for i in range(1,998)] + [None for i in range(1)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-2,1)
expected = [None for i in range(2)] + [float(i) for i in range(-97,97,2)] + [None for i in range(1)]
self.__test_equal(res,expected,float)
### A very forward window
res = data.rolling_mean(500,502)
expected = [float(i) for i in range(501,999)] + [None for i in range(502)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(50,52)
expected = [float(i) for i in range(2,98,2)] + [None for i in range(52)]
self.__test_equal(res,expected,float)
### A very backward window
res = data.rolling_mean(-502,-500)
expected = [None for i in range(502)] + [float(i) for i in range(1,499)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-52,-50)
expected = [None for i in range(52)] + [float(i) for i in range(-98,-2,2)]
self.__test_equal(res,expected,float)
### A window size much larger than anticipated segment size
res = data.rolling_mean(0,749)
expected = [i + .5 for i in range(374,625)] + [None for i in range(749)]
self.__test_equal(res,expected,float)
### A window size larger than the array
res = data.rolling_mean(0,1000)
expected = [None for i in range(1000)]
self.__test_equal(res,expected,type(None))
### A window size of 1
res = data.rolling_mean(0,0)
self.__test_equal(res, list(data), float)
res = data.rolling_mean(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, float)
res = data.rolling_mean(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_mean(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_mean(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_mean(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_mean(0,1)
self.__test_equal(res, [1.5,2.5,None], float)
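# Hedged summary of the window convention used by all rolling_* tests here:
# rolling_mean(a, b) aggregates indices [i+a, i+b] inclusive and emits None
# wherever the window would run past either end, unless min_observations relaxes it:
#   SArray([1, 2, 3, 4]).rolling_mean(-1, 0)   # -> [None, 1.5, 2.5, 3.5]  (assumed)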
def test_rolling_sum(self):
data = SArray(range(1000))
neg_data = SArray(range(-100,100,2))
### Small backward window including current
res = data.rolling_sum(-3,0)
expected = [None for i in range(3)] + [i for i in range(6,3994,4)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_sum(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_sum(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=3)
expected[2] = 3
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=2)
expected[1] = 1
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=1)
expected[0] = 0
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=0)
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_sum(-3,0,min_observations=-1)
res = neg_data.rolling_sum(-3,0)
expected = [None for i in range(3)] + [i for i in range(-388,388,8)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = neg_data.astype(float).rolling_sum(-3,0)
self.__test_equal(res,expected,float)
# Test vector input
res = SArray(self.vec_data).rolling_sum(-3,0)
expected = [None for i in range(3)] + [array.array('d',[i, i+4]) for i in range(10,38,4)]
self.__test_equal(res,expected,array.array)
### Small forward window including current
res = data.rolling_sum(0,4)
expected = [i for i in range(10,4990,5)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(0,4)
expected = [i for i in range(-480,480,10)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### Small backward window not including current
res = data.rolling_sum(-5,-1)
expected = [None for i in range(5)] + [i for i in range(10,4985,5)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-5,-1)
expected = [None for i in range(5)] + [i for i in range(-480,470,10)]
self.__test_equal(res,expected,int)
### Small forward window not including current
res = data.rolling_sum(1,5)
expected = [i for i in range(15,4990,5)] + [None for i in range(5)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(1,5)
expected = [i for i in range(-470,480,10)] + [None for i in range(5)]
self.__test_equal(res,expected,int)
### "Centered" rolling aggregate
res = data.rolling_sum(-2,2)
expected = [None for i in range(2)] + [i for i in range(10,4990,5)] + [None for i in range(2)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-2,2)
expected = [None for i in range(2)] + [i for i in range(-480,480,10)] + [None for i in range(2)]
self.__test_equal(res,expected,int)
### Lopsided rolling aggregate
res = data.rolling_sum(-2,1)
expected = [None for i in range(2)] + [i for i in range(6,3994,4)] + [None for i in range(1)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-2,1)
expected = [None for i in range(2)] + [i for i in range(-388,388,8)] + [None for i in range(1)]
self.__test_equal(res,expected,int)
### A very forward window
res = data.rolling_sum(500,502)
expected = [i for i in range(1503,2997,3)] + [None for i in range(502)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(50,52)
expected = [i for i in range(6,294,6)] + [None for i in range(52)]
self.__test_equal(res,expected,int)
### A very backward window
res = data.rolling_sum(-502,-500)
expected = [None for i in range(502)] + [i for i in range(3,1497,3)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-52,-50)
expected = [None for i in range(52)] + [i for i in range(-294,-6,6)]
self.__test_equal(res,expected,int)
### A window size much larger than anticipated segment size
res = data.rolling_sum(0,749)
expected = [i for i in range(280875,469125,750)] + [None for i in range(749)]
self.__test_equal(res,expected,int)
### A window size larger than the array
res = data.rolling_sum(0,1000)
expected = [None for i in range(1000)]
self.__test_equal(res,expected,type(None))
### A window size of 1
res = data.rolling_sum(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_sum(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_sum(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_sum(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_sum(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_sum(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_sum(0,1)
self.__test_equal(res, [3,5,None], int)
def test_rolling_max(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_max(-3,0)
expected = [None for i in range(3)] + [i for i in range(3,1000)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_max(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_max(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_max(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_max(-3, 0, min_observations=3)
expected[2] = 2
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_max(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_max(-3,0)
### Small forward window including current
res = data.rolling_max(0,4)
expected = [float(i) for i in range(4,1000)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_max(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_max(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_max(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_max(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_max(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_max(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_max(0,1)
self.__test_equal(res, [2,3,None], int)
def test_rolling_min(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_min(-3,0)
expected = [None for i in range(3)] + [i for i in range(0,997)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_min(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_min(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_min(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_min(-3, 0, min_observations=3)
expected[2] = 0
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_min(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_min(-3,0)
### Small forward window including current
res = data.rolling_min(0,4)
expected = [float(i) for i in range(0,996)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_min(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_min(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_min(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_min(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_min(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_min(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_min(0,1)
self.__test_equal(res, [1,2,None], int)
def test_rolling_var(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_var(-3,0)
expected = [None for i in range(3)] + [1.25 for i in range(997)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_var(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_var(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_var(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_var(-3, 0, min_observations=3)
expected[2] = (2.0/3.0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_var(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_var(-3,0)
### Small forward window including current
res = data.rolling_var(0,4)
expected = [2 for i in range(996)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### A window size of 1
res = data.rolling_var(0,0)
self.__test_equal(res, [0 for i in range(1000)], float)
res = data.rolling_var(-2,-2)
self.__test_equal(res, [None,None] + [0 for i in range(998)], float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_var(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_var(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_var(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_var(0,1)
self.__test_equal(res, [.25,.25,None], float)
def test_rolling_stdv(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_stdv(-3,0)
expected = [None for i in range(3)] + [1.118033988749895 for i in range(997)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_stdv(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_stdv(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_stdv(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_stdv(-3, 0, min_observations=3)
expected[2] = math.sqrt(2.0/3.0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_stdv(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_stdv(-3,0)
### Small forward window including current
res = data.rolling_stdv(0,4)
expected = [math.sqrt(2) for i in range(996)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### A window size of 1
res = data.rolling_stdv(0,0)
self.__test_equal(res, [0 for i in range(1000)], float)
res = data.rolling_stdv(-2,-2)
self.__test_equal(res, [None,None] + [0 for i in range(998)], float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_stdv(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_stdv(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_stdv(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_stdv(0,1)
self.__test_equal(res, [.5,.5,None], float)
def test_rolling_count(self):
data = SArray(range(100))
### Small backward window including current
res = data.rolling_count(-3,0)
expected = [1,2,3] + [4 for i in range(97)]
self.__test_equal(res,expected,int)
# Test float inputs
res = data.astype(float).rolling_count(-3,0)
self.__test_equal(res,expected,int)
# Test vector input
res = SArray(self.vec_data).rolling_count(-3,0)
expected = [1,2,3] + [4 for i in range(7)]
self.__test_equal(res,expected,int)
### Test string input
res = SArray(self.string_data).rolling_count(-3,0)
self.__test_equal(res,expected[0:8],int)
### Small forward window including current
res = data.rolling_count(0,4)
expected = [5 for i in range(0,96)] + [4,3,2,1]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_count(0,0)
self.__test_equal(res, [1 for i in range(100)], int)
res = data.rolling_count(-2,-2)
self.__test_equal(res, [0,0] + [1 for i in range(98)], int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_count(4,2)
### Empty SArray
sa = SArray()
res = sa.rolling_count(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_count(0,1)
self.__test_equal(res, [2,2,1], int)
sa = SArray([1,2,None])
res = sa.rolling_count(0,1)
self.__test_equal(res, [2,1,0], int)
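# Hedged example: rolling_count counts only the non-None values inside the window,
# so the None above both fails to count itself and shrinks the preceding window:
#   SArray([1, None, 3]).rolling_count(0, 1)   # -> [1, 1, 1]  (assumed semantics)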
def cumulative_aggregate_comparison(self, out, ans):
import array
self.assertEqual(out.dtype, ans.dtype)
self.assertEqual(len(out), len(ans))
for i in range(len(out)):
if out[i] is None:
self.assertTrue(ans[i] is None)
if ans[i] is None:
self.assertTrue(out[i] is None)
if type(out[i]) != array.array:
self.assertAlmostEqual(out[i], ans[i])
else:
self.assertEqual(len(out[i]), len(ans[i]))
oi = out[i]
ansi = ans[i]
for j in range(len(oi)):
self.assertAlmostEqual(oi[j], ansi[j])
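# Hedged note on the cumulative_* tests below: a leading None stays None, while an
# interior None is skipped and the running aggregate carries the previous value
# forward (e.g. cumulative_sum([None, 1, None, 3]) -> [None, 1, 1, 4]).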
def test_cumulative_sum(self):
def single_test(src, ans):
out = src.cumulative_sum()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_sum()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.1, 1.2, 3.3, 6.4, 10.5, 15.6, 21.7, 28.8])
)
single_test(
SArray([[11.0, 2.0], [22.0, 1.0], [3.0, 4.0], [4.0, 4.0]]),
SArray([[11.0, 2.0], [33.0, 3.0], [36.0, 7.0], [40.0, 11.0]])
)
single_test(
SArray([None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 1, 1, 4, 4, 9])
)
single_test(
SArray([None, [33.0, 3.0], [3.0, 4.0], [4.0, 4.0]]),
SArray([None, [33.0, 3.0], [36.0, 7.0], [40.0, 11.0]])
)
single_test(
SArray([None, [33.0, 3.0], None, [4.0, 4.0]]),
SArray([None, [33.0, 3.0], [33.0, 3.0], [37.0, 7.0]])
)
def test_cumulative_mean(self):
def single_test(src, ans):
out = src.cumulative_mean()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_mean()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.1, 0.6, 1.1, 1.6, 2.1, 2.6, 3.1, 3.6])
)
single_test(
SArray([[11.0, 22.0], [33.0, 66.0], [4.0, 2.0], [4.0, 2.0]]),
SArray([[11.0, 22.0], [22.0, 44.0], [16.0, 30.0], [13.0, 23.0]])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 1, 1.0, 2.0, 2.0, 3.0])
)
single_test(
SArray([None, [11.0, 22.0], [33.0, 66.0], [4.0, 2.0]]),
SArray([None, [11.0, 22.0], [22.0, 44.0], [16.0, 30.0]])
)
single_test(
SArray([None, [11.0, 22.0], None, [33.0, 66.0], [4.0, 2.0]]),
SArray([None, [11.0, 22.0], [11.0, 22.0], [22.0, 44.0], [16.0, 30.0]])
)
def test_cumulative_min(self):
def single_test(src, ans):
out = src.cumulative_min()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_min()
single_test(
SArray([0, 1, 2, 3, 4, 5, -1, 7, 8, -2, 10]),
SArray([0, 0, 0, 0, 0, 0, -1, -1, -1, -2, -2])
)
single_test(
SArray([7.1, 6.1, 3.1, 3.9, 4.1, 2.1, 2.9, 0.1]),
SArray([7.1, 6.1, 3.1, 3.1, 3.1, 2.1, 2.1, 0.1])
)
single_test(
SArray([None, 8, 6, 3, 4, None, 6, 2, 8, 9, 1]),
SArray([None, 8, 6, 3, 3, 3, 3, 2, 2, 2, 1])
)
single_test(
SArray([None, 5, None, 3, None, 10]),
SArray([None, 5, 5, 3, 3, 3])
)
def test_cumulative_max(self):
def single_test(src, ans):
out = src.cumulative_max()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_max()
single_test(
SArray([0, 1, 0, 3, 5, 4, 1, 7, 6, 2, 10]),
SArray([0, 1, 1, 3, 5, 5, 5, 7, 7, 7, 10])
)
single_test(
SArray([2.1, 6.1, 3.1, 3.9, 2.1, 8.1, 8.9, 10.1]),
SArray([2.1, 6.1, 6.1, 6.1, 6.1, 8.1, 8.9, 10.1])
)
single_test(
SArray([None, 1, 6, 3, 4, None, 4, 2, 8, 9, 1]),
SArray([None, 1, 6, 6, 6, 6, 6, 6, 8, 9, 9])
)
single_test(
SArray([None, 2, None, 3, None, 10]),
SArray([None, 2, 2, 3, 3, 10])
)
def test_cumulative_std(self):
def single_test(src, ans):
out = src.cumulative_std()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_std()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.5, 0.816496580927726, 1.118033988749895,
1.4142135623730951, 1.707825127659933, 2.0, 2.29128784747792,
2.581988897471611, 2.8722813232690143, 3.1622776601683795])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.0, 0.5, 0.81649658092772603, 1.1180339887498949,
1.4142135623730949, 1.707825127659933, 1.9999999999999998,
2.2912878474779195])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.5, 0.816496580927726, 1.118033988749895,
1.4142135623730951, 1.707825127659933, 2.0, 2.29128784747792,
2.581988897471611, 2.8722813232690143, 3.1622776601683795])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 0.0, 0.0, 1.0, 1.0, 1.6329931618554521])
)
def test_cumulative_var(self):
def single_test(src, ans):
out = src.cumulative_var()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_var()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.25, 0.6666666666666666, 1.25, 2.0, 2.9166666666666665,
4.0, 5.25, 6.666666666666667, 8.25, 10.0])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray( [0.0, 0.25000000000000006, 0.6666666666666666, 1.25,
1.9999999999999996, 2.916666666666666, 3.999999999999999,
5.249999999999998])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.25, 0.6666666666666666, 1.25, 2.0, 2.9166666666666665,
4.0, 5.25, 6.666666666666667, 8.25, 10.0])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 0.0, 0.0, 1.0, 1.0, 2.6666666666666665])
)
def test_numpy_datetime64(self):
# Give naive datetimes an explicit GMT(0) tzinfo so they compare equal to the parsed values
expected = [i.replace(tzinfo=GMT(0.0)) \
if i is not None and i.tzinfo is None else i for i in self.datetime_data]
# A regular list
iso_str_list = [np.datetime64('2013-05-07T10:04:10Z'),
np.datetime64('1902-10-21T10:34:10Z'),
None]
sa = SArray(iso_str_list)
self.__test_equal(sa,expected,dt.datetime)
iso_str_list[2] = np.datetime64('NaT')
sa = SArray(iso_str_list)
self.__test_equal(sa,expected,dt.datetime)
# A numpy array
np_ary = np.array(iso_str_list)
sa = SArray(np_ary)
self.__test_equal(sa,expected,dt.datetime)
### Every possible type of datetime64
test_str = '1969-12-31T23:59:56Z'
available_time_units = ['h','m','s','ms','us','ns','ps','fs','as']
expected = [dt.datetime(1969,12,31,23,59,56,tzinfo=GMT(0.0)) for i in range(7)]
expected.insert(0,dt.datetime(1969,12,31,23,59,0,tzinfo=GMT(0.0)))
expected.insert(0,dt.datetime(1969,12,31,23,0,0,tzinfo=GMT(0.0)))
for i in range(len(available_time_units)):
sa = SArray([np.datetime64(test_str,available_time_units[i])])
self.__test_equal(sa,[expected[i]],dt.datetime)
test_str = '1908-06-01'
available_date_units = ['Y','M','W','D']
expected = [dt.datetime(1908,6,1,0,0,0,tzinfo=GMT(0.0)) for i in range(4)]
expected[2] = dt.datetime(1908,5,28,0,0,0,tzinfo=GMT(0.0)) # numpy 'W' weeks are epoch-aligned; 1970-01-01 was a Thursday
expected[0] = dt.datetime(1908,1,1,0,0,0,tzinfo=GMT(0.0))
for i in range(len(available_date_units)):
sa = SArray([np.datetime64(test_str,available_date_units[i])])
self.__test_equal(sa,[expected[i]],dt.datetime)
# Daylight savings time (Just to be safe. datetime64 deals in UTC, and
# we store times in UTC by default, so this shouldn't affect anything)
sa = SArray([np.datetime64('2015-03-08T02:38:00-08')])
expected = [dt.datetime(2015,3,8,10,38,tzinfo=GMT(0.0))]
self.__test_equal(sa, expected, dt.datetime)
# timezone considerations
sa = SArray([np.datetime64('2016-01-01T05:45:00+0545')])
expected = [dt.datetime(2016,1,1,0,0,0,tzinfo=GMT(0.0))]
self.__test_equal(sa, expected, dt.datetime)
### Out of our datetime range
with self.assertRaises(TypeError):
sa = SArray([np.datetime64('1066-10-14T09:00:00Z')])
def test_pandas_timestamp(self):
iso_str_list = [pd.Timestamp('2013-05-07T10:04:10')]
# Copyright (C) 2021 ServiceNow, Inc.
import pytest
import pandas as pd
import re
from nrcan_p2.data_processing.preprocessing_dfcol import (
rm_dbl_space,
rm_cid,
rm_dbl_punct,
convert_to_ascii,
lower,
rm_punct,
rm_newline,
rm_triple_chars,
rm_mid_num_punct,
rm_word_all_punct,
rm_newline_hyphenation,
rm_mid_word_punct,
rm_beg_end_word_punct,
merge_words,
merge_words_bkwd,
rm_nonprintable,
rm_punct_mid_punct,
rm_non_textual_punct,
rm_newline_except_end,
strip_space,
rm_email,
rm_url,
rm_doi,
rm_phonenumber,
rm_slash
)
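# Hedged usage sketch (not part of the test module): each preprocessor imported
# above maps a pandas Series of raw strings to a cleaned Series of the same length,
# so steps can be chained directly:
#   cleaned = rm_dbl_space(rm_newline(pd.Series(['a \n  b'])))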
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', ' blah \t\t \t \n blah'],
['Alaska. \n', ' blah \n blah']
)
]
)
def test_rm_dbl_space(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_dbl_space(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', '(cid:1010)blah(cid:4)\n'],
['Alaska. \n', 'blah\n']
)
]
)
def test_rm_cid(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_cid(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
([' Alaska. \n', '\nblah \n '],
['Alaska. \n', '\nblah \n']
)
]
)
def test_strip_space(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = strip_space(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', '||||kkll-ll???!!??...??....'],
['Alaska. \n', '|kkll-ll?!?...?.']
)
]
)
def test_rm_dbl_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
print(df_test)
res = rm_dbl_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', '𝟏−𝑨𝑨𝑹.. \n', "1 %>+* .B 4!\".𝐵 "],
['Alaska. \n', '1-AAR.. \n', "1 %>+* .B 4!\".B "]
)
]
)
def test_convert_to_ascii(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = convert_to_ascii(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', 'AL_aska.. \n'],
['alaska. \n', 'al_aska.. \n']
)
]
)
def test_lower(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = lower(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n', 'Al_aska.. \n'],
['Alaska \n', 'Al aska \n']
)
]
)
def test_rm_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
print(df_test)
res = rm_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['Alaska. \n\n', '\nAl_aska.. \n'],
['Alaska. ', ' Al_aska.. ']
)
]
)
def test_rm_newline(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_newline(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['..!. ..a. @.@ .. !!', 'Thhh.!.iiiiiiiss ! ~ is bad...', '"This!"', '"This.,"'],
['.. ..a. @@ .. !!', 'Thhh..iiiiiiiss ! ~ is bad..', '"This!"', '"This."']
)
]
)
def test_rm_punct_mid_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_punct_mid_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['..!. ..a. @.@ .. !!', 'Thhhiiiiiiiss ! ~ is bad...'],
[' ..a. ', 'Thhhiiiiiiiss is bad...']
)
]
)
def test_rm_word_all_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_word_all_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['..\n\n !!\n', '\nThhhii \n '],
['.. !!\n', ' Thhhii ']
)
]
)
def test_rm_newline_except_end(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_newline_except_end(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['This is normal..', 'Thhhiiiiiiiss is bad...'],
['This is normal..', 'Thiss is bad.']
)
]
)
def test_rm_triple_chars(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_triple_chars(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['This is normal..', 'This\x07 is bad \x0f'],
['This is normal..', 'This is bad ']
)
]
)
def test_rm_nonprintable(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_nonprintable(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(['00-00-00', '12-A2-B50', '132.00-130.3444', '132.00-130,123,99+E50', '-132.00+34', '12(2lkj2)09'],
['00 - 00 - 00', '12-A2-B50', '132.00 - 130.3444', '132.00 - 130 , 123 , 99+E50', '-132.00 + 34', '12 ( 2lkj2 ) 09']
)
]
)
def test_rm_mid_num_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_mid_num_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
# this function also removes double spaces
(["cur-\nrent", "cur- \n rent", "cur; -\n rent"],
["current", "current", "cur; -\n rent"]
)
]
)
def test_rm_newline_hyphenation(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_newline_hyphenation(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(["curr-ent", "\"current\"", "cu\"()rr#en%tcur", "cur.ent,.", "cur,'ent.", "cur.,", "cur'"],
["curr-ent", "\"current\"", "currentcur", "cur.ent.", "cur'ent.", "cur.,", "cur'"]
),
(["H23;0", "223+E02", "-23.0003"],
["H23;0", "223+E02", "-23.0003"]
)
]
)
def test_rm_mid_word_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_mid_word_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(["-+?curr-ent", "\"current\"", "curr-", "cur.ent.,", "cur,'", "cur.,", ".cur"],
["curr-ent", "\"current\"", "curr", "cur.ent.,", 'cur,', "cur.,", "cur"]
),
(["H23;0", "223+E02", "-23.0003"],
["H23;0", "223+E02", "-23.0003"]
)
]
)
def test_rm_beg_end_word_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_beg_end_word_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(["-+?curr-ent", "\"current\"", "curr-", "cur.ent.,", "cur,'", "cur.,",],
["-?curr-ent", "current", "curr-", "cur.ent.,", 'cur,', "cur.,"]
),
(["H23;0", "223+E02", "-23.0003"],
["H23;0", "223E02", "-23.0003"]
)
]
)
def test_rm_non_textual_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_non_textual_punct(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
(["-+?cu##r#r-ent", "\"cur?!rent\"", "curr-", "cur.ent.,", "cur,'", "cur.,",],
["curr-ent", "\"current\"", "curr", "cur.ent.,", 'cur,', "cur.,"]
),
(["H23;0", "223+E02", "-23.0003"],
["H23;0", "223+E02", "-23.0003"]
)
]
)
def test_rm_beg_end_word_punct_mid_word_punct(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_beg_end_word_punct(df_test.text)
res = rm_mid_word_punct(res)
print(res)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
([""" withi n s h ort di stEm ce s and a t different dep t h s corr e s p onding""",
""" withi n s h ort di stan ce s and a t different dep t h s corr e s p onding""",
""" withi n s h ort di stEm ce s and a t different dep t h s corr e s p onding
v P.riat ions in t he q_ua l i ty of t he ground wat e r derived f rom the
drift Fir e to b e expec t ed One we l l mn.y y i el d A. moder r: t ely ha rd
sli ghtly mine r e liz e d wRt e r whe r ea s P.nother we ll sunk to a similnr
de::i th M.d lo c<- t ed only 50 f ee t dis t FLnt mny g iv e water th a t is too
h i gh in dissolved su lph~ t e s s ~l ts to be used ei ther for drinking
or stock w~ t e ring """,
"""i n t h e y""",
""" Wr1 ter the.t cont a i ns a l 'lrgo amoun t of s di w".l. carbo n-.l t e ::md
sm~,1 1 '-tmounts of cr.:lcium -3.nd rrDgnesi um sr:i.l ts is sof t :mt if
tht:, cal c i um 'md r.i'1gnesitm salt s a:r:, pr e s ent in l :"rge a.mo 11nt s
t he wc.ter""",
"""and o nt he way""",
"""and a new day will be coming"""
],
["""within short di stEm ce sand at different depths corresponding""",
"""within short distances and at different depths corresponding""",
""" within short di stEm ce sand at different depths corresponding
v P.riat ions in the q_ua l i ty of the groundwater derived from the
drift Fire to be expected One well mn.y yield A. moder r: t ely hard
slightly miner e liz ed wRt er whereas P.nother well sunk to a similnr
de::i th M.d lo c<- ted only 50 feet dist FLnt mny give water that is too
high in dissolved su lph~ t es s ~l ts to be used either for drinking
or stock w~ t e ring """,
"""in they""",
""" Wr1 ter the.t contains a l 'lrgo amount of s di w".l. carbo n-.l t e ::md
sm~,1 1 '-tmounts of cr.:lcium -3.nd rrDgnesi um sr:i.l ts is soft :mt if
tht:, calcium 'md r.i'1gnesitm salts a:r:, present in l :"rge a.mo 11nt st
he wc.ter""",
"""and on the way""",
"""and anew day will be coming"""
]
),
]
)
def test_merge_words(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
#df_test['text'] = df_test.text.str.replace('\s+', ' ')
res = merge_words(df_test.text)
expected_text = [re.sub(r'\s+', ' ', text).strip() for text in expected_text]
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text",
[
([""" withi n s h ort di stEm ce s and a t different dep t h s corr e s p onding""",
""" withi n s h ort di stan ce s and a t different dep t h s corr e s p onding""",
""" withi n s h ort di stEm ce s and a t different dep t h s corr e s p onding
v P.riat ions in t he q_ua l i ty of t he ground wat e r derived f rom the
drift Fir e to b e expec t ed One we l l mn.y y i el d A. moder r: t ely ha rd
sli ghtly mine r e liz e d wRt e r whe r ea s P.nother we ll sunk to a similnr
de::i th M.d lo c<- t ed only 50 f ee t dis t FLnt mny g iv e water th a t is too
h i gh in dissolved su lph~ t e s s ~l ts to be used ei ther for drinking
or stock w~ t e ring """,
"""i n t h e y""",
""" Wr1 ter the.t cont a i ns a l 'lrgo amoun t of s di w".l. carbo n-.l t e ::md
sm~,1 1 '-tmounts of cr.:lcium -3.nd rrDgnesi um sr:i.l ts is sof t :mt if
tht:, cal c i um 'md r.i'1gnesitm salt s a:r:, pr e s ent in l :"rge a.mo 11nt s
t he wc.ter"""
],
["""within short di stEm ce sand at different depths corresponding""",
"""within short distance sand at different depths corresponding""",
""" within short di stEm ce sand at different depths corresponding
v P.riat ions in the q_ua l i ty of the groundwater derived from the
drift Fire to be expected One well mn.y yield A. moder r: t ely hard
slightly mine re liz ed wRt er whereas P.nother well sunk to a similnr
de::i th M.d lo c<- ted only 50 feet dist FLnt mny give water that is too
high in dissolved su lph~ t es s ~l ts to be used either for drinking
or stock w~ t e ring """,
"""in they""",
""" Wr1 ter the.t contains a l 'lrgo amount of s di w".l. carbo n-.l t e ::md
sm~,1 1 '-tmounts of cr.:lcium -3.nd rrDgnesi um sr:i.l ts is soft :mt if
tht:, calcium 'md r.i'1gnesitm salts a:r:, present in l :"rge a.mo 11nt s
the wc.ter"""
]
),
]
)
def test_merge_words_bkwd(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
#df_test['text'] = df_test.text.str.replace('\s+', ' ')
res = merge_words_bkwd(df_test.text)
expected_text = [re.sub(r'[\s]+', ' ', text).strip() for text in expected_text]
print(res)
assert list(res.values) == expected_text
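# Hedged observation: merge_words and merge_words_bkwd differ only in scan
# direction, which changes how ambiguous fragments re-attach -- compare the
# "di stan ce s and" input above: forward merging yields "distances and",
# backward merging yields "distance sand".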
@pytest.mark.parametrize("text_col, expected_text", [
(["This <EMAIL> name",
"<EMAIL>",
"This <EMAIL>. This name",
"This (<EMAIL>) there"
],
["This name",
" ",
"This . This name",
"This ( ) there"]),
])
def test_rm_email(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_email(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text", [
(["This google.com name",
"this www.gg-gg.com/com/com-g. name",
" a-w.ca",
"This (https://dd.d.org/10.0.0.0/303.0) there",
"blah ftp://ftp2.cits.rncan.gc.ca/pub/geott/ess_pubs/214/214401/gscof_1746_e_2003_mn1.pdf blah",
"ab out ground wo.ter in", # expect this one to be removed (it's a legal url, but a bad one)
"co..lled ", # this should not be removed
"O BARP.tt:GER R~S~ARCH",
"1063028 490, rue de la Couronne Quebec (Quebec) G1K 9A9 Tel. : 418-654-2677 Telecopieur : 418-654-2660 Courriel : <EMAIL> Web : http://www.cgcq.rncan.gc.ca/bibliotheque/"
],
["This name",
"this . name",
" ",
"This ( ) there",
"blah blah",
"ab out ground in",
"co..lled ",
"O R~S~ARCH",
"1063028 490, rue de la Couronne Quebec (Quebec) G1K 9A9 Tel. : 418-654-2677 Telecopieur : 418-654-2660 Courriel : Web : "
]),
])
def test_rm_url(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_url(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text", [
(["This doi:10.1130/B30450.1 name",
"this doi:10.1016/j.precamres.2009.04.005. name",
" doi:10.1130/B30450.1",
"This (doi:10.1130/B30450.1) there",
"This (https://doi.org/10.4095/130284) there",
"emediation, doi 10.1111/j.1745- 6592.1989.tb01125.x blah",
"thidoi "
],
["This name",
"this . name", #this one is sort of a problem
" ",
"This ( ) there",
"This ( ) there",
"emediation, - 6592.1989.tb01125.x blah", # this also happens to be covered by the doi removal
"thidoi "
])
])
def test_rm_doi(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_doi(df_test.text)
assert list(res.values) == expected_text
@pytest.mark.parametrize("text_col, expected_text", [
(["this 418-654-2660 is ",
"this 1919-1920 is not a phonenumber",
"this H202 is not",
"this +798798095",
" du Canada, 615, rue Booth, Ottawa, Ontario, K1A 0E9, telephone : (613) 995-5326, courriel ",
"G1K 9A9 Tel. : 418-654-2677 Telecopieur :",
"KIA 0E4 (613) 995-9351",
". Geotechnical profile for core 20130290075 fr",
"gh natural diamonds 1983-1991 (million carats)",
". 32, p. 2057-2070.",
"d 1988, GSC OF-1636, NGR-101-1988, NTS 131, 13J, rn::",
"ments (anomalous) CAMS-17434 4040 +- 70",
"on 5, p. 983-1006",
" 63 km above mouth (1974-1992)"
],
["this is ",
"this 1919-1920 is not a phonenumber",
"this H202 is not",
"this ",
" du Canada, 615, rue Booth, Ottawa, Ontario, K1A 0E9, telephone : , courriel ",
"G1K 9A9 Tel. : Telecopieur :",
"KIA 0E4 ",
". Geotechnical profile for core 20130290075 fr",
"gh natural diamonds 1983-1991 (million carats)",
". 32, p. 2057-2070.",
"d 1988, GSC OF-1636, NGR-101-1988, NTS 131, 13J, rn::",
"ments (anomalous) CAMS-17434 4040 +- 70",
"on 5, p. 983-1006",
" 63 km above mouth (1974-1992)"
])
])
def test_rm_phonenumber(text_col, expected_text):
df_test = pd.DataFrame({'text': text_col})
res = rm_phonenumber(df_test.text)
assert list(res.values) == expected_text
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas_should # noqa
class TestEqualAccessorMixin(object):
def test_equal_true(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3], columns=['id'])
assert df1.should.equal(df2)
def test_equal_false(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3, 4], columns=['id'])
assert not df1.should.equal(df2)
@pytest.mark.parametrize('alias_name', [
'be_equal_to', 'be_equals_to', 'be_eq_to', 'eq',
])
def test_equal_aliases(self, alias_name):
df = pd.DataFrame([1, 2, 3], columns=['id'])
assert hasattr(df.should, alias_name)
def test_not_equal_true(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3, 4], columns=['id'])
assert df1.should.not_equal(df2)
def test_not_equal_false(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3], columns=['id'])
assert not df1.should.not_equal(df2)
@pytest.mark.parametrize('alias_name', [
'be_not_equal_to', 'be_not_equals_to', 'be_neq_to', 'neq',
])
def test_not_equal_aliases(self, alias_name):
df = pd.DataFrame([1, 2, 3], columns=['id'])
assert hasattr(df.should, alias_name)
def test_have_same_length_true(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3], columns=['id'])
assert df1.should.have_same_length(df2)
def test_have_same_length_false(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3, 4], columns=['id'])
assert not df1.should.have_same_length(df2)
def test_have_same_length_multiple(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2], columns=['id'])
df3 = pd.DataFrame([3], columns=['id'])
assert df1.should.have_same_length(df2, df3)
def test_have_same_width_true(self):
data1 = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df1 = pd.DataFrame(data1, columns=['id', 'name', 'age'])
data2 = [
('apple', 198, 'red'),
('banana', 128, 'yellow'),
]
df2 = pd.DataFrame(data2, columns=['fruit', 'price', 'color'])
assert df1.should.have_same_width(df2)
def test_have_same_width_false(self):
data1 = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df1 = pd.DataFrame(data1, columns=['id', 'name', 'age'])
data2 = [
('apple', 198),
('banana', 128),
]
df2 = pd.DataFrame(data2, columns=['fruit', 'price'])
assert not df1.should.have_same_width(df2)
def test_have_same_width_multiple(self):
data1 = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df1 = pd.DataFrame(data1, columns=['id', 'name', 'age'])
import argparse
import os
import shutil
import zipfile
import pathlib
import re
from datetime import datetime
import collections
import pandas as pd
import geohash
import math
import helpers
import plotly.express as px
ControlInfo = collections.namedtuple("ControlInfo", ["num_tracks", "date", "duration"])
def parse_args():
parser = argparse.ArgumentParser(
description="Converts HYSPLIT Locust model output into a geohashed CSV"
)
parser.add_argument("--input_dir", type=str, required=True)
parser.add_argument("--output_file", type=str, default="./output.csv")
parser.add_argument("--temp_dir", type=str, default="./temp")
return parser.parse_args()
def extract_input(input_dir, temp_dir):
os.makedirs(temp_dir, exist_ok=True)
p = pathlib.Path(input_dir)
swarm_ids = []
zip_files = p.glob("*.zip")
for zf in zip_files:
shutil.copy(zf, temp_dir)
temp_zip_file = os.path.join(temp_dir, zf.name)
with zipfile.ZipFile(temp_zip_file, "r") as zip_ref:
zip_ref.extractall(temp_dir)
swarm_ids.append(zf.stem)
return swarm_ids
def parse_control_files(data_dir, ids):
# regex to ignore comments on a line
line_re = re.compile("(.*)#")
# map to hold extract control info
swarm_control_info = {}
p = pathlib.Path(data_dir)
# find control files for each swarm id
for id in ids:
swarm_control_info[id] = []
control_files = p.glob(f"{id}_CONTROL.*.txt")
for cf in control_files:
# open the control file
with open(cf, "r") as f:
# strip comments
lines = f.read().splitlines()
stripped_lines = []
for l in lines:
m = line_re.match(l)
if m:
stripped_lines.append(m.group(1).strip())
else:
stripped_lines.append(l.strip())
# read in required data
parsed_date = datetime.strptime(stripped_lines[0], "%y %m %d %H %M")
number_of_tracks = stripped_lines[1]
duration_hrs = stripped_lines[2 + int(number_of_tracks)]
swarm_control_info[id].append(
ControlInfo(number_of_tracks, parsed_date, duration_hrs)
)
swarm_control_info[id].sort(key=lambda d: d.date)
ctrl_arr = swarm_control_info[id]
return swarm_control_info
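# Illustrative sketch only (not taken from any real CONTROL file): given the
# parsing above, a minimal input that parse_control_files would accept is
#
#   20 02 15 06 00        # start date, "%y %m %d %H %M"
#   2                     # number of tracks
#   ...                   # one line per track (contents not parsed here)
#   ...                   # one line per track (contents not parsed here)
#   72                    # duration in hours, at index 2 + number of tracks
#
# which yields ControlInfo(num_tracks='2', date=datetime(2020, 2, 15, 6, 0),
# duration='72'); note that the numeric fields are kept as strings.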
def parse_trajectory_files(data_dir, ids, control_info):
day_re = re.compile(r".*_day(\d+)")  # raw string so '\d' is not treated as an invalid escape
p = pathlib.Path(data_dir)
# list of trajectory dataframes
trajectories = []
# find control files for each swarm id
for id in ids:
trajectory_data = {}
trajectory_files = p.glob(f"{id}_day*.txt")
for tf in trajectory_files:
# extract the day and use that to look up the control info
day = int(day_re.match(str(tf)).group(1))
ci = control_info[id][day - 1]
# load csv
tdf = pd.read_csv(tf, names=["point_id", "x", "y", "altitude"])
# get rid of the end marker
tdf = tdf[tdf["point_id"] != "END"]
# The ID field packs a track number into its first digit and a record
# number into the remaining digits; the two need to be split apart.
tdf["track"] = tdf["point_id"].str.strip().str[0]
tdf["point_id"] = tdf["point_id"].str.strip().str[1:]
tdf["date"] = ci.date
# Group by the track ID number.
track_groups = tdf.groupby("track")
# Collect the grouped data frames by track/day
for track, frame in track_groups:
if track not in trajectory_data:
trajectory_data[track] = []
trajectory_data[track].append(frame)
# Compose the data for each track into a single df spanning multiple days
for _, d in trajectory_data.items():
tdf = | pd.concat(d) | pandas.concat |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
self.cache_field = cache_field
self.frame_attr = frame_attr
def __get__(self, obj, type=None):
return getattr(obj, self.cache_field, None)
def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
raise NotImplementedError
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
"""
Sparse version of Panel
Parameters
----------
frames : dict of DataFrame objects
items : array-like
major_axis : array-like
minor_axis : array-like
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
Notes
-----
"""
ndim = 3
_typ = 'panel'
_subtyp = 'sparse_panel'
def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
default_fill_value=np.nan, default_kind='block',
copy=False):
if isinstance(frames, np.ndarray):
new_frames = {}
for item, vals in zip(items, frames):
new_frames[item] = \
SparseDataFrame(vals, index=major_axis,
columns=minor_axis,
default_fill_value=default_fill_value,
default_kind=default_kind)
frames = new_frames
if not isinstance(frames, dict):
raise TypeError('input must be a dict, a %r was passed' %
type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
# pre-filter, if necessary
if items is None:
items = Index(sorted(frames.keys()))
items = _ensure_index(items)
(clean_frames,
major_axis,
minor_axis) = _convert_frames(frames, major_axis,
minor_axis, kind=kind,
fill_value=fill_value)
self._frames = clean_frames
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
self.minor_axis = minor_axis
def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
def __array_wrap__(self, result):
return SparsePanel(result, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
@classmethod
def from_dict(cls, data):
"""
Analogous to Panel.from_dict
"""
return SparsePanel(data)
def to_dense(self):
"""
Convert SparsePanel to (dense) Panel
Returns
-------
dense : Panel
"""
return Panel(self.values, self.items, self.major_axis,
self.minor_axis)
def as_matrix(self):
return self.values
@property
def values(self):
# return dense values
return np.array([self._frames[item].values
for item in self.items])
# need a special property for items to make the field assignable
_items = None
def _get_items(self):
return self._items
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
raise NotImplementedError
# need to create new frames dict
old_frame_dict = self._frames
old_items = self._items
self._frames = dict((new_k, old_frame_dict[old_k])
for new_k, old_k in zip(new_items, old_items))
self._items = new_items
items = property(fget=_get_items, fset=_set_items)
# DataFrame's index
major_axis = SparsePanelAxis('_major_axis', 'index')
# DataFrame's columns / "items"
minor_axis = SparsePanelAxis('_minor_axis', 'columns')
def _ixs(self, i, axis=0):
"""
for compat as we don't support Block Manager here
i : int, slice, or sequence of integers
axis : int
"""
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if com.is_list_like(key):
return self.reindex(**{self._get_axis_name(axis): key})
return self.xs(key, axis=axis)
def _slice(self, slobj, axis=0, raise_on_error=False, typ=None):
"""
for compat as we don't support Block Manager here
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
return self.reindex(**{axis: index[slobj]})
def _get_item_cache(self, key):
return self._frames[key]
def __setitem__(self, key, value):
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
if not isinstance(value, SparseDataFrame):
value = value.to_sparse(fill_value=self.default_fill_value,
kind=self.default_kind)
else:
raise ValueError('only DataFrame objects can be set currently')
self._frames[key] = value
if key not in self.items:
self._items = Index(list(self.items) + [key])
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Panel
Returns
-------
panel : SparsePanel
"""
dense = self.to_dense().set_value(item, major, minor, value)
return dense.to_sparse(kind=self.default_kind,
fill_value=self.default_fill_value)
def __delitem__(self, key):
loc = self.items.get_loc(key)
indices = lrange(loc) + lrange(loc + 1, len(self.items))
del self._frames[key]
self._items = self._items.take(indices)
def __getstate__(self):
# pickling
return (self._frames, com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value, self.default_kind)
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
def copy(self, deep=True):
"""
Make a copy of the sparse panel
Returns
-------
copy : SparsePanel
"""
d = self._construct_axes_dict()
if deep:
new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames))
d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
else:
new_data = self._frames.copy()
d['default_fill_value']=self.default_fill_value
d['default_kind']=self.default_kind
return SparsePanel(new_data, **d)
def to_frame(self, filter_observations=True):
"""
Convert SparsePanel to (dense) DataFrame
Returns
-------
frame : DataFrame
"""
if not filter_observations:
raise TypeError('filter_observations=False not supported for '
'SparsePanel.to_long')
I, N, K = self.shape
counts = np.zeros(N * K, dtype=int)
d_values = {}
d_indexer = {}
for item in self.items:
frame = self[item]
values, major, minor = _stack_sparse_info(frame)
# values are stacked column-major
indexer = minor * N + major
counts.put(indexer, counts.take(indexer) + 1) # cuteness
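# Worked example (illustrative): with N == 3 rows, an observation at
# major == 1, minor == 2 lands at indexer == 2 * 3 + 1 == 7, i.e. its
# column-major position in the stacked (N * K,) layout.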
d_values[item] = values
d_indexer[item] = indexer
# have full set of observations for each item
mask = counts == I
# for each item, take mask values at index locations for those sparse
# values, and use that to select values
values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
for item in self.items])
inds, = mask.nonzero()
# still column major
major_labels = inds % N
minor_labels = inds // N
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
verify_integrity=False)
df = DataFrame(values, index=index, columns=self.items)
return df.sortlevel(level=0)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def reindex(self, major=None, items=None, minor=None, major_axis=None,
minor_axis=None, copy=False):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = com._mut_exclusive(major=major, major_axis=major_axis)
minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)
if | com._all_none(items, major, minor) | pandas.core.common._all_none |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 08:13:14 2020
@author: abhijit
"""
#%% preamble
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#%% gapminder data
gapminder = pd.read_csv('data/gapminder.tsv', sep='\t')
gapminder[:5]
gapminder.head(5)
new_gm = gapminder[['country','gdpPercap','lifeExp']]
canada = gapminder.query('country=="Canada"')
canada = gapminder[gapminder['country']=='Canada']
canada = gapminder.groupby('country').get_group('Canada')
mtcars = pd.read_csv('data/mtcars.csv')
mtcars['kml'] = mtcars['mpg'] * 1.6/3.8
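# 1 mile is about 1.609 km and 1 US gallon about 3.785 L, so mpg * 1.6 / 3.8
# is a rough miles-per-gallon to km-per-litre conversion (about mpg * 0.42).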
#%% survey data
from glob import glob
fnames = glob('data/survey*.csv')
visited, person, site, survey = [pd.read_csv(f) for f in fnames]
d1 = pd.merge(survey, visited, how='inner', left_on = 'taken', right_on = 'ident')
d2 = pd.merge(survey, person, how = 'outer', left_on = 'person', right_on = 'ident')
#%% weather data
weather = | pd.read_csv('data/weather.csv') | pandas.read_csv |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# this case matches the SOFR leg's maturity to the LIBOR leg and flips the legs to find the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
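# Note: the SOFR reference settings below are immediately overwritten; the second request again uses LIBOR with a 3m reference tenor.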
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
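# MockDate freezes datetime.date.today() at 2019-05-25 (pd.Timestamp.today is mocked below) so the relative-date cases are deterministic.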
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
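# Mock for GsDataApi.get_market_data: return a single intraday 'Last' point when requested, otherwise a three-day daily varSwap series.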
def _mock_var_swap_data(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
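# Like _mock_var_swap_data, but returns both '1y' and '13m' tenors for the forward-starting var_swap case.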
def _mock_var_swap_fwd(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
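# Returns only the '1y' tenor, to exercise the 'no data for a tenor' branch in test_var_swap_fwd.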
def _mock_var_swap_1t(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
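# Helpers for test_var_term: expectations are sliced to the active DataContext, so the same helper covers both populated and empty result windows.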
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
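# The implied-vol term helpers below follow the same DataContext-sliced pattern as the var-term helpers above.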
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
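# Build a 10-interval index whose second interval is NaN by masking one left/right pair.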
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
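# Interleave the rows so they are no longer ordered by column, for the is_sorted / sort tests.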
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
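# Map each distinct mapped value to a 'test_<value>' label for the mapping-aware fixtures.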
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
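# Tests for the Records base class: config round-trips, readable views,
# sorting, masking, field mapping, apply/count reductions, indexing,
# filtering, and stats.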
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
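# Range fixtures: per test_from_ts below, contiguous runs of values other than
# -1 in `ts` become range records (column 'd' yields none); the grouped variant
# reuses the g1/g2 grouping.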
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
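# Drawdown fixtures built from `ts2`: per test_from_ts below, column 'a' has
# three drawdowns (the last still active), 'b' has two recovered ones, 'c' has
# one active one, and the monotonically rising 'd' has none.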
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
from builtins import print
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
import os
import operator
import utils
from utils.constants import UNIVARIATE_DATASET_NAMES as DATASET_NAMES
from utils.constants import UNIVARIATE_DATASET_NAMES_2018 as DATASET_NAMES_2018
from utils.constants import ARCHIVE_NAMES as ARCHIVE_NAMES
from utils.constants import CLASSIFIERS
from utils.constants import ITERATIONS
from utils.constants import MTS_DATASET_NAMES
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelEncoder
from scipy.interpolate import interp1d
from scipy.io import loadmat
def readucr(filename):
data = np.loadtxt(filename, delimiter=',')
Y = data[:, 0]
X = data[:, 1:]
return X, Y
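# Usage sketch (illustrative; the path below is hypothetical): readucr() expects a
# comma-separated file whose first column holds the class label and whose remaining
# columns hold the series values, exactly as used later in read_dataset(), e.g.
#   x_train, y_train = readucr('/path/to/Coffee/Coffee_TRAIN')
#   x_test, y_test = readucr('/path/to/Coffee/Coffee_TEST')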
def create_directory(directory_path):
if os.path.exists(directory_path):
return None
else:
try:
os.makedirs(directory_path)
except:
# in case another machine created the path meanwhile !:(
return None
return directory_path
def create_path(root_dir, classifier_name, archive_name):
output_directory = root_dir + '/results/' + classifier_name + '/' + archive_name + '/'
if os.path.exists(output_directory):
return None
else:
os.makedirs(output_directory)
return output_directory
def read_dataset(root_dir, archive_name, dataset_name):
datasets_dict = {}
cur_root_dir = root_dir.replace('-temp', '')
if archive_name == 'mts_archive':
file_name = cur_root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
x_train = np.load(file_name + 'x_train.npy')
y_train = np.load(file_name + 'y_train.npy')
x_test = np.load(file_name + 'x_test.npy')
y_test = np.load(file_name + 'y_test.npy')
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
elif archive_name == 'UCRArchive_2018':
root_dir_dataset = cur_root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
df_train = pd.read_csv(root_dir_dataset + '/' + dataset_name + '_TRAIN.tsv', sep='\t', header=None)
df_test = pd.read_csv(root_dir_dataset + '/' + dataset_name + '_TEST.tsv', sep='\t', header=None)
y_train = df_train.values[:, 0]
y_test = df_test.values[:, 0]
x_train = df_train.drop(columns=[0])
x_test = df_test.drop(columns=[0])
x_train.columns = range(x_train.shape[1])
x_test.columns = range(x_test.shape[1])
x_train = x_train.values
x_test = x_test.values
# z-normalise each series (zero mean, unit variance); a std of 0 is replaced by 1 to avoid division by zero
std_ = x_train.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
x_train = (x_train - x_train.mean(axis=1, keepdims=True)) / std_
std_ = x_test.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
x_test = (x_test - x_test.mean(axis=1, keepdims=True)) / std_
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
else:
file_name = cur_root_dir + '/archives/' + archive_name + '/' + dataset_name + '/' + dataset_name
x_train, y_train = readucr(file_name + '_TRAIN')
x_test, y_test = readucr(file_name + '_TEST')
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
return datasets_dict
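# Usage sketch (illustrative; root_dir is hypothetical): read_dataset() returns a
# one-entry dict keyed by the dataset name, holding the tuple
# (x_train, y_train, x_test, y_test), e.g.
#   datasets_dict = read_dataset('/path/to/root', 'UCRArchive_2018', 'GunPoint')
#   x_train, y_train, x_test, y_test = datasets_dict['GunPoint']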
def read_all_datasets(root_dir, archive_name, split_val=False):
datasets_dict = {}
cur_root_dir = root_dir.replace('-temp', '')
dataset_names_to_sort = []
if archive_name == 'mts_archive':
for dataset_name in MTS_DATASET_NAMES:
root_dir_dataset = cur_root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
x_train = np.load(root_dir_dataset + 'x_train.npy')
y_train = np.load(root_dir_dataset + 'y_train.npy')
x_test = np.load(root_dir_dataset + 'x_test.npy')
y_test = np.load(root_dir_dataset + 'y_test.npy')
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
elif archive_name == 'UCRArchive_2018':
for dataset_name in DATASET_NAMES_2018:
root_dir_dataset = cur_root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
df_train = pd.read_csv(root_dir_dataset + '/' + dataset_name + '_TRAIN.tsv', sep='\t', header=None)
df_test = pd.read_csv(root_dir_dataset + '/' + dataset_name + '_TEST.tsv', sep='\t', header=None)
y_train = df_train.values[:, 0]
y_test = df_test.values[:, 0]
x_train = df_train.drop(columns=[0])
x_test = df_test.drop(columns=[0])
x_train.columns = range(x_train.shape[1])
x_test.columns = range(x_test.shape[1])
x_train = x_train.values
x_test = x_test.values
# z-normalise each series (zero mean, unit variance); a std of 0 is replaced by 1 to avoid division by zero
std_ = x_train.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
x_train = (x_train - x_train.mean(axis=1, keepdims=True)) / std_
std_ = x_test.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
x_test = (x_test - x_test.mean(axis=1, keepdims=True)) / std_
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
else:
for dataset_name in DATASET_NAMES:
root_dir_dataset = cur_root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
file_name = root_dir_dataset + dataset_name
x_train, y_train = readucr(file_name + '_TRAIN')
x_test, y_test = readucr(file_name + '_TEST')
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
dataset_names_to_sort.append((dataset_name, len(x_train)))
dataset_names_to_sort.sort(key=operator.itemgetter(1))
for i in range(len(DATASET_NAMES)):
DATASET_NAMES[i] = dataset_names_to_sort[i][0]
return datasets_dict
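# Note: for the classic UCR archive branch above, the module-level DATASET_NAMES
# list is re-ordered in place by ascending training-set size, so later loops over
# DATASET_NAMES process the cheapest datasets first.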
def get_func_length(x_train, x_test, func):
if func == min:
func_length = np.inf
else:
func_length = 0
n = x_train.shape[0]
for i in range(n):
func_length = func(func_length, x_train[i].shape[1])
n = x_test.shape[0]
for i in range(n):
func_length = func(func_length, x_test[i].shape[1])
return func_length
def transform_to_same_length(x, n_var, max_length):
n = x.shape[0]
# the new set in UCR format, as a NumPy array of shape (n, max_length, n_var)
ucr_x = np.zeros((n, max_length, n_var), dtype=np.float64)
# loop through each time series
for i in range(n):
mts = x[i]
curr_length = mts.shape[1]
idx = np.array(range(curr_length))
idx_new = np.linspace(0, idx.max(), max_length)
for j in range(n_var):
ts = mts[j]
# cubic spline interpolation onto the common index grid
f = interp1d(idx, ts, kind='cubic')
new_ts = f(idx_new)
ucr_x[i, :, j] = new_ts
return ucr_x
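# Usage sketch (illustrative): every variable of every multivariate series is
# resampled onto a common grid of max_length points, so the whole set shares one
# shape (n, max_length, n_var). It is typically paired with get_func_length, e.g.
#   max_length = get_func_length(x_train, x_test, func=max)
#   x_train = transform_to_same_length(x_train, n_var, max_length)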
def transform_mts_to_ucr_format():
mts_root_dir = '/mnt/Other/mtsdata/'
mts_out_dir = '/mnt/nfs/casimir/archives/mts_archive/'
for dataset_name in MTS_DATASET_NAMES:
# print('dataset_name',dataset_name)
out_dir = mts_out_dir + dataset_name + '/'
# if create_directory(out_dir) is None:
# print('Already_done')
# continue
a = loadmat(mts_root_dir + dataset_name + '/' + dataset_name + '.mat')
a = a['mts']
a = a[0, 0]
dt = a.dtype.names
dt = list(dt)
for i in range(len(dt)):
if dt[i] == 'train':
x_train = a[i].reshape(max(a[i].shape))
elif dt[i] == 'test':
x_test = a[i].reshape(max(a[i].shape))
elif dt[i] == 'trainlabels':
y_train = a[i].reshape(max(a[i].shape))
elif dt[i] == 'testlabels':
y_test = a[i].reshape(max(a[i].shape))
# x_train = a[1][0]
# y_train = a[0][:,0]
# x_test = a[3][0]
# y_test = a[2][:,0]
n_var = x_train[0].shape[0]
max_length = get_func_length(x_train, x_test, func=max)
min_length = get_func_length(x_train, x_test, func=min)
print(dataset_name, 'max', max_length, 'min', min_length)
print()
# continue
x_train = transform_to_same_length(x_train, n_var, max_length)
x_test = transform_to_same_length(x_test, n_var, max_length)
# save them
np.save(out_dir + 'x_train.npy', x_train)
np.save(out_dir + 'y_train.npy', y_train)
np.save(out_dir + 'x_test.npy', x_test)
np.save(out_dir + 'y_test.npy', y_test)
print('Done')
def calculate_metrics(y_true, y_pred, duration, y_true_val=None, y_pred_val=None):
res = pd.DataFrame(data=np.zeros((1, 4), dtype=float), index=[0],
columns=['precision', 'accuracy', 'recall', 'duration'])
res['precision'] = precision_score(y_true, y_pred, average='macro')
res['accuracy'] = accuracy_score(y_true, y_pred)
if y_true_val is not None:
# this is useful when transfer learning is used with cross validation
res['accuracy_val'] = accuracy_score(y_true_val, y_pred_val)
res['recall'] = recall_score(y_true, y_pred, average='macro')
res['duration'] = duration
return res
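# Note: calculate_metrics() returns a single-row DataFrame with the columns
# ['precision', 'accuracy', 'recall', 'duration'] (plus 'accuracy_val' when
# validation labels are supplied), e.g.
#   df_metrics = calculate_metrics(y_true, y_pred, duration)
#   df_metrics.to_csv(output_directory + 'df_metrics.csv', index=False)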
def save_test_duration(file_name, test_duration):
res = pd.DataFrame(data=np.zeros((1, 1), dtype=float), index=[0],
columns=['test_duration'])
res['test_duration'] = test_duration
res.to_csv(file_name, index=False)
def generate_results_csv(output_file_name, root_dir):
res = pd.DataFrame(data=np.zeros((0, 7), dtype=float), index=[],
columns=['classifier_name', 'archive_name', 'dataset_name',
'precision', 'accuracy', 'recall', 'duration'])
for classifier_name in CLASSIFIERS:
for archive_name in ARCHIVE_NAMES:
datasets_dict = read_all_datasets(root_dir, archive_name)
for it in range(ITERATIONS):
curr_archive_name = archive_name
if it != 0:
curr_archive_name = curr_archive_name + '_itr_' + str(it)
for dataset_name in datasets_dict.keys():
output_dir = root_dir + '/results/' + classifier_name + '/' \
+ curr_archive_name + '/' + dataset_name + '/' + 'df_metrics.csv'
if not os.path.exists(output_dir):
continue
df_metrics = pd.read_csv(output_dir)
df_metrics['classifier_name'] = classifier_name
df_metrics['archive_name'] = archive_name
df_metrics['dataset_name'] = dataset_name
res = pd.concat((res, df_metrics), axis=0, sort=False)
res.to_csv(root_dir + output_file_name, index=False)
# aggregate the accuracy over iterations of the same dataset
res = pd.DataFrame({
'accuracy': res.groupby(
['classifier_name', 'archive_name', 'dataset_name'])['accuracy'].mean()
}).reset_index()
return res
def plot_epochs_metric(hist, file_name, metric='loss'):
plt.figure()
plt.plot(hist.history[metric])
plt.plot(hist.history['val_' + metric])
plt.title('model ' + metric)
plt.ylabel(metric, fontsize='large')
plt.xlabel('epoch', fontsize='large')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig(file_name, bbox_inches='tight')
plt.close()
def save_logs_t_leNet(output_directory, hist, y_pred, y_true, duration):
hist_df = pd.DataFrame(hist.history)
hist_df.to_csv(output_directory + 'history.csv', index=False)
df_metrics = calculate_metrics(y_true, y_pred, duration)
df_metrics.to_csv(output_directory + 'df_metrics.csv', index=False)
index_best_model = hist_df['loss'].idxmin()
row_best_model = hist_df.loc[index_best_model]
df_best_model = pd.DataFrame(data=np.zeros((1, 6), dtype=float), index=[0],
columns=['best_model_train_loss', 'best_model_val_loss', 'best_model_train_acc',
'best_model_val_acc', 'best_model_learning_rate', 'best_model_nb_epoch'])
df_best_model['best_model_train_loss'] = row_best_model['loss']
df_best_model['best_model_val_loss'] = row_best_model['val_loss']
df_best_model['best_model_train_acc'] = row_best_model['acc']
df_best_model['best_model_val_acc'] = row_best_model['val_acc']
df_best_model['best_model_nb_epoch'] = index_best_model
df_best_model.to_csv(output_directory + 'df_best_model.csv', index=False)
# plot losses
plot_epochs_metric(hist, output_directory + 'epochs_loss.png')
def save_logs(output_directory, hist, y_pred, y_true, duration, lr=True, y_true_val=None, y_pred_val=None):
hist_df = pd.DataFrame(hist.history)
hist_df.to_csv(output_directory + 'history.csv', index=False)
df_metrics = calculate_metrics(y_true, y_pred, duration, y_true_val, y_pred_val)
df_metrics.to_csv(output_directory + 'df_metrics.csv', index=False)
index_best_model = hist_df['loss'].idxmin()
row_best_model = hist_df.loc[index_best_model]
df_best_model = pd.DataFrame(data=np.zeros((1, 6), dtype=float), index=[0],
columns=['best_model_train_loss', 'best_model_val_loss', 'best_model_train_acc',
'best_model_val_acc', 'best_model_learning_rate', 'best_model_nb_epoch'])
df_best_model['best_model_train_loss'] = row_best_model['loss']
df_best_model['best_model_val_loss'] = row_best_model['val_loss']
df_best_model['best_model_train_acc'] = row_best_model['accuracy']
df_best_model['best_model_val_acc'] = row_best_model['val_accuracy']
if lr:
df_best_model['best_model_learning_rate'] = row_best_model['lr']
df_best_model['best_model_nb_epoch'] = index_best_model
df_best_model.to_csv(output_directory + 'df_best_model.csv', index=False)
# for FCN there is no hyperparameter fine-tuning - everything is static in the code
# plot losses
plot_epochs_metric(hist, output_directory + 'epochs_loss.png')
return df_metrics
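# Note: save_logs() writes four artefacts next to the model: history.csv (full
# training history), df_metrics.csv (test metrics), df_best_model.csv (metrics at
# the lowest-loss epoch) and epochs_loss.png (loss curves).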
def visualize_filter(root_dir):
import tensorflow.keras as keras
classifier = 'resnet'
archive_name = 'UCRArchive_2018'
dataset_name = 'GunPoint'
datasets_dict = read_dataset(root_dir, archive_name, dataset_name)
x_train = datasets_dict[dataset_name][0]
y_train = datasets_dict[dataset_name][1]
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
model = keras.models.load_model(
root_dir + 'results/' + classifier + '/' + archive_name + '/' + dataset_name + '/best_model.hdf5')
# filters
filters = model.layers[1].get_weights()[0]
new_input_layer = model.inputs
new_output_layer = [model.layers[1].output]
new_feed_forward = keras.backend.function(new_input_layer, new_output_layer)
classes = np.unique(y_train)
colors = [(255 / 255, 160 / 255, 14 / 255), (181 / 255, 87 / 255, 181 / 255)]
colors_conv = [(210 / 255, 0 / 255, 0 / 255), (27 / 255, 32 / 255, 101 / 255)]
idx = 10
idx_filter = 1
filter = filters[:, 0, idx_filter]
plt.figure(1)
plt.plot(filter + 0.5, color='gray', label='filter')
for c in classes:
c_x_train = x_train[np.where(y_train == c)]
convolved_filter_1 = new_feed_forward([c_x_train])[0]
idx_c = int(c) - 1
plt.plot(c_x_train[idx], color=colors[idx_c], label='class' + str(idx_c) + '-raw')
plt.plot(convolved_filter_1[idx, :, idx_filter], color=colors_conv[idx_c], label='class' + str(idx_c) + '-conv')
plt.legend()
plt.savefig(root_dir + 'convolution-' + dataset_name + '.pdf')
return 1
def viz_perf_themes(root_dir, df):
df_themes = df.copy()
themes_index = []
# add the themes
for dataset_name in df.index:
themes_index.append(utils.constants.dataset_types[dataset_name])
themes_index = np.array(themes_index)
themes, themes_counts = np.unique(themes_index, return_counts=True)
df_themes.index = themes_index
df_themes = df_themes.rank(axis=1, method='min', ascending=False)
df_themes = df_themes.where(df_themes.values == 1)
df_themes = df_themes.groupby(level=0).sum(axis=1)
df_themes['#'] = themes_counts
for classifier in CLASSIFIERS:
df_themes[classifier] = df_themes[classifier] / df_themes['#'] * 100
df_themes = df_themes.round(decimals=1)
df_themes.to_csv(root_dir + 'tab-perf-theme.csv')
def viz_perf_train_size(root_dir, df):
df_size = df.copy()
train_sizes = []
datasets_dict_ucr = read_all_datasets(root_dir, archive_name='UCR_TS_Archive_2015')
datasets_dict_mts = read_all_datasets(root_dir, archive_name='mts_archive')
datasets_dict = dict(datasets_dict_ucr, **datasets_dict_mts)
for dataset_name in df.index:
train_size = len(datasets_dict[dataset_name][0])
train_sizes.append(train_size)
train_sizes = np.array(train_sizes)
bins = np.array([0, 100, 400, 800, 99999])
train_size_index = np.digitize(train_sizes, bins)
train_size_index = bins[train_size_index]
df_size.index = train_size_index
df_size = df_size.rank(axis=1, method='min', ascending=False)
df_size = df_size.groupby(level=0, axis=0).mean()
df_size = df_size.round(decimals=2)
print(df_size.to_string())
df_size.to_csv(root_dir + 'tab-perf-train-size.csv')
def viz_perf_classes(root_dir, df):
df_classes = df.copy()
class_numbers = []
datasets_dict_ucr = read_all_datasets(root_dir, archive_name='UCR_TS_Archive_2015')
datasets_dict_mts = read_all_datasets(root_dir, archive_name='mts_archive')
datasets_dict = dict(datasets_dict_ucr, **datasets_dict_mts)
for dataset_name in df.index:
train_size = len(np.unique(datasets_dict[dataset_name][1]))
class_numbers.append(train_size)
class_numbers = np.array(class_numbers)
bins = np.array([0, 3, 4, 6, 8, 13, 9999])
class_numbers_index = np.digitize(class_numbers, bins)
class_numbers_index = bins[class_numbers_index]
df_classes.index = class_numbers_index
df_classes = df_classes.rank(axis=1, method='min', ascending=False)
df_classes = df_classes.groupby(level=0, axis=0).mean()
df_classes = df_classes.round(decimals=2)
print(df_classes.to_string())
df_classes.to_csv(root_dir + 'tab-perf-classes.csv')
def viz_perf_length(root_dir, df):
df_lengths = df.copy()
lengths = []
datasets_dict_ucr = read_all_datasets(root_dir, archive_name='UCR_TS_Archive_2015')
datasets_dict_mts = read_all_datasets(root_dir, archive_name='mts_archive')
datasets_dict = dict(datasets_dict_ucr, **datasets_dict_mts)
for dataset_name in df.index:
length = datasets_dict[dataset_name][0].shape[1]
lengths.append(length)
lengths = np.array(lengths)
bins = np.array([0, 81, 251, 451, 700, 1001, 9999])
lengths_index = np.digitize(lengths, bins)
lengths_index = bins[lengths_index]
df_lengths.index = lengths_index
df_lengths = df_lengths.rank(axis=1, method='min', ascending=False)
df_lengths = df_lengths.groupby(level=0, axis=0).mean()
df_lengths = df_lengths.round(decimals=2)
print(df_lengths.to_string())
df_lengths.to_csv(root_dir + 'tab-perf-lengths.csv')
def viz_plot(root_dir, df):
df_lengths = df.copy()
lengths = []
datasets_dict_ucr = read_all_datasets(root_dir, archive_name='UCR_TS_Archive_2015')
datasets_dict_mts = read_all_datasets(root_dir, archive_name='mts_archive')
datasets_dict = dict(datasets_dict_ucr, **datasets_dict_mts)
for dataset_name in df.index:
length = datasets_dict[dataset_name][0].shape[1]
lengths.append(length)
lengths_index = np.array(lengths)
df_lengths.index = lengths_index
plt.scatter(x=df_lengths['fcn'], y=df_lengths['resnet'])
plt.ylim(ymin=0, ymax=1.05)
plt.xlim(xmin=0, xmax=1.05)
# df_lengths['fcn']
plt.savefig(root_dir + 'plot.pdf')
def viz_for_survey_paper(root_dir, filename='results-ucr-mts.csv'):
df = pd.read_csv(root_dir + filename, index_col=0)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: | pd.Timestamp("2012-05-15 00:00:00") | pandas.Timestamp |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import json
sns.set_context('paper')
sns.set(font_scale=1)
palette = sns.color_palette("mako_r", 10)
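# Compare rate-distortion performance across models: each CSV listed in
# csv['csv_path'] corresponds to one model, each condition count contributes one
# (distortion, rate) point, and points belonging to the same model are connected
# by a thin black line.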
def compare_plots_best_performing(csv):
compare_plot_df = {'model': [], 'conds': [], 'rate': [], 'distortion': []}
fig, ax = plt.subplots(figsize=(8, 6))
for j in range(len(csv['csv_path'])):
# read the j'th csv that we want to compare
csv_df = pd.read_csv(csv['csv_path'][j])
        # each csv has a number of conditions;
        # let's loop through those
for i in range(len(set(csv_df['num_conds']))):
tmp = csv_df.loc[csv_df['num_conds'] == i]
compare_plot_df['model'].append(j)
compare_plot_df['conds'].append(i)
# compare_plot_df['rate'].append(np.sort(tmp['test_kld'])[0])
# compare_plot_df['distortion'].append(np.sort(tmp['test_rcl'])[0])
compare_plot_df['rate'].append(np.mean(tmp['test_kld']))
compare_plot_df['distortion'].append(np.mean(tmp['test_rcl']))
dataframe_for_lineplot = pd.DataFrame(compare_plot_df)
dataframe_for_lineplot = dataframe_for_lineplot.loc[dataframe_for_lineplot['model'] == j]
sns.lineplot(ax=ax, data=dataframe_for_lineplot, x='distortion', y='rate', color='black', lw=0.5)
compare_plot_df = pd.DataFrame(compare_plot_df)
sns.scatterplot(
ax=ax, data=compare_plot_df, x='distortion', y='rate', hue='conds',
style='model', s=200
)
plt.legend(loc='upper left')
ax.set_xlabel('Distortion')
ax.set_ylabel('Rate')
fig.savefig('./multiple_models_conds.png', bbox_inches='tight')
def plot_single_model_multiple_epoch(
csv, compare_plot_df=None, count=None,
fig=None, ax=None, total=None, save=True
):
if compare_plot_df is None:
fig, ax = plt.subplots(figsize=(8, 6))
compare_plot_df = {'epoch': [], 'conds': [], 'rate': [], 'distortion': []}
csv_df = | pd.read_csv(csv['csv_path']) | pandas.read_csv |
import numpy as np
import pandas as pd
import dask
from dask.distributed import Client, progress
import itertools
import networkx as nx  # nx.random_regular_graph is used below for expander sampling
from maxnorm.maxnorm_completion import *
from maxnorm.tenalg import *
from maxnorm.graphs import *
def generate_data(obs_mask, U, sigma):
data = obs_mask.copy()
clean_data = kr_get_items(U, data.coords)
#clean_data_rms = np.sqrt(np.sum(clean_data)**2 / len(clean_data))
clean_data_rms = 1
data.data = clean_data + np.random.randn(data.nnz) * sigma * clean_data_rms
return data
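# Relative generalization error ||Upred - Utrue||_F / ||Utrue||_F, expanded into
# inner products of the factor matrices (kr_dot, assumed to implement the
# Khatri-Rao inner product) so the full tensors never have to be formed.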
def gen_err(Upred, Utrue):
norm_true = kr_dot(Utrue, Utrue)
mse_gen = kr_dot(Upred, Upred) + norm_true - 2 * kr_dot(Upred, Utrue)
return np.sqrt(mse_gen / norm_true)
def run_simulation(n, t, r, sigma, r_fit, rep, d=10,
max_iter=None, inner_max_iter=10, tol=1e-10, alg='max', verbosity=0,
kappa=100, beta=1, epsilon=1e-2, delta=None):
# parameter parsing
n = int(n)
t = int(t)
r = int(r)
r_fit = int(r_fit)
rep = int(rep)
d = int(d)
# defaults
if max_iter is None:
max_iter = 3 * t * n
if delta is None:
delta = max(sigma, 0.05)
# generate truth
U = kr_random(n, t, r, rvs='unif')
U = kr_rescale(U, np.sqrt(n**t), 'hs')
# expander sampling
expander = nx.random_regular_graph(d, n)
observation_mask = obs_mask_expander(expander, t)
max_qnorm_ub_true = max_qnorm_ub(U)
data = generate_data(observation_mask, U, sigma)
if verbosity > 1:
print("Running a simulation: n = %d, t = %d, r = %d, sigma = %f, r_fit = %d\n" \
% (n, t, r, sigma, r_fit))
print("max_qnorm_ub_true = %1.3e" % max_qnorm_ub_true)
print("expander degree = %d, sampling %1.2e%%" % (d, 100. * float(data.nnz) / n**t))
clean_data_rmse = np.sqrt(loss(U, data) / data.nnz)
if alg == 'als':
try:
U_fit, cost_arr = \
tensor_completion_alt_min(data, r_fit, init='svd', max_iter=max_iter, tol=tol,
                                          inner_max_iter=inner_max_iter, epsilon=epsilon)
except Exception:
U_fit = None
elif alg == 'max':
try:
U_fit, cost_arr = \
tensor_completion_maxnorm(data, r_fit, delta * np.sqrt(data.nnz), epsilon=epsilon,
#sgd=True, sgd_batch_size=int(ndata/2),
#U0 = Unew1,
init='svdrand', kappa=kappa, beta=beta,
verbosity=verbosity, inner_tol=tol/100,
tol=tol, max_iter=max_iter, inner_max_iter=inner_max_iter)
except Exception:
U_fit = None
elif alg == 'both':
try:
U_fit_als, cost_arr_als = \
tensor_completion_alt_min(data, r_fit, init='svd', max_iter=max_iter, tol=tol,
                                          inner_max_iter=inner_max_iter, epsilon=epsilon)
except Exception:
U_fit_als = None
try:
U_fit_max, cost_arr_max = \
tensor_completion_maxnorm(data, r_fit, delta * np.sqrt(data.nnz), epsilon=epsilon,
#sgd=True, sgd_batch_size=int(ndata/2),
#U0 = U_fit_al,
init='svdrand', kappa=kappa, beta=beta,
verbosity=verbosity,
tol=tol, max_iter=max_iter, inner_max_iter=inner_max_iter)
except Exception:
U_fit_max = None
else:
raise Exception('unexpected algorithm')
if alg != 'both':
loss_true = np.sqrt(loss(U, data) / data.nnz)
if U_fit is not None:
loss_val = np.sqrt(loss(U_fit, data) / data.nnz)
gen_err_val = gen_err(U_fit, U)
max_qnorm_ub_val = max_qnorm_ub(U_fit)
else:
loss_val = np.nan
gen_err_val = np.nan
max_qnorm_ub_val = np.nan
return loss_true, max_qnorm_ub_true, loss_val, max_qnorm_ub_val, gen_err_val
else:
loss_true = np.sqrt(loss(U, data) / data.nnz)
if U_fit_als is not None:
loss_als = np.sqrt(loss(U_fit_als, data) / data.nnz)
max_qnorm_ub_als = max_qnorm_ub(U_fit_als)
gen_err_als = gen_err(U_fit_als, U)
else:
loss_als = np.nan
max_qnorm_ub_als = np.nan
gen_err_als = np.nan
if U_fit_max is not None:
loss_max = np.sqrt(loss(U_fit_max, data) / data.nnz)
max_qnorm_ub_max = max_qnorm_ub(U_fit_max)
gen_err_max = gen_err(U_fit_max, U)
else:
loss_max = np.nan
max_qnorm_ub_max = np.nan
gen_err_max = np.nan
return loss_true, max_qnorm_ub_true, loss_als, gen_err_als, loss_max, gen_err_max
if __name__ == '__main__':
# generate parameters for a sweep
n = [20, 40, 80]
#n = [10]
t = [3, 4]
r = [3]
sigma = [0.0]
r_fit = [3, 8, 16, 32, 64]
rep = [i for i in range(6)]
#const = [5, 10, 20, 40, 100]
d = [3, 7, 11, 15]
# n = [10]
# t = [3]
# r = [3]
# sigma = [0.1]
# r_fit = [6]
# rep = [0, 1, 2, 3]
param_list = [n, t, r, sigma, r_fit, rep, d]
params = list(itertools.product(*param_list))
param_df = | pd.DataFrame(params, columns=['n', 't', 'r', 'sigma', 'r_fit', 'rep', 'd']) | pandas.DataFrame |
# libraries
import pandas as pd
from pandas.api.types import CategoricalDtype, is_categorical_dtype
import numpy as np
import string
import types
import scanpy.api as sc
import anndata as ad
from plotnine import *
import plotnine
import scipy
from scipy import sparse, stats
from scipy.cluster import hierarchy
import glob
import more_itertools as mit
import tqdm
import pickle
import multiprocessing
import itertools
import sklearn
from sklearn.preprocessing import StandardScaler, label_binarize
from sklearn.cluster import AgglomerativeClustering
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import r2_score, classification_report, roc_curve, auc, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
import typing
import random
from adjustText import adjust_text
import sys
import lifelines
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test
import matplotlib as mp
import matplotlib.pyplot as plt
# classes
class SklearnWrapper:
"""
class to handle sklearn function piped inline with pandas
"""
def __init__(self, transform: typing.Callable):
self.transform = transform
def __call__(self, df):
transformed = self.transform.fit_transform(df.values)
return pd.DataFrame(transformed, columns=df.columns, index=df.index)
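# Hypothetical usage sketch (the 'group' column name is only illustrative):
#   df.groupby('group').apply(SklearnWrapper(StandardScaler()))
# keeps the grouped transform's output as a DataFrame with the original
# columns and index.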
# functions
def imports():
"""
print module names and versions
ref: https://stackoverflow.com/questions/20180543/how-to-check-version-of-python-modules
input: none
output: print to std out
"""
for name, val in globals().items():
if isinstance(val, types.ModuleType):
if val.__name__ not in ['builtins']:
try:
print (f'{val.__name__}:', val.__version__)
except:
pass
def create_adata (pre_adata):
"""
Creates adata obj from raw data (rows=gene_names, col=cell_id)
Input: raw expression data in pd df
Output: adata obj
"""
print('Ingest raw data...')
# pd df to np array
array_adata = pre_adata.values
# extract obs and var
obs = pre_adata.columns.tolist()
gene_names = pre_adata.index.tolist()
var = | pd.DataFrame({'gene_symbols':gene_names}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import Series, Index, isna, notna
from pandas.core.dtypes.common import is_float_dtype
from pandas.core.dtypes.missing import remove_na_arraylike
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
tm.add_nans(panel)
class SafeForLongAndSparse(object):
def test_repr(self):
repr(self.panel4d)
def test_iter(self):
tm.equalContents(list(self.panel4d), self.panel4d.labels)
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
pytest.skip("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel4d
# # set some NAs
# obj.loc[5:10] = np.nan
# obj.loc[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na_arraylike(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
expected = obj.apply(wrapper, axis=i)
tm.assert_panel_equal(result, expected)
else:
skipna_wrapper = alternative
wrapper = alternative
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
expected = obj.apply(skipna_wrapper, axis=i)
tm.assert_panel_equal(result, expected)
pytest.raises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
def test_get_axis(self):
assert self.panel4d._get_axis(0) is self.panel4d.labels
assert self.panel4d._get_axis(1) is self.panel4d.items
assert self.panel4d._get_axis(2) is self.panel4d.major_axis
assert self.panel4d._get_axis(3) is self.panel4d.minor_axis
def test_set_axis(self):
with catch_warnings(record=True):
new_labels = Index(np.arange(len(self.panel4d.labels)))
# TODO: unused?
# new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure propagate to potentially prior-cached items too
# TODO: unused?
# label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
assert 'l1' not in self.panel4d._item_cache
assert self.panel4d.labels is new_labels
self.panel4d.major_axis = new_major
assert self.panel4d[0].major_axis is new_major
assert self.panel4d.major_axis is new_major
self.panel4d.minor_axis = new_minor
assert self.panel4d[0].minor_axis is new_minor
assert self.panel4d.minor_axis is new_minor
def test_get_axis_number(self):
assert self.panel4d._get_axis_number('labels') == 0
assert self.panel4d._get_axis_number('items') == 1
assert self.panel4d._get_axis_number('major') == 2
assert self.panel4d._get_axis_number('minor') == 3
def test_get_axis_name(self):
assert self.panel4d._get_axis_name(0) == 'labels'
assert self.panel4d._get_axis_name(1) == 'items'
assert self.panel4d._get_axis_name(2) == 'major_axis'
assert self.panel4d._get_axis_name(3) == 'minor_axis'
def test_arith(self):
with catch_warnings(record=True):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
pytest.raises(Exception, self.panel4d.__add__,
self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
tm.assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
assert (len(list(self.panel4d.iteritems())) ==
len(self.panel4d.labels))
def test_combinePanel4d(self):
with catch_warnings(record=True):
result = self.panel4d.add(self.panel4d)
tm.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
with catch_warnings(record=True):
tm.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
with catch_warnings(record=True):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
tm.assert_panel4d_equal(result, expected)
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
tm.assert_panel4d_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15),
axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
tm.assert_panel4d_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=3)
expected = p.reindex(minor=['A', 'D'])
tm.assert_panel4d_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
tm.assert_panel4d_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
def test_abs(self):
with catch_warnings(record=True):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
tm.assert_panel4d_equal(result, expected)
p = self.panel4d['l1']
result = p.abs()
expected = np.abs(p)
tm.assert_panel_equal(result, expected)
df = p['ItemA']
result = df.abs()
expected = np.abs(df)
assert_frame_equal(result, expected)
class CheckIndexing(object):
def test_getitem(self):
pytest.raises(Exception, self.panel4d.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
with catch_warnings(record=True):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
tm.assert_panel_equal(expected, result)
assert 'l2' not in self.panel4d.labels
del self.panel4d['l3']
assert 'l3' not in self.panel4d.labels
pytest.raises(Exception, self.panel4d.__delitem__, 'l3')
values = np.empty((4, 4, 4, 4))
values[0] = 0
values[1] = 1
values[2] = 2
values[3] = 3
panel4d = Panel4D(values, lrange(4), lrange(4),
lrange(4), lrange(4))
# did we delete the right row?
panel4dc = panel4d.copy()
del panel4dc[0]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[1]
tm.assert_panel_equal(panel4dc[0], panel4d[0])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[2]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[0], panel4d[0])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[3]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[0], panel4d[0])
def test_setitem(self):
with catch_warnings(record=True):
# Panel
p = Panel(dict(
ItemA=self.panel4d['l1']['ItemA'][2:].filter(
items=['A', 'B'])))
self.panel4d['l4'] = p
self.panel4d['l5'] = p
p2 = self.panel4d['l4']
tm.assert_panel_equal(p, p2.reindex(items=p.items,
major_axis=p.major_axis,
minor_axis=p.minor_axis))
# scalar
self.panel4d['lG'] = 1
self.panel4d['lE'] = True
assert self.panel4d['lG'].values.dtype == np.int64
assert self.panel4d['lE'].values.dtype == np.bool_
# object dtype
self.panel4d['lQ'] = 'foo'
assert self.panel4d['lQ'].values.dtype == np.object_
# boolean dtype
self.panel4d['lP'] = self.panel4d['l1'] > 0
assert self.panel4d['lP'].values.dtype == np.bool_
def test_setitem_by_indexer(self):
with catch_warnings(record=True):
# Panel
panel4dc = self.panel4d.copy()
p = panel4dc.iloc[0]
def func():
self.panel4d.iloc[0] = p
pytest.raises(NotImplementedError, func)
# DataFrame
panel4dc = self.panel4d.copy()
df = panel4dc.iloc[0, 0]
df.iloc[:] = 1
panel4dc.iloc[0, 0] = df
assert (panel4dc.iloc[0, 0].values == 1).all()
# Series
panel4dc = self.panel4d.copy()
s = panel4dc.iloc[0, 0, :, 0]
s.iloc[:] = 1
panel4dc.iloc[0, 0, :, 0] = s
assert (panel4dc.iloc[0, 0, :, 0].values == 1).all()
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
assert (panel4dc.iloc[0].values == 1).all()
assert panel4dc.iloc[1].values.all()
assert (panel4dc.iloc[2].values == 'foo').all()
def test_setitem_by_indexer_mixed_type(self):
with catch_warnings(record=True):
# GH 8702
self.panel4d['foo'] = 'bar'
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
assert (panel4dc.iloc[0].values == 1).all()
assert panel4dc.iloc[1].values.all()
assert (panel4dc.iloc[2].values == 'foo').all()
def test_comparisons(self):
with catch_warnings(record=True):
p1 = tm.makePanel4D()
p2 = tm.makePanel4D()
tp = p1.reindex(labels=p1.labels.tolist() + ['foo'])
p = p1[p1.labels[0]]
def test_comp(func):
result = func(p1, p2)
tm.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
pytest.raises(Exception, func, p1, tp)
# versus different objs
pytest.raises(Exception, func, p1, p)
result3 = func(self.panel4d, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.panel4d.values, 0))
with np.errstate(invalid='ignore'):
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_major_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.major_axis[5]
with catch_warnings(record=True):
xs = self.panel4d.major_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'],
ref.xs(idx), check_names=False)
# not contained
idx = self.panel4d.major_axis[0] - BDay()
pytest.raises(Exception, self.panel4d.major_xs, idx)
def test_major_xs_mixed(self):
self.panel4d['l4'] = 'foo'
with catch_warnings(record=True):
xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
assert xs['l1']['A'].dtype == np.float64
assert xs['l4']['A'].dtype == np.object_
def test_minor_xs(self):
ref = self.panel4d['l1']['ItemA']
with catch_warnings(record=True):
idx = self.panel4d.minor_axis[1]
xs = self.panel4d.minor_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False)
# not contained
pytest.raises(Exception, self.panel4d.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel4d['l4'] = 'foo'
with catch_warnings(record=True):
xs = self.panel4d.minor_xs('D')
assert xs['l1'].T['ItemA'].dtype == np.float64
assert xs['l4'].T['ItemA'].dtype == np.object_
def test_xs(self):
l1 = self.panel4d.xs('l1', axis=0)
expected = self.panel4d['l1']
tm.assert_panel_equal(l1, expected)
# View if possible
l1_view = self.panel4d.xs('l1', axis=0)
l1_view.values[:] = np.nan
assert np.isnan(self.panel4d['l1'].values).all()
# Mixed-type
self.panel4d['strings'] = 'foo'
with catch_warnings(record=True):
result = self.panel4d.xs('D', axis=3)
assert result.is_copy is not None
def test_getitem_fancy_labels(self):
with catch_warnings(record=True):
panel4d = self.panel4d
labels = panel4d.labels[[1, 0]]
items = panel4d.items[[1, 0]]
dates = panel4d.major_axis[::2]
cols = ['D', 'C', 'F']
# all 4 specified
tm.assert_panel4d_equal(panel4d.loc[labels, items, dates, cols],
panel4d.reindex(labels=labels, items=items,
major=dates, minor=cols))
# 3 specified
tm.assert_panel4d_equal(panel4d.loc[:, items, dates, cols],
panel4d.reindex(items=items, major=dates,
minor=cols))
# 2 specified
tm.assert_panel4d_equal(panel4d.loc[:, :, dates, cols],
panel4d.reindex(major=dates, minor=cols))
tm.assert_panel4d_equal(panel4d.loc[:, items, :, cols],
panel4d.reindex(items=items, minor=cols))
tm.assert_panel4d_equal(panel4d.loc[:, items, dates, :],
panel4d.reindex(items=items, major=dates))
# only 1
tm.assert_panel4d_equal(panel4d.loc[:, items, :, :],
panel4d.reindex(items=items))
tm.assert_panel4d_equal(panel4d.loc[:, :, dates, :],
panel4d.reindex(major=dates))
tm.assert_panel4d_equal(panel4d.loc[:, :, :, cols],
panel4d.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
pass
def test_get_value(self):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
result = self.panel4d.get_value(
label, item, mjr, mnr)
expected = self.panel4d[label][item][mnr][mjr]
assert_almost_equal(result, expected)
def test_set_value(self):
with catch_warnings(record=True):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
self.panel4d.set_value(label, item, mjr, mnr, 1.)
tm.assert_almost_equal(
self.panel4d[label][item][mnr][mjr], 1.)
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
assert is_float_dtype(res3['l4'].values)
# resize
res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
assert isinstance(res, Panel4D)
assert res is not self.panel4d
assert res.get_value('l4', 'ItemE', 'foo', 'bar') == 1.5
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
assert is_float_dtype(res3['l4'].values)
class TestPanel4d(CheckIndexing, SafeForSparse,
SafeForLongAndSparse):
def setup_method(self, method):
with catch_warnings(record=True):
self.panel4d = tm.makePanel4D(nper=8)
add_nans(self.panel4d)
def test_constructor(self):
with catch_warnings(record=True):
panel4d = Panel4D(self.panel4d._data)
assert panel4d._data is self.panel4d._data
panel4d = Panel4D(self.panel4d._data, copy=True)
assert panel4d._data is not self.panel4d._data
tm.assert_panel4d_equal(panel4d, self.panel4d)
vals = self.panel4d.values
# no copy
panel4d = Panel4D(vals)
assert panel4d.values is vals
# copy
panel4d = Panel4D(vals, copy=True)
assert panel4d.values is not vals
# GH #8285, test when scalar data is used to construct a Panel4D
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
panel4d = Panel4D(val, labels=range(2), items=range(
3), major_axis=range(4), minor_axis=range(5))
vals = np.empty((2, 3, 4, 5), dtype=dtype)
vals.fill(val)
expected = Panel4D(vals, dtype=dtype)
tm.assert_panel4d_equal(panel4d, expected)
# test the case when dtype is passed
panel4d = Panel4D(1, labels=range(2), items=range(
3), major_axis=range(4), minor_axis=range(5), dtype='float32')
vals = np.empty((2, 3, 4, 5), dtype='float32')
vals.fill(1)
expected = Panel4D(vals, dtype='float32')
tm.assert_panel4d_equal(panel4d, expected)
def test_constructor_cast(self):
with catch_warnings(record=True):
zero_filled = self.panel4d.fillna(0)
casted = Panel4D(zero_filled._data, dtype=int)
casted2 = Panel4D(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel4D(zero_filled._data, dtype=np.int32)
casted2 = Panel4D(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
| assert_almost_equal(casted2.values, exp_values) | pandas.util.testing.assert_almost_equal |
import flask
import requests
from opencage.geocoder import OpenCageGeocode
import pandas as pd
# from pprint import pprint
app = flask.Flask(__name__)
# @app.route('/',methods=['GET', 'POST', 'PUT'])
# def pass_val():
# # search_address = request.args.get('search_address')
# # print('search_address', search_address)
# print("hello")
# return jsonify({'reply':'success'})
# for i in range(0, 5):
# api-endpoint
URL = 'https://jobs.github.com/positions.json'
# # location given here
# location = 'new york' #enter user inputted city
# full_time = 'true'
# description = 'engineer'
# defining a params dict for the parameters to be sent to the API
PARAMS = {'page':4}
# sending get request and saving the response as response object
response = requests.get(url = URL, params = PARAMS)
# extracting data in json format
jsonResponse = response.json()
print(jsonResponse)
# posting_set = set()
#CSV columns list
company_list = []
location_list = []
title_list = []
type_list = []
company_url_list = []
description_list = []
company_logo_list = []
latitude_list = []
longitude_list = []
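# Walk every job posting: collect its fields, then forward-geocode the combined
# company/location string; non-US results or failed lookups get empty
# latitude/longitude entries.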
for i,v in enumerate(jsonResponse):
query_string = ''
company_name = ''
location_name = ''
title = ''
type_name = ''
description_name = ''
company_url = ''
company_logo = ''
for key, value in jsonResponse[i].items():
if key == 'title':
title = value
if key == 'type':
type_name = value
if key == 'company_url':
company_url = value
if key == 'company_logo':
company_logo = value
if key == 'description':
description_name = value
if key == 'location':
location_name = value
query_string += value
if key == 'company':
company_name = value
query_string += value
query_string += ' '
# if (query_string == '' or query_string in posting_set):
# continue
title_list.append(title)
type_list.append(type_name)
company_url_list.append(company_url)
company_logo_list.append(company_logo)
description_list.append(description_name)
company_list.append(company_name)
location_list.append(location_name)
#Forward Geocoding
key = '<KEY>'
geocoder = OpenCageGeocode(key)
query = query_string
# posting_set.add(query_string)
results = geocoder.geocode(query)
if results and results[0]['components']['country_code'] == 'us':
latitude_list.append(results[0]['geometry']['lat'])
longitude_list.append(results[0]['geometry']['lng'])
else:
latitude_list.append('')
longitude_list.append('')
#Creating dataframe
jobs_dict = {'Company': company_list, 'Location': location_list, 'Type': type_list, 'Title': title_list,
             'Description': description_list, 'Company_url': company_url_list, 'Company_logo': company_logo_list, 'Latitude': latitude_list, 'Longitude': longitude_list}
df = pd.DataFrame(jobs_dict)
import pandas
from collections import Counter
from tqdm import tqdm
user_df = pandas.read_csv('processed_data/prj_user.csv')
tweets_df = pandas.read_csv('original_data/prj_tweet.csv')
ids = user_df["id"]
ids = list(ids.values)
hobby_1_list = []
hobby_2_list = []
def get_users_most_popular_hashtags_list(tweets_df, user_id, number_of_wanted_hashtags=2):
"""
    :param tweets_df: DataFrame of tweets with 'userID' and 'tweet' columns
    :param user_id: id of the user whose most popular hashtags we want
    :param number_of_wanted_hashtags: maximum number of hashtags to return
:return: list of strings, size of number_of_wanted_hashtags or smaller if user doesn't have enough hashtags
"""
tweets = list(tweets_df.loc[tweets_df['userID'] == int(user_id)]["tweet"].values)
tweets_with_hashtag = [tweet for tweet in tweets if "#" in tweet]
user_hashtags = []
for tweet in tweets_with_hashtag:
user_hashtags += [i[1:] for i in tweet.split() if i.startswith("#")]
users_most_common_hashtags = [word for word, word_count in Counter(user_hashtags).most_common(number_of_wanted_hashtags)]
return users_most_common_hashtags
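# Build two hobby columns per user: the user's two most frequent hashtags,
# padded with None when fewer than two are available.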
for id in tqdm(ids):
users_most_common_hashtags = get_users_most_popular_hashtags_list(tweets_df=tweets_df, user_id = id, number_of_wanted_hashtags=2)
if len(users_most_common_hashtags) < 2:
while len(users_most_common_hashtags) < 2:
users_most_common_hashtags.append(None)
hobby_1_list.append(users_most_common_hashtags[0])
hobby_2_list.append(users_most_common_hashtags[1])
hobby_df = pandas.read_csv('processed_data/prj_user.csv')
hobby_df['hobby'] = pandas.Series(hobby_1_list, index=hobby_df.index)
hobby_df['hobbyPiority'] = pandas.Series([1 for i in range(len(hobby_1_list))], index=hobby_df.index)
hobby_df = hobby_df[["id", "hobby", "hobbyPiority"]]
hobby_df.to_csv("processed_data/user_has_hobby1.csv", index=False, na_rep='NULL')
hobby_df = pandas.read_csv('processed_data/prj_user.csv')
hobby_df['hobby'] = | pandas.Series(hobby_2_list, index=hobby_df.index) | pandas.Series |
# ===== Libraries ===== #
from random import shuffle
import datetime
import pandas as pd
# ===== Constants ===== #
EASY, HARD, HISTORY, EXIT = map(str, range(1, 5)) # command constants
STRIKE_SCORE, BALL_SCORE = 0.1, 0.05 # strike/ball scores
TRY_LIMIT = 30 # maximum number of guesses
DATA_FILE = "data.csv" # score record file
RANKING_COUNT = 3 # how many of the top records to show
END = False
# ===== Global variables ===== #
records = pd.DataFrame() # ranking records
# ===== Classes ===== #
class Game:
    # class constructor
    def __init__(self, n: int) -> None:
        self.n = n # number of digits
        self.try_count = self.score = 0 # number of guesses, score
        tmp = list(range(10))
        shuffle(tmp)
        self.answer = tmp[:n] # answer digits
    # checks whether the user's input is valid
    def check(self, target: str) -> bool:
        error = ''
        try: int(target)
        except ValueError: error = 'That is not a number.'
        if not error and len(target) != self.n: error = f'That is not {self.n} digits.'
        elif not error and len(target) != len(set(target)): error = 'There are duplicate digits.'
        if error:
            print(error + " Please try again.\n")
            return False
        return True
    # compares target with the answer and counts strikes and balls
    def get_count(self, target: str) -> list:
        target = list(map(int, target))
        result = [0, 0]
        for idx, val in enumerate(target):
            if self.answer[idx] == val: result[0] += 1
            elif val in self.answer: result[1] += 1
        return result
    # reports strikes/balls for the user's guess, then updates the score
    def play(self, user_input: str) -> bool:
        strike, ball = self.get_count(user_input)
        if strike + ball == 0: print("Out!!!\n")
        else: print(f"{strike} strike(s), {ball} ball(s)\n")
        self.score += STRIKE_SCORE * strike + BALL_SCORE * ball
        self.try_count += 1
        return strike == self.n
    # adds the finishing bonus to the score (larger for harder games, smaller the more guesses were used)
    def add_final_score(self) -> None:
        self.score += self.n ** 2 * TRY_LIMIT / self.try_count
    # saves and prints the final score
    def record_score(self, success: bool) -> None:
        if success: self.add_final_score()
        if self.score > 0:
            with open(DATA_FILE, "a") as f:
                now = datetime.datetime.now()
                now_date = now.strftime('%Y.%m.%d')
                now_time = now.strftime('%H:%M:%S')
                difficulty = "EASY" if self.n == 3 else "HARD"
                f.write(f"\n{now_date},{now_time},{100 * self.score:.2f},{difficulty}")
        print(f"Final score: {100 * self.score:.2f} points")
# ===== Functions ===== #
# runs a round of number baseball
def play_baseball(n: int) -> None:
    game = Game(n)
    for i in range(TRY_LIMIT):
        user_input = input(f"Enter a {n}-digit number using 0-9 with no repeated digits (give up: -1): ")
        if user_input == "-1":
            print("You gave up the game.")
            game.record_score(False)
            return
        elif not game.check(user_input):
            continue
        elif game.play(user_input):
            print("Congratulations! You guessed the answer!")
            game.record_score(True)
            return
    print("You did not guess the number within the allowed attempts.")
    game.record_score(False)
# asks the user to choose a difficulty
def get_input() -> str:
    print("=" * 30)
    print("Starting number baseball.")
    print(f"Best score: {records.iloc[0]['Score']:.2f} points (achieved on {records.iloc[0]['Difficulty']})")
    result = input(f"{EASY}: EASY (3 digits), {HARD}: HARD (4 digits), {HISTORY}: view rankings, {EXIT}: quit\n")
    print("-" * 30)
    return result
def load_history() -> None:
global records
data = | pd.read_csv(DATA_FILE) | pandas.read_csv |
import multiprocessing
import operator
import os
from six.moves import xrange
import pandas as pd
COMP_OP_MAP = {'>=': operator.ge,
'>': operator.gt,
'<=': operator.le,
'<': operator.lt,
'=': operator.eq,
'!=': operator.ne}
def get_output_row_from_tables(l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices=None,
r_out_attrs_indices=None):
output_row = []
# add ltable id attr
output_row.append(l_row[l_key_attr_index])
# add rtable id attr
output_row.append(r_row[r_key_attr_index])
# add ltable output attributes
if l_out_attrs_indices:
for l_attr_index in l_out_attrs_indices:
output_row.append(l_row[l_attr_index])
# add rtable output attributes
if r_out_attrs_indices:
for r_attr_index in r_out_attrs_indices:
output_row.append(r_row[r_attr_index])
return output_row
def get_output_header_from_tables(l_key_attr, r_key_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix):
output_header = []
output_header.append(l_out_prefix + l_key_attr)
output_header.append(r_out_prefix + r_key_attr)
if l_out_attrs:
for l_attr in l_out_attrs:
output_header.append(l_out_prefix + l_attr)
if r_out_attrs:
for r_attr in r_out_attrs:
output_header.append(r_out_prefix + r_attr)
return output_header
def convert_dataframe_to_list(table, join_attr_index,
remove_null=True):
table_list = []
for row in table.itertuples(index=False):
if remove_null and | pd.isnull(row[join_attr_index]) | pandas.isnull |
import joblib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from reports import mdl_results
from rolldecayestimators import logarithmic_decrement
from rolldecayestimators import lambdas
from sklearn.pipeline import Pipeline
from rolldecayestimators import measure
from rolldecayestimators.direct_estimator_cubic import EstimatorQuadraticB, EstimatorCubic, EstimatorQuadratic
def get_models_zero_speed():
mask = mdl_results.df_rolldecays['ship_speed'] == 0
df_rolldecays_zero = mdl_results.df_rolldecays.loc[mask].copy()
return _get_models(df=df_rolldecays_zero)
def get_models_speed():
mask = mdl_results.df_rolldecays['ship_speed'] > 0
df_rolldecays = mdl_results.df_rolldecays.loc[mask].copy()
return _get_models(df=df_rolldecays)
def get_models():
df = mdl_results.df_rolldecays
return _get_models(df=df)
def _get_models(df):
models = {}
for id, row in df.iterrows():
model_file_path = '../../models/KVLCC2_%i.pkl' % id
models[id] = joblib.load(model_file_path)['estimator']
return models
def gather_results(models):
df_results = | pd.DataFrame() | pandas.DataFrame |
# license: Creative Commons License
# Title: Big data strategies seminar. Challenge 1. www.iaac.net
# Created by: <NAME>
#
# is licensed under a license Creative Commons Attribution 4.0 International License.
# http://creativecommons.org/licenses/by/4.0/
# This script uses pandas for data management; for more information visit pandas.pydata.org/
# The tasks for joins and merges are described here: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
# The options for scatterplots with seaborn: https://seaborn.pydata.org/generated/seaborn.scatterplot.html
#
import pandas as pd
from pandas import plotting
import geopandas
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
plotting.register_matplotlib_converters()
######################################################
# Read the different files starting with the last file
neighbourhoods = geopandas.read_file('../data/opendatabcn/neighbourhoods_barcelona_wgs84.geojson')
irf_2007 = pd.read_csv('../data/opendatabcn/2007_distribucio_territorial_renda_familiar.csv')
irf_2008 = pd.read_csv('../data/opendatabcn/2008_distribucio_territorial_renda_familiar.csv')
irf_2009 = | pd.read_csv('../data/opendatabcn/2009_distribucio_territorial_renda_familiar.csv') | pandas.read_csv |
#%% [markdown]
# # Basics of Beamforming and Source Localization with Steered Response Power
# ## Motivation
# Beamforming is a technique to spatially extract a desired signal and suppress noise. It is applied in many different domains, for example radar, mobile radio, hearing aids, and speech-enabled IoT devices.
#
# ## Signal Model
# 
# Model Description:
# $$\underline{X}(\Omega) = \underline{A}^{\text{ff}}(\Omega) \cdot S(\Omega)$$
# ## Beamforming
# Beamforming or spatial filtering is an array processing technique used to improve the quality of the desired signal in the presence of noise. This filtering is accomplished by a linear combination of the recorded signals $X_m(\Omega)$ and the beamformer weights $W_m(\Omega)$. In other words, the filtered microphone signals are summed together (compare with figure below). When the filter weights are configured correctly, the desired signal is superimposed constructively.
# 
# Image shows a filter and sum beamformer. Microphone signals $\underline{X}(\Omega)$ are multiplied with the beamformer weights $\underline{W}(\Omega)$ and then accumulated to the beamformer output signal $Y(\Omega)$.
# $$Y(\Omega) = \underline{W}^\text{H}(\Omega) \cdot \underline{X}(\Omega)$$
#%%
# Imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
np.set_printoptions(precision=3)
#%%[markdown]
# ## Parameter
#%%
varphi = 45 / 180 * np.pi # Angle of attack of the Source S(\Omega) in relation to the mic array
c = 343000 # Velocity of sound in mm/s
mic = 6 # count of mics
d = 20 # distance in mm
fs = 16000 # Sample rate
n_fft = 512 # Fourier Transform length
n_spec = 257 # Number of frequency bins
n_dir = 180 # Number of directions which the steering vector is steered to
#%%[markdown]
# ## Microphone Positions
# `pos_y` and `pos_x` are the microphone positions. The array is a Uniform Linear Array (ULA), as shown in the figure below.
#%%
pos_y = np.zeros((1,mic))
pos_x = np.r_[0.:mic]*d
fig, ax = plt.subplots()
ax.scatter(pos_x, pos_y, c='tab:red', alpha=1, edgecolors='white')
plt.ylabel('Y Position [mm]')
plt.xlabel('X Position [mm]')
plt.ylim((-50, 50))
#%%[markdown]
# ## Free Field model and delay vectors
# In the free-field (far-field) model, source $q$ arrives as a plane wave from direction $\varphi_q$, so each microphone observes a delayed copy of the source signal; the relative delays appear as phase shifts in the frequency domain:
#$$\underline A_q^{\text{ff}}(\Omega) = \exp\big(-j\Omega f_s \Delta\underline \tau(\varphi_q)\big),$$
# Calculate the delay vectors to each microphone to the source $q$ in the frequency domain:
#%%
tau = (pos_x*np.cos(varphi)+pos_y*np.sin(varphi))/c #calculating delay vector tau (in the time domain) depending on the array geometry.
tau = tau.reshape([mic,1,1])
Omega_array = np.r_[0.:n_spec].T*np.pi/n_fft*2
Omega_array = Omega_array.reshape([1,1,n_spec])
A_ff = np.exp(-1j*Omega_array*fs*tau)
#%%
tmp = np.squeeze(np.round(np.angle(A_ff[:,:,:])/np.pi*180))
plt.plot(tmp.T)
plt.ylabel("Angle [Deg]")
plt.xlabel("Frequency [Bin]")
#%%[markdown]
# The plot shows the phase angle of the complex spectral time delays of the desired signal between reference microphone 1 and the other microphones. For higher frequencies the angle grows because the signal oscillates faster, which means that for the same time delay different frequencies exhibit different phase differences between two microphones.
# ## Delay and Sum Beamformer
# The delay-and-sum beamformer uses the free-field steering vectors as weights, $\underline W(\Omega,\varphi) = \exp\big(-j\Omega f_s \Delta\underline \tau(\varphi)\big)$, so signals arriving from the steering direction $\varphi$ are time-aligned and add up coherently, while signals from other directions are attenuated.
# ## Calculate the steering vectors W_H for the localization:
#%%
angle_array = np.c_[0:360:360/n_dir]/180*np.pi
tau_steering = (pos_x*np.cos(angle_array)+pos_y*np.sin(angle_array))/c
tau_steering = tau_steering.T.copy()
tau_steering = tau_steering.reshape([mic,1,1,n_dir])
W = np.exp(-1j*Omega_array.reshape([1,1,n_spec,1])*fs*tau_steering)
W.shape
W_H = W.reshape([1,mic,n_spec,n_dir]).conj()
W_H.shape
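#%%[markdown]
# ## Steered Response Power (illustrative sketch)
# A minimal delay-and-sum sketch under the assumption that the observed signal is
# just the noise-free free-field model, $\underline X(\Omega) = \underline A^{\text{ff}}(\Omega)$:
# the beamformer output $Y(\Omega,\varphi) = \underline W^\text{H}(\Omega,\varphi)\underline X(\Omega)$
# is evaluated for every steering direction and its power is summed over all
# frequency bins. The resulting steered response power map should peak at the
# true source angle (with the usual front/back ambiguity of a linear array).
#%%
X = A_ff.reshape([1, mic, n_spec, 1])            # free-field model used as the observed signal
Y = np.sum(W_H * X, axis=1)                      # beamformer output, shape (1, n_spec, n_dir)
srp = np.squeeze(np.sum(np.abs(Y)**2, axis=1))   # steered response power per direction
plt.plot(np.squeeze(angle_array) / np.pi * 180, srp / srp.max())
plt.xlabel('Steering angle [Deg]')
plt.ylabel('Normalized steered response power')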
#%%[markdown]
# ## Spatial Covariance
# Another important signal property is the covariance that describes the interdependencies between the microphone signals $\underline X(\Omega)$. To obtain this covariance, it is presumed that the signals are stochastic. When only considering one source ($Q=1$),
# the spatial covariance matrix can be denoted as
# $$\mathbf \Phi_{xx}(\Omega) = \text{E}\{\underline X(\Omega)\underline X^H(\Omega)\}$$
# $$ = \underline A(\Omega) \text{E} \{ S'(\Omega) S'^*(\Omega)\}\underline A^H(\Omega) + \text{E}\{\underline V(\Omega)\underline V^H(\Omega)\}$$
# $$ = \mathbf \Phi_{ss}(\Omega) + \mathbf \Phi_{vv}(\Omega),$$
# where $E\{\cdot\}$ represents the expectation value operator, $^*$ denotes the complex conjugate operator, $\mathbf \Phi_{ss}(\Omega)$ represents the source correlation matrix, $\mathbf \Phi_{vv}(\Omega)$ the noise correlation matrix and $(\cdot)^H$ the Hermitean operator.
# If we assume noise is absent ($\underline V(\Omega)=0$) and the source has unit power, $\text{E}\{S(\Omega)S^*(\Omega)\}=1$, then the formula for the spatial covariance matrix $\mathbf \Phi_{xx}(\Omega)$ reduces to
# $$\mathbf \Phi_{xx}(\Omega) = \underline A(\Omega) \underline A^H(\Omega) $$
#%%
A_ff_H = A_ff.reshape([1,mic,n_spec]).copy()
A_ff_H = A_ff_H.conj()
phi_xx = A_ff_H * A_ff
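# Broadcasting the (mic, 1, n_spec) model against its (1, mic, n_spec) conjugate
# yields the mic x mic outer product A(Omega) A^H(Omega) for every frequency bin.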
#%%
df = | pd.DataFrame(phi_xx[:,:,50]) | pandas.DataFrame |
__author__ = "unknow"
__copyright__ = "Sprace.org.br"
__version__ = "1.0.0"
import pandas as pd
import sys
from math import sqrt
import sys
import os
import ntpath
import scipy.stats
import seaborn as sns
from matplotlib import pyplot as plt
#sys.path.append('/home/silvio/git/track-ml-1/utils')
#sys.path.append('../')
from core.utils.tracktop import *
#def create_graphic(reconstructed_tracks, original_tracks, tracks_diffs):
def create_graphic_org(**kwargs):
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('path_original_track'):
path_original_track = kwargs.get('path_original_track')
if kwargs.get('tracks'):
tracks = kwargs.get('tracks')
dfOriginal = | pd.read_csv(original_tracks) | pandas.read_csv |
import simpledf as sdf
import pandas as pd
import numpy as np
from unittest import TestCase
def f(y):
''' A custom function that changes the shape of dataframes '''
y['Mean'] = np.mean(y['Data'].values)
return y
class TestApply(TestCase):
def test_apply_basic(self):
x = | pd.DataFrame({'Data': [1, 2, 3], 'Group': ['A', 'B', 'B']}) | pandas.DataFrame |
'''GDELTeda.py
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
Class for collecting Pymongo and Pandas operations to automate EDA on
subsets of GDELT records (Events/Mentions, GKG, or joins).
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations. If those directories are not
already present, a fallback method for string-literal directory reorientation
may be found in '__init__()' at this tag: # A02b - Project directory path.
Specification for any given user's main project directory should be made for
that os.chdir() call.
See also GDELTbase.py, tag # A01a - backup path specification, as any given
user's project directory must be specified there, also.
Contents:
A00 - GDELTeda
A01 - shared class data
A02 - __init__ with instanced data
A02a - Project directory maintenance
A02b - Project directory path specification
Note: Specification at A02b should be changed to suit a user's desired
directory structure, given their local filesystem.
B00 - class methods
B01 - batchEDA()
B02 - eventsBatchEDA()
B03 - mentionsBatchEDA()
B04 - gkgBatchEDA()
Note: see GDELTedaGKGhelpers.py for helper function code & docs
B05 - realtimeEDA()
B06 - loopEDA()
C00 - main w/ testing
C01 - previously-run GDELT realtime EDA testing
'''
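# A hypothetical interactive session (sketch only; it assumes the default
# GDELTeda() constructor arguments are sufficient and that a local MongoDB
# instance has already been populated via GDELTbase):
#   from GDELTeda import GDELTeda
#   eda = GDELTeda()
#   eda.realtimeEDA()   # profile the most recently posted GDELT datafiles
#   eda.batchEDA()      # profile previously collected batch records by table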
import json
import multiprocessing
import numpy as np
import os
import pandas as pd
import pymongo
import shutil
import wget
from datetime import datetime, timedelta, timezone
from GDELTbase import GDELTbase
from GDELTedaGKGhelpers import GDELTedaGKGhelpers
from pandas_profiling import ProfileReport
from pprint import pprint as pp
from time import time, sleep
from urllib.error import HTTPError
from zipfile import ZipFile as zf
# A00
class GDELTeda:
'''Collects Pymongo and Pandas operations for querying GDELT records
subsets and performing semi-automated EDA.
Shared class data:
-----------------
logPath - dict
Various os.path objects for EDA log storage.
configFilePaths - dict
Various os.path objects for pandas_profiling.ProfileReport
configuration files, copied to EDA log storage directories upon
__init__, for use in report generation.
Instanced class data:
--------------------
gBase - GDELTbase instance
Used for class member functions, essential for realtimeEDA().
Class methods:
-------------
batchEDA()
eventsBatchEDA()
mentionsBatchEDA()
gkgBatchEDA()
realtimeEDA()
loopEDA()
Helper functions from GDELTedaGKGhelpers.py used in gkgBatchEDA():
pullMainGKGcolumns()
applyDtypes()
convertDatetimes()
convertGKGV15Tone()
mainReport()
locationsReport()
countsReport()
themesReport()
personsReport()
organizationsReport()
'''
# A01 - shared class data
# These paths are set relative to the location of this script, one directory
# up and in 'EDAlogs' parallel to the script directory, which can be named
# arbitrarily.
logPath = {}
logPath['base'] = os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'EDAlogs')
logPath['events'] = {}
logPath['events'] = {
'table' : os.path.join(logPath['base'], 'events'),
'batch' : os.path.join(logPath['base'], 'events', 'batch'),
'realtime' : os.path.join(logPath['base'], 'events', 'realtime'),
}
logPath['mentions'] = {
'table' : os.path.join(logPath['base'], 'mentions'),
'batch' : os.path.join(logPath['base'], 'mentions', 'batch'),
'realtime' : os.path.join(logPath['base'], 'mentions', 'realtime'),
}
logPath['gkg'] = {
'table' : os.path.join(logPath['base'], 'gkg'),
'batch' : os.path.join(logPath['base'], 'gkg', 'batch'),
'realtime' : os.path.join(logPath['base'], 'gkg', 'realtime'),
}
# Turns out, the following isn't the greatest way of keeping track
# of each configuration file. It's easiest to just leave them in the
# exact directories where ProfileReport.to_html() is aimed (via
# os.chdir()), since it's pesky maneuvering outside parameters into
# multiprocessing Pool.map() calls.
# Still, these can and are used in realtimeEDA(), since the size of
# just the most recent datafiles should permit handling them without
# regard for Pandas DataFrame RAM impact (it's greedy, easiest method
# for mitigation is multiprocessing threads, that shouldn't be
# necessary for realtimeEDA()).
# Regardless, all these entries are for copying ProfileReport config
# files to their appropriate directories for use, given base-copies
# present in the 'scripts' directory. Those base copies may be edited
# in 'scripts', since each file will be copied from there.
configFilePaths = {}
configFilePaths['events'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_realtime.yaml"),
}
configFilePaths['mentions'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_realtime.yaml"),
}
configFilePaths['gkg'] = {}
configFilePaths['gkg']['batch'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_batch.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_batch.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_batch.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_batch.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_batch.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_batch.yaml"),
}
configFilePaths['gkg']['realtime'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_realtime.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_realtime.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_realtime.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_realtime.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_realtime.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_realtime.yaml"),
}
# A02
def __init__(self, tableList = ['events', 'mentions', 'gkg']):
'''GDELTeda class initialization, takes a list of GDELT tables to
perform EDA on. Instantiates a GDELTbase() instance for use by class
methods and checks for presence of EDAlogs directories, creating them if
they aren't present, and copying all ProfileReport-required config files
to their applicable directories.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Controls detection and creation of .../EDALogs/... subdirectories for
collection of Pandas Profiling ProfileReport HTML EDA document output.
Also controls permission for class member functions to perform
operations on tables specified by those functions' tableList parameters
as a failsafe against a lack of project directories required for those
operations, specifically output of HTML EDA documents.
output:
------
Produces exhaustive EDA for GDELT record subsets for specified tables
through Pandas Profiling ProfileReport-output HTML documents.
All procedurally automated steps towards report generation are shown
in console output during script execution.
'''
# instancing tables for operations to be passed to member functions
self.tableList = tableList
print("Instantiating GDELTeda...\n")
self.gBase = GDELTbase()
if 'events' not in tableList and \
'mentions' not in tableList and \
'gkg' not in tableList:
print("Error! 'tableList' values do not include a valid GDELT table.",
"\nPlease use one or more of 'events', 'mentions', and/or 'gkg'.")
# instancing trackers for realtimeEDA() and loopEDA()
self.realtimeStarted = False
self.realtimeLooping = False
self.realtimeWindow = 0
self.lastRealDatetime = ''
self.nextRealDatetime = ''
# A02a - Project EDA log directories confirmation and/or creation, and
# Pandas Profiling ProfileReport configuration file copying from 'scripts'
# directory.
print(" Checking log directory...")
if not os.path.isdir(self.logPath['base']):
print(" Doesn't exist! Making...")
# A02b - Project directory path
# For obvious reasons, any user of this script should change this
# string to suit their needs. The directory described with this string
# should be one directory above the location of the 'scripts' directory
# this file should be in. If this file is not in 'scripts', unpredictable
# behavior may occur, and no guarantees of functionality are intended for
# such a state.
os.chdir('C:\\Users\\urf\\Projects\\WGU capstone')
os.mkdir(self.logPath['base'])
for table in tableList:
# switch to EDAlogs directory
os.chdir(self.logPath['base'])
# Branch: table subdirectories not found, create all
if not os.path.isdir(self.logPath[table]['table']):
print("Did not find .../EDAlogs/", table, "...")
print(" Creating .../EDAlogs/", table, "...")
os.mkdir(self.logPath[table]['table'])
os.chdir(self.logPath[table]['table'])
print(" Creating .../EDAlogs/", table, "/batch")
os.mkdir(self.logPath[table]['batch'])
print(" Creating .../EDAlogs/", table, "/realtime")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Branch: table subdirectories found, create batch/realtime directories
# if not present.
else:
print(" Found .../EDAlogs/", table,"...")
os.chdir(self.logPath[table]['table'])
if not os.path.isdir(self.logPath[table]['batch']):
print(" Did not find .../EDAlogs/", table, "/batch , creating...")
os.mkdir(self.logPath[table]['batch'])
if not os.path.isdir(self.logPath[table]['realtime']):
print(" Did not find .../EDAlogs/", table, "/realtime , creating...")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Copying pandas_profiling.ProfileReport configuration files
print(" Copying configuration files...\n")
if table == 'gkg':
# There's a lot of these, but full normalization of GKG is
# prohibitively RAM-expensive, so reports need to be generated for
# both the main columns and the main columns normalized for each
# variable-length subfield.
shutil.copy(self.configFilePaths[table]['realtime']['main'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['locations'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['counts'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['themes'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['persons'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['organizations'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['main'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['locations'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['counts'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['themes'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['persons'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['organizations'],
self.logPath[table]['batch'])
else:
shutil.copy(self.configFilePaths[table]['realtime'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch'],
self.logPath[table]['batch'])
# B00 - class methods
# B01
def batchEDA(self, tableList = ['events','mentions','gkg']):
'''Reshapes and re-types GDELT records for generating Pandas
Profiling ProfileReport()-automated, simple EDA reports from Pandas
DataFrames, from MongoDB-query-cursors.
WARNING: extremely RAM, disk I/O, and processing intensive. Be aware of
what resources are available for these operations at runtime.
Relies on Python multiprocessing.Pool.map() calls against class member
functions eventsBatchEDA() and mentionsBatchEDA(), and a regular call on
gkgBatchEDA(), which uses multiprocessing.Pool.map() calls within it.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting analysis to one or more tables.
Output:
------
Displays progress through the function's operations via console output
while producing Pandas Profiling ProfileReport.to_file() html documents
for each table named in tableList.
'''
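# The per-table RAM-isolation idiom used below, in general form (sketch;
# 'workerFunction' is illustrative):
#
#   pool = multiprocessing.Pool(1)
#   result = pool.map(workerFunction, ['batch'])  # single worker, single call
#   pool.close()   # worker memory is released once the pool is
#   pool.join()    # closed and joined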
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
for table in tableList:
print("\n------------------------------------------------------------\n")
print("Executing batch EDA on GDELT table", table, "records...")
# WARNING: RAM, PROCESSING, and DISK I/O INTENSIVE
# Events and Mentions are both much easier to handle than GKG, so
# they're called in their own collective function threads with
# multiprocessing.Pool(1).map().
if table == 'events':
os.chdir(self.logPath['events']['batch'])
pool = multiprocessing.Pool(1)
eventsReported = pool.map(GDELTeda.eventsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'mentions':
os.chdir(self.logPath['mentions']['batch'])
pool = multiprocessing.Pool(1)
mentionsReported = pool.map(GDELTeda.mentionsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'gkg':
# Here's the GKG bottleneck! Future investigation of parallelization
# improvements may yield gains here, as normalization of all subfield
# and variable-length measures is very RAM expensive, given the
# expansion in records required.
# So, current handling of GKG subfield and variable-length measures
# is isolating most operations in their own process threads within
# gkgBatchEDA() execution, forcing deallocation of those resources upon
# each Pool.close(), as with Events and Mentions table operations above
# which themselves do not require any additional subfield handling.
os.chdir(self.logPath['gkg']['batch'])
self.gkgBatchEDA()
# B02
def eventsBatchEDA(mode):
'''Performs automatic EDA on GDELT Events record subsets. See
function batchEDA() for "if table == 'events':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Events records up to at least the size of the batch EDA test subset used
in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter is included to meet Python multiprocessing.Pool.map()
function requirements. As such, it is present only to receive a
parameter determined by map(), e.g. one iteration of the function will
execute.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
columnNames = [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': pd.StringDtype(),
'Actor2Geo_Type': type(1),
'Actor2Geo_FullName': pd.StringDtype(),
'Actor2Geo_Lat': pd.StringDtype(),
'Actor2Geo_Long': pd.StringDtype(),
'ActionGeo_Type': type(1),
'ActionGeo_FullName': pd.StringDtype(),
'ActionGeo_Lat': pd.StringDtype(),
'ActionGeo_Long': pd.StringDtype(),
'DATEADDED' : pd.StringDtype(),
'SOURCEURL': pd.StringDtype(),
}
timecheckG = time()
print(" Creating another thread-safe connection to MongoDB...")
localDb = {}
localDb['client'] = pymongo.MongoClient()
localDb['database'] = localDb['client'].capstone
localDb['collection'] = localDb['database'].GDELT.events
configFilePath = "GDELTeventsEDAconfig_batch.yaml"
datetimeField = "DATEADDED"
datetimeFormat = "%Y-%m-%dT%H:%M:%S.000000Z"
strftimeFormat = "%Y-%m-%dh%Hm%M"
print(" Pulling events records (long wait)... ", end = '')
eventsDF = pd.DataFrame.from_records(
list(localDb['collection'].find(projection = {"_id" : False},
allow_disk_use=True,
no_cursor_timeout = True)),
columns = columnNames,
)
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
timecheckG = time()
print(" Setting dtypes... ", end='')
eventsDF = eventsDF.astype(dtype = columnTypes, copy = False)
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
print(" Converting datetimes...", end = '')
eventsDF[datetimeField] = pd.to_datetime(eventsDF[datetimeField],
format = datetimeFormat)
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
print("\n Events records DataFrame .info():\n")
print(eventsDF.info())
edaDates = "".join([
eventsDF[datetimeField].min().strftime(strftimeFormat),"_to_",
eventsDF[datetimeField].max().strftime(strftimeFormat),
])
edaLogName = "".join(["GDELT_events_EDA_", edaDates,".html"])
timecheckG = time()
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
timecheckG = time()
print(" File output:", edaLogName, "\n")
print(" Generating events 'batch' EDA report...")
eventsProfile = ProfileReport(eventsDF, config_file = configFilePath)
eventsProfile.to_file(edaLogName)
del eventsDF
del eventsProfile
print(" Complete!( %0.fd s )" % (float(time())-float(timecheckG)))
print("All Events EDA operations complete. Please check EDAlogs",
"directories for any resulting Events EDA profile reports.")
return True
# B03
def mentionsBatchEDA(mode):
'''Performs automatic EDA on GDELT Mentions record subsets. See
function batchEDA() for "if table == 'mentions':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Mentions records up to at least the size of the batch EDA test subset
used in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter is included to meet Python multiprocessing.Pool.map()
function requirements. As such, it is present only to receive a
parameter determined by map(), i.e. only one iteration of the
function will execute.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
print(" Creating new thread-safe connection to MongoDB...")
columnNames = [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'EventTimeDate' : pd.StringDtype(),
'MentionTimeDate' : pd.StringDtype(),
'MentionType' : pd.StringDtype(),
'MentionSourceName' : pd.StringDtype(),
'MentionIdentifier' : pd.StringDtype(),
'InRawText' : type(True),
'Confidence' : type(1),
'MentionDocTone' : type(1.1),
}
localDb = {}
localDb['client'] = pymongo.MongoClient()
localDb['database'] = localDb['client'].capstone
localDb['collection'] = localDb['database'].GDELT.mentions
configFileName = "GDELTmentionsEDAconfig_batch.yaml"
datetimeField01 = "MentionTimeDate"
datetimeField02 = "EventTimeDate"
datetimeFormat = "%Y-%m-%dT%H:%M:%S.000000Z"
strftimeFormat = "%Y-%m-%dh%Hm%M"
print("\n Pulling mentions records (long wait)...")
tableDF = pd.DataFrame.from_records(
list(localDb['collection'].find(projection = {"_id" : False},
allow_disk_use=True,
no_cursor_timeout = True)),
columns = columnNames,
)
print(" Complete!")
print(" Setting dtypes...")
tableDF = tableDF.astype(dtype = columnTypes, copy = False)
print(" Complete!")
print(" Converting datetimes...")
tableDF[datetimeField01] = pd.to_datetime(tableDF[datetimeField01],
format = datetimeFormat)
tableDF[datetimeField02] = pd.to_datetime(tableDF[datetimeField02],
format = datetimeFormat)
print(" Complete!")
print(" Mentions records DataFrame .info():")
print(tableDF.info())
edaDates = "".join([
tableDF[datetimeField01].min().strftime(strftimeFormat),"_to_",
tableDF[datetimeField01].max().strftime(strftimeFormat),
])
edaLogName = "".join(["GDELT_mentions_EDA_", edaDates,".html"])
print(" File output:", edaLogName, "\n")
print("\n Generating mentions 'batch' EDA report...")
profile = ProfileReport(tableDF, config_file= configFileName)
profile.to_file(edaLogName)
print("\n Complete!")
return True
# B04
def gkgBatchEDA(self):
'''Performs automatic EDA on GDELT Global Knowledge Graph (GKG)
record subsets.
Makes use of these helper functions for multiprocessing.Pool.map()
calls, from GDELTedaGKGhelpers. a separate file as part of ensuring
compatibility with this class's Python.multiprocessing calls (all 'AXX'
tags are for 'find' use in GDELTedaGKGhelpers.py):
A02 - pullMainGKGcolumns
A03 - applyDtypes
A04 - convertDatetimes
A05 - convertGKGV15Tone
A06 - mainReport
A07 - locationsReport
A08 - countsReport
A09 - themesReport
A10 - personsReport
A11 - organizationsReport
The intent behind this implementation is to reduce the amount of total
RAM required for all operations, as .close() upon appropriate process
pools should result in deallocation of their memory structures, which
just isn't going to be forced otherwise due to Pandas and Python memory
handling.
Well-known issues with underlying treatment of allocation and
deallocation of DataFrames, regardless of whether all references to a
DataFrame are passed to 'del' statements, restrict completion of the
processing necessary for normalization of all GKG columns, which is
necessary for execution of EDA on those columns. The apparent RAM
requirements for those operations on the batch test GKG data set are not
mitigable under these hardware circumstances, barring further subset of
the data into small enough component pieces with each their own EDA
profile, which could not be converted to batch EDA without processing
outside the scope of this capstone project.
The uncommented code in this function represent a working state which
can produce full batch EDA on at least the primary information-holding
GKG columns, but not for the majority of variable-length and subfielded
columns. V1Locations batch EDA has been produced from at least one
attempt, but no guarantee of its error-free operation on similarly-sized
subsets of GDELT GKG records is intended or encouraged.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
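# Each variable-length GKG subfield is normalized by its helper with the same
# general pattern (sketch; 'V1Locations' shown, other subfields analogous):
#
#   subDF = tableDF.explode('V1Locations')              # one row per dict
#   subcols = pd.json_normalize(subDF['V1Locations'])   # dict keys -> columns
#   subcols.columns = [f"V1Locations_{c}" for c in subcols.columns]
#   subDF = subDF.drop(columns = ['V1Locations']).join(subcols)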
timecheck = time()
print(" Pulling non-variable-length GKG columns...")
pool = multiprocessing.Pool(1)
# For pullMainGKGcolumns documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A02'
tableDF = pool.map(GDELTedaGKGhelpers.pullMainGKGcolumns, ['batch'])
pool.close()
pool.join()
print(" Records acquired. ( %0.3f s )" % (float(time())-float(timecheck)))
print("\n GKG records DataFrame .info():")
tableDF = tableDF.pop()
pp(tableDF.info())
timecheck = time()
print("\n Setting dtypes...")
# This only sets 'GKGRECORDID', 'V21DATE', 'V2SourceCommonName',
# and 'V2DocumentIdentifier' dtypes to pd.StringDtype()
pool = multiprocessing.Pool(1)
# For applyDtypes documentation, See GDELTedaGKGhelpers.py, 'find'
# tag '# A03'
tableDF = pool.map(GDELTedaGKGhelpers.applyDtypes, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
tableDF = tableDF.pop()
timecheck = time()
print("\n Converting datetimes...")
pool = multiprocessing.Pool(1)
# For convertDatetimes documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A04'
tableDF = pool.map(GDELTedaGKGhelpers.convertDatetimes, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
tableDF = tableDF.pop()
timecheck = time()
print("\n Splitting V15Tone dicts to columns...")
pool = multiprocessing.Pool(1)
# For convertGKGV15Tone code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A05'
tableDF = pool.map(GDELTedaGKGhelpers.convertGKGV15Tone, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
tableDF = tableDF.pop()
print("\n Main GKG records (non-variable-length columns) DataFrame",
".info():\n")
pp(tableDF.info())
# Generating report excluding fields that require substantially more
# records (normalization), and so more resources for reporting.
timecheck = time()
print("\n Generating non-variable-length subfield report...")
pool = multiprocessing.Pool(1)
# For mainReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A06'
booleanSuccess = pool.map(GDELTedaGKGhelpers.mainReport, [tableDF])
pool.close()
pool.join()
print("\n Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
# # These columns may be dropped from this point on in order to
# # accommodate the increased RAM, CPU, and disk I/O requirements for
# # normalizing variable length columns, but this is commented out in order
# # to further check RAM requirements for full normalization.
# timecheck = time()
# print("\n Dropping excess columns before normalizing for variable-length",
# "columns...")
# tableDF.drop(columns = ['V2SourceCommonName',
# 'V2DocumentIdentifier',
# 'V15Tone_Positive',
# 'V15Tone_Negative',
# 'V15Tone_Polarity',
# 'V15Tone_ARD',
# 'V15Tone_SGRD',
# 'V15Tone_WordCount'], inplace = True)
# print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
# Working implementation with locationsReport
timecheck = time()
print("\n Splitting V1Locations dicts and generating report...")
pool = multiprocessing.Pool(1)
# For locationsReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A07'
booleanSuccess = pool.map(GDELTedaGKGhelpers.locationsReport, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
'''
# This section of calls are commented out due to their current inability
# to complete processing for the batch EDA test subset of GKG records.
# Future attempts to complete these sections will see this comment area
# removed and this section cleaned of work-in-progress documentation.
# Non-working implementation of countsReport. This may be due to Pandas
# limitations for long-running jobs, but the DataFrame being normalized is
# also simply huge, thanks to the variable number of subfielded 'V1Counts'
# values per record.
# timecheck = time()
# print("\n Splitting V1Counts lists and generating report...")
# pool = multiprocessing.Pool(1)
# # For countsReport code/documentation, See GDELTedaGKGhelpers.py,
# # 'find' tag '# A08'
# booleanSuccess = pool.map(GDELTedaGKGhelpers.countsReport, [tableDF])
# pool.close()
# pool.join()
# print(" Complete! (%0.3f seconds)" % (float(time())-float(timecheck)))
# Ditto the rest of the normalization helper functions: the normalization
# required for EDA on a GKG subset this large is just too expensive, because
# these fields hold many values of widely varying (sometimes extreme) length.
# If Pandas Profiling could be coerced into tolerating sparse columns of
# varying type, this wouldn't matter, because I could leave all but the key
# column values as NaN. Instead, Pandas allocates full-width memory
# structures for every row: with over 16 million normalized records but only
# ~8 million real values in a column, it still demands 64-bit float storage
# for every empty slot, until the allocation fails and the process dies.
timecheck = time()
print("\n Splitting V1Themes lists and generating report...")
pool = multiprocessing.Pool(1)
# For themesReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A09'
booleanSuccess = pool.map(GDELTedaGKGhelpers.themesReport, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
timecheck = time()
print("\n Generating Persons report...")
pool = multiprocessing.Pool(1)
# For personsReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A10'
booleanSuccess = pool.map(GDELTedaGKGhelpers.personsReport, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
timecheck = time()
print("\n Generating Organizations report...")
pool = multiprocessing.Pool(1)
# For organizationsReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A11'
booleanSuccess = pool.map(GDELTedaGKGhelpers.organizationsReport,[tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
'''
print("All GKG EDA operations complete. Please check EDAlogs directories",
" for any resulting EDA profile reports.")
print("\n--------------------------------------------------------------\n")
# B05
def realtimeEDA(self, tableList = ['events','mentions','gkg']):
'''Performs automatic EDA on the latest GDELT datafiles for records
from Events/Mentions and GKG. This function is enabled by loopEDA() to
download a specified window of datafiles, or else just most-recent
datafiles if called by itself, or for a default loopEDA() call.
Current testing on recent GDELT updates confirms that this function
may complete all EDA processing on each datafile set well within the
fifteen minute window before each successive update.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting of operations to one or more tables.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
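# A hypothetical single-shot call (no looping window), assuming MongoDB is
# running and GDELTbase paths are configured:
#
#   gEda = GDELTeda()
#   gEda.realtimeEDA()   # downloads, cleans, exports to MongoDB, and
#                        # profiles the latest Events/Mentions/GKG datafiles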
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
# using a tuple to track failed execution
return (False, 'tableList')
print("--------------------------------------------------------------\n")
print("Beginning realtime GDELT EDA collection for these tables:")
print(tableList)
# Decrementing remaining iterations to track 'last' run, in order to
# delay report generation until desired window is collected.
if self.realtimeWindow > 1:
self.realtimeWindow -= 1
self.realtimeLooping = True
elif self.realtimeWindow == 1:
self.realtimeWindow -= 1
lastRun = False
if self.realtimeWindow < 1:
lastRun = True
fileURLs = {
'events' : '',
'mentions' : '',
'gkg' : '',
}
priorURLs = {
'events' : '',
'mentions' : '',
'gkg' : '',
}
EDAFiles = {
'events' : [],
'mentions' : [],
'gkg' : [],
}
# Tracking function runtime
timecheckF = time()
# applicable for all tables
datetimeFormat = "%Y-%m-%dT%H:%M:%S.000000Z"
strptimeFormat = "%Y%m%d%H%M%S"
strftimeFormat = "%Y-%m-%dh%Hm%M"
# Downloading and parsing lastupdate.txt
# That text file consists of three lines, one for each main GDELT
# table's last datafile update. URLs/Filenames follow conventions used in
# GDELTbase download, cleaning, and MongoDB export functions, e.g. a base
# URL followed by 14 char numeric strings for the current UTC datetime at
# the resolution of seconds, at 15-minute intervals, followed by csv and
# zip extensions.
# As of 2021/09/08, that page is working and updates on the schedule
# described in GDELT docs. Exact update times likely vary with volume of
# current world news coverage, but should be at least every fifteen
# minutes, with all dates and times reported by UTC zone.
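# Each lastupdate.txt line holds a size, a hash, and then the datafile URL,
# e.g. (hypothetical size/hash/timestamp):
#   167823 2a6f... http://data.gdeltproject.org/gdeltv2/20210908151500.export.CSV.zip
# so the split(' ')[-1] below isolates the URL for each table.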
os.chdir(self.gBase.toolData['path']['base'])
print(" Checking http://data.gdeltproject.org/gdeltv2/lastupdate.txt...")
lastFilesURL = 'http://data.gdeltproject.org/gdeltv2/lastupdate.txt'
lastFiles = wget.download(lastFilesURL, 'lastupdate.txt')
with open(lastFiles) as lastupdate:
lines = lastupdate.readlines()
# Table order is reversed from most other loops in this project, here,
# because list.pop() pulls in reverse and I'm too short on time to bother
# juggling these strings more than necessary. Regardless, GKG is first in
# this order, distinct from most elsewhere.
for table in ['gkg', 'mentions', 'events']:
fileURLs[table] = lines.pop().split(' ')[-1].replace("\n", '')
# form a current datetime from lastupdate.txt strings
thisDatetime = \
datetime.strptime(fileURLs[table][37:51],
strptimeFormat).replace(tzinfo = timezone.utc)
# Form a 'last' datetime and string from the current lastupdates.txt
# datetime string. Worst-case is dead midnight UTC ( 5pm PST, 6pm MST,
# 8pm EST ) since the "last" file before then will be an irregular amount
# of time prior: 00:00 UTC - 22:45 UTC = -1 hour 15 minutes
lastDatetime = (thisDatetime - timedelta(minutes = 15))
if thisDatetime.hour == 0 and thisDatetime.minute == 0:
    lastDatetime = lastDatetime - timedelta(hours = 1)
lastDatestring = lastDatetime.strftime(strptimeFormat)
# First-run datetime, timedelta, and string juggling for generating
# last-most-recent URLS for download.
for table in ['gkg', 'mentions', 'events']:
priorURLs[table] = ''.join([self.gBase.toolData['URLbase'],
lastDatestring, '.',
self.gBase.toolData['extensions'][table]])
# Shouldn't apply for first run, since no last/next file is set yet, and
# shouldn't matter for the last run, since self.realtimeWindow running out
# will halt execution in loopEDA() anyway.
if self.lastRealDatetime != '' and self.nextRealDatetime != '':
if thisDatetime == self.lastRealDatetime:
print("\n----------------------------------------------------------\n")
print("Isn't %s the same as %s ? Too early! No new update yet!" %
(thisDatetime.strftime(strftimeFormat),
self.lastRealDatetime.strftime(strftimeFormat)))
return (False, 'tooEarly')
elif thisDatetime > self.nextRealDatetime:
print("\n----------------------------------------------------------\n")
print("%s is a little later than %s . Too late! We missed one!" %
(thisDatetime.strftime(strftimeFormat),
self.nextRealDatetime.strftime(strftimeFormat)))
return (False, 'tooLate')
print(" URLs acquired:\n")
print("current:")
pp(fileURLs)
print("prior:")
pp(priorURLs)
print("Beginning per-table operations...\n")
for table in tableList:
# B05a - every-table operations
# Note that order of execution for all tables will be project-typical.
# Tracking per-table loop times
timecheckT = time()
print("Trying downloading and cleaning for most recent", table,
"file...")
# making use of alternate-mode functionality for GDELTbase methods.
thisDL = self.gBase.downloadGDELTFile(fileURLs[table], table,
verbose = True, mode = 'realtime')
# Matching the same input formatting requirements, typically performed
# in the 'table' versions of GDELTbase methods
fileName = fileURLs[table].replace(self.gBase.toolData['URLbase'], '')
fileName = fileName.replace('.zip', '')
# cleaning the file (exported to realtimeClean as .json)
thisClean = self.gBase.cleanFile(fileName, verbose = True,
mode = 'realtime')
# tracking prior URLs, still, might delete this section
lastFileName = priorURLs[table].replace(self.gBase.toolData['URLbase'], '')
lastFileName = lastFileName.replace('.zip', '')
# GKG still has different extensions...
if table == 'gkg':
cleanFileName = fileName.replace('.csv', '.json')
cleanLastFileName = lastFileName.replace('.csv', '.json')
else:
cleanFileName = fileName.replace('.CSV', '.json')
cleanLastFileName = lastFileName.replace('.CSV', '.json')
# Each iterative run of this function will add another most-recent
# datafile, so long as it hasn't already been collected and cleaned, but
# the first run should wipe per-table collections before populating 'em
# with records.
if not self.realtimeStarted:
print(" Dropping any old realtime GDELT MongoDB collection...")
self.gBase.localDb['collections']['realtime'][table].drop()
print("Starting MongoDB export for acquired file...")
thisMongo = self.gBase.mongoFile(cleanFileName, table, verbose = True,
mode = 'realtime')
print('')
# Permitting delay of report generation for N iterations
if lastRun:
pass
# bails on this loop iteration if not final realtimeEDA() iteration
else:
continue
# If lastRun == True, EDA processing will be executed in this iteration
# for any records in the 'realtime' MongoDB collection for this table.
print("Beginning EDA processing...")
# switching to table-appropriate logPath directory...
os.chdir(self.logPath[table]['realtime'])
# B05b - Events/Mentions handling
# Per-table records querying, DataFrame shaping, and Pandas Profiling
# EDA ProfileReport() generation.
if table == 'events' or table == 'mentions':
timecheckG = time()
print("\n Loading", table, "realtimeEDA files held locally...",
end = '')
thisDF = pd.DataFrame.from_records(list(
self.gBase.localDb['collections']['realtime'][table].find(
projection = {"_id" : 0},
allow_disk_use = True,
no_cursor_timeout = True,
),
), columns = self.gBase.toolData['names'][table]['reduced'])
print(" ")
print(" Setting dtypes...")
thisDF = thisDF.astype(
dtype = self.gBase.toolData['columnTypes'][table],
copy = False,
)
print(" Converting datetimes...")
if table == 'events':
datetimeField = 'DATEADDED'
# mentions has an extra datetime field, 'EventTimeDate', converted here
if table == 'mentions':
datetimeField = 'MentionTimeDate'
thisDF['EventTimeDate'] = pd.to_datetime(thisDF['EventTimeDate'],
format = datetimeFormat)
thisDF[datetimeField] = pd.to_datetime(thisDF[datetimeField],
format = datetimeFormat)
print("\n ", table, "DataFrame .info():\n")
print(thisDF.info(),'\n')
edaDateString = thisDF[datetimeField].min().strftime(strftimeFormat)
if table == 'events':
configName = "GDELTeventsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_Events_realtime_EDA_", edaDateString,
".html"])
if table == 'mentions':
configName = "GDELTmentionsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_Mentions_realtime_EDA_", edaDateString,
".html"])
print(" File to output:", edaLogName)
profile = ProfileReport(thisDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
del profile
del thisDF
print('')
print('------------------------------------------------------------\n')
# B05c - GKG handling
if table == 'gkg':
print("\n Pulling any", table, "realtime EDA files...", end = '')
timecheckG = time()
thisDF = pd.DataFrame.from_records(list(
self.gBase.localDb['collections']['realtime'][table].find(
projection = {"_id" : 0},
allow_disk_use = True,
no_cursor_timeout = True,
),
), columns = self.gBase.toolData['names']['gkg']['reduced'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
# Reusing GDELTedaGKGhelpers.py functions, since they'll work
# in this context. See that file for code and documentation.
timecheckG = time()
print(" Applying initial dtypes...", end = '')
thisDF = GDELTedaGKGhelpers.applyDtypes(thisDF)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Converting datetimes...", end = '')
thisDF = GDELTedaGKGhelpers.convertDatetimes(thisDF)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
edaDateString = thisDF['V21DATE'].min().strftime(strftimeFormat)
timecheckG = time()
print(" Splitting and forming columns from V15Tone...")
thisDF = GDELTedaGKGhelpers.convertGKGV15Tone(thisDF)
print(" ( took %0.3f s )" % (float(time()) - float(timecheckG)))
# B05d - GKG non-variable-length EDA generation
# Isolating main columns for their own EDA, dropping variable-length
# columns for copy (not inplace).
timecheckG = time()
print(" Starting EDA generation for main GKG columns only...", end='')
mainDF = thisDF.drop(columns = ['V1Locations',
'V1Counts',
'V1Themes',
'V1Persons',
'V1Organizations'])
print(" ( drop/copy: %0.3f s )" % (float(time()) - float(timecheckG)))
print("\n GKG main columns DataFrame .info():\n")
print(mainDF.info())
print('')
# constructing EDA output filename
configName = "GDELTgkgMainEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_GKG_realtime_main_EDA_", edaDateString,
".html"])
# Generating non-variable-length-subfield column EDA
print("\n File to output:", edaLogName)
profile = ProfileReport(mainDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
print("\n ( ProfileReport() + .to_file() : %0.3f s )" %
(float(time()) - float(timecheckG)))
del profile
del mainDF
print(" Continuing processing with separate normalization of each",
"variable-length subfield...\n")
# B05e - V1Locations EDA generation
timecheckG = time()
print(" Exploding V1Locations...", end = '')
locationDF = thisDF.drop(columns = ['V1Counts',
'V1Themes',
'V1Persons',
'V1Organizations'])
locationDF = locationDF.explode('V1Locations')
print(" ( drop/explode: %0.3f s )" % \
(float(time()) - float(timecheckG)))
timecheckG = time()
print(" Normalizing V1Locations...", end = '')
subcols = pd.json_normalize(locationDF['V1Locations'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Renaming columns, dropping old, rejoining, astyping...",
end = '')
subcols.columns = [f"V1Locations_{c}" for c in subcols.columns]
locationDF = locationDF.drop(columns = ['V1Locations']).join(
subcols).astype({'V1Locations_FullName' : pd.StringDtype(),
'V1Locations_CountryCode' : pd.StringDtype(),
'V1Locations_ADM1Code' : pd.StringDtype(),
'V1Locations_FeatureID' : pd.StringDtype(),},
copy = False)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
print("\n GKG Locations-normalized DataFrame .info():\n")
print(locationDF.info())
print(" Setting index to 'GKGRECORDID'...")
locationDF.set_index(keys='GKGRECORDID', drop = True, append = False,
inplace = True, verify_integrity = False)
configName = "GDELTgkgLocationsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_GKG_realtime_locations_EDA_",
edaDateString,
".html"])
timecheckG = time()
print("\n File to output:", edaLogName)
profile = ProfileReport(locationDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
print("\n ( ProfileReport() + .to_file() : %0.3f s )" %
(float(time()) - float(timecheckG)))
del locationDF
del profile
# B05f - V1Counts EDA generation
timecheckG = time()
print(" Exploding V1Counts...", end = '')
countsDF = thisDF.drop(columns = ['V1Locations',
'V1Themes',
'V1Persons',
'V1Organizations'])
print(" ( drop/explode: %0.3f s )" % \
(float(time()) - float(timecheckG)))
countsDF = countsDF.explode('V1Counts')
print(" Normalizing V1Counts...", end = '')
subcols = pd.json_normalize(countsDF['V1Counts'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Renaming columns, dropping old, rejoining,",
"astyping... ", end = '')
subcols.columns = [f"V1Counts_{c}" for c in subcols.columns]
countsDF = countsDF.drop(columns = ['V1Counts']).join(
subcols).astype({
'V1Counts_CountType' : pd.StringDtype(),
'V1Counts_ObjectType' : pd.StringDtype(),
'V1Counts_LocationFullName' : pd.StringDtype(),
'V1Counts_LocationCountryCode' : pd.StringDtype(),
'V1Counts_LocationADM1Code' : pd.StringDtype(),
'V1Counts_LocationFeatureID' : | pd.StringDtype() | pandas.StringDtype |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
scenario_filenames = ["OUTPUT_110011_20201117123025"]
scenario_labels =["Lockdown enabled,Self Isolation,Mask Compliance (0.5)"]
MAX_DAY = 250#250#120
POPULATION = 10000.0
FIGSIZE = [20,10]
plt.rcParams.update({'font.size': 22})
#### comparison of infections
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100*dfg["Infected_count"].values/POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Infected_count"])))+[MAX_DAY],list(100*dfg["Infected_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Infected (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/infected_count_comparison.png")
#### comparison of deaths
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100 * dfg["Death_count"].values / POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Death_count"])))+[MAX_DAY],list(100*dfg["Death_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Deceased (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/death_count_comparison.png")
#### comparison of recoveries
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = | pd.read_csv(simulation_file) | pandas.read_csv |
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from pycytominer import aggregate, normalize
from pycytominer.cyto_utils import (
output,
check_compartments,
check_aggregate_operation,
infer_cp_features,
get_default_linking_cols,
get_default_compartments,
assert_linking_cols_complete,
provide_linking_cols_feature_name_update,
check_fields_of_view_format,
check_fields_of_view,
)
default_compartments = get_default_compartments()
default_linking_cols = get_default_linking_cols()
class SingleCells(object):
"""This is a class to interact with single cell morphological profiles. Interaction
includes aggregation, normalization, and output.
Attributes
----------
file_or_conn : str or pandas.core.frame.DataFrame
A file string or database connection storing the location of single cell profiles.
strata : list of str, default ["Metadata_Plate", "Metadata_Well"]
The columns to groupby and aggregate single cells.
aggregation_operation : str, default "median"
Operation to perform single cell aggregation.
output_file : str, default "none"
If specified, the location to write the file.
compartments : list of str, default ["cells", "cytoplasm", "nuclei"]
List of compartments to process.
compartment_linking_cols : dict, default noted below
Dictionary identifying how to merge columns across tables.
merge_cols : list of str, default ["TableNumber", "ImageNumber"]
Columns indicating how to merge image and compartment data.
image_cols : list of str, default ["TableNumber", "ImageNumber", "Metadata_Site"]
Columns to select from the image table.
feature: str or list of str, default "infer"
List of features that should be aggregated.
load_image_data : bool, default True
Whether or not the image data should be loaded into memory.
subsample_frac : float, default 1
The percentage of single cells to select (0 < subsample_frac <= 1).
subsample_n : str or int, default "all"
How many samples to subsample - do not specify both subsample_frac and subsample_n.
subsampling_random_state : str or int, default "none"
The random state to init subsample.
fields_of_view : list of int, str, default "all"
List of fields of view to aggregate.
object_feature : str, default "Metadata_ObjectNumber"
Object number feature.
Notes
-----
.. note::
the argument compartment_linking_cols is designed to work with CellProfiler output,
as curated by cytominer-database. The default is: {
"cytoplasm": {
"cells": "Cytoplasm_Parent_Cells",
"nuclei": "Cytoplasm_Parent_Nuclei",
},
"cells": {"cytoplasm": "ObjectNumber"},
"nuclei": {"cytoplasm": "ObjectNumber"},
}
"""
def __init__(
self,
file_or_conn,
strata=["Metadata_Plate", "Metadata_Well"],
aggregation_operation="median",
output_file="none",
compartments=default_compartments,
compartment_linking_cols=default_linking_cols,
merge_cols=["TableNumber", "ImageNumber"],
image_cols=["TableNumber", "ImageNumber", "Metadata_Site"],
features="infer",
load_image_data=True,
subsample_frac=1,
subsample_n="all",
subsampling_random_state="none",
fields_of_view="all",
fields_of_view_feature="Metadata_Site",
object_feature="Metadata_ObjectNumber",
):
"""Constructor method"""
# Check compartments specified
check_compartments(compartments)
# Check if correct operation is specified
aggregation_operation = check_aggregate_operation(aggregation_operation)
# Check that the subsample_frac is between 0 and 1
assert (
0 < subsample_frac and 1 >= subsample_frac
), "subsample_frac must be between 0 and 1"
self.file_or_conn = file_or_conn
self.strata = strata
self.load_image_data = load_image_data
self.aggregation_operation = aggregation_operation.lower()
self.output_file = output_file
self.merge_cols = merge_cols
self.image_cols = image_cols
self.features = features
self.subsample_frac = subsample_frac
self.subsample_n = subsample_n
self.subset_data_df = "none"
self.subsampling_random_state = subsampling_random_state
self.is_aggregated = False
self.is_subset_computed = False
self.compartments = compartments
self.compartment_linking_cols = compartment_linking_cols
self.fields_of_view_feature = fields_of_view_feature
self.object_feature = object_feature
# Confirm that the compartments and linking cols are formatted properly
assert_linking_cols_complete(
compartments=self.compartments, linking_cols=self.compartment_linking_cols
)
# Build a dictionary to update linking column feature names
self.linking_col_rename = provide_linking_cols_feature_name_update(
self.compartment_linking_cols
)
if self.subsample_n != "all":
self.set_subsample_n(self.subsample_n)
# Connect to sqlite engine
self.engine = create_engine(self.file_or_conn)
self.conn = self.engine.connect()
# Throw an error if both subsample_frac and subsample_n is set
self._check_subsampling()
# Confirm that the input fields of view is valid
self.fields_of_view = check_fields_of_view_format(fields_of_view)
if self.load_image_data:
self.load_image()
def _check_subsampling(self):
"""Internal method checking if subsampling options were specified correctly.
Returns
-------
None
Nothing is returned.
"""
# Check that the user didn't specify both subset frac and subsample all
assert (
self.subsample_frac == 1 or self.subsample_n == "all"
), "Do not set both subsample_frac and subsample_n"
def set_output_file(self, output_file):
"""Setting operation to conveniently rename output file.
Parameters
----------
output_file : str
New output file name.
Returns
-------
None
Nothing is returned.
"""
self.output_file = output_file
def set_subsample_frac(self, subsample_frac):
"""Setting operation to conveniently update the subsample fraction.
Parameters
----------
subsample_frac : float, default 1
Percentage of single cells to select (0 < subsample_frac <= 1).
Returns
-------
None
Nothing is returned.
"""
self.subsample_frac = subsample_frac
self._check_subsampling()
def set_subsample_n(self, subsample_n):
"""Setting operation to conveniently update the subsample n.
Parameters
----------
subsample_n : int, default "all"
Indicate how many sample to subsample - do not specify both subsample_frac and subsample_n.
Returns
-------
None
Nothing is returned.
"""
try:
self.subsample_n = int(subsample_n)
except ValueError:
raise ValueError("subsample n must be an integer or coercable")
self._check_subsampling()
def set_subsample_random_state(self, random_state):
"""Setting operation to conveniently update the subsample random state.
Parameters
----------
random_state: int, optional
The random state to init subsample.
Returns
-------
None
Nothing is returned.
"""
self.subsampling_random_state = random_state
def load_image(self):
"""Load image table from sqlite file
Returns
-------
None
Nothing is returned.
"""
# Extract image metadata
image_query = "select {} from image".format(
", ".join(np.union1d(self.image_cols, self.strata))
)
self.image_df = | pd.read_sql(sql=image_query, con=self.conn) | pandas.read_sql |
#Aug 2015
#compress CNV data for BRCA dataset for multiple isoforms of genes with different gene coordinates
#genes lost at this stage are those which appear in known common CNVs-removed in the 'no_cnv' files
import pandas as pd
import csv
#read CNV data
print('processing CNVs_genes file...')
#this will handle the file incorrectly if nothing is named in col 0, row 0. add "TCGA ID" manually if needed.
CNV_genes = pd.read_csv('../BRCA_CNVs_genes_foldchange.csv',index_col=0,dtype={'A1BG':str},header=0,skiprows=[1]) #,skipfooter=1,
CNV_genes.index.rename('TCGA_ID',inplace=True)
print('FILE READ IN:')
print(CNV_genes.head())
#read in candidates file for TN
cands = pd.read_csv('CNV_candidates_compressed.csv',header=0)
print(cands.Symbol.unique().shape[0], 'unique candidate genes') #12621
print(cands.Synonym.unique().shape[0], 'unique candidate genes by synonym') #8920
#check genes that actually have CNV data
sub1 = cands[cands.Symbol.isin(CNV_genes.columns.values)]
rest1 = cands[~(cands.Symbol.isin(CNV_genes.columns.values))]
sub1['CNV data exists'] = 'yes'
rest1['CNV data exists'] = 'no'
print(sub1.Symbol.unique().shape[0], 'candidate genes with CNV data') #12621
#remove those IDs marked as having duplicate CNV data
dupmask = (CNV_genes.iloc[:,0] == 'duplicates')
CNV_genes = CNV_genes[~dupmask]
#convert first column back to floats
CNV_genes.iloc[:,0] = CNV_genes.iloc[:,0].astype(float)
data = CNV_genes.iloc[:,0:-2]
#drop genes that are in known common CNVs
data = data.dropna(axis=1,how='all')
#a few genes get dropped here: ['AFG3L1' 'C16orf55' 'C16orf7' 'C1orf70' 'C8ORFK29' 'CACNA1B' 'CHMP2A'\n 'CN5H6.4' 'DIP2C' 'FAM138E' 'FAM157B' 'HEATR7A' 'HGC6.3' 'LOC646627'\n 'LOC90834' 'METRNL' 'MGC2752' 'MZF1' 'OR4F15' 'OR4F17' 'OR4F4' 'OR4F6'\n 'RPS5' 'SLC27A5' 'TARSL2' 'TM2D3' 'TRIM28' 'TUBB8' 'TUBBP5' 'UBE2M'\n 'WASH3P' 'ZBTB45' 'ZMYND11' 'ZNF132' 'ZNF324' 'ZNF324B' 'ZNF446' 'ZNF584'\n 'ZNF837' 'hsa-mir-1250' 'hsa-mir-1302-10' 'hsa-mir-1302-11' 'hsa-mir-200a'\n 'hsa-mir-3065' 'hsa-mir-3118-1' 'hsa-mir-3118-3' 'hsa-mir-3186'\n 'hsa-mir-338' 'hsa-mir-429' 'hsa-mir-571' 'hsa-mir-657']
print('DATA:')
print(data.head())
print(data.dtypes)
samples = | pd.DataFrame(CNV_genes.iloc[:,-2:]) | pandas.DataFrame |
# Copyright 2022 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import dataclasses as dc
import typing as tp
import datetime as dt
import decimal
import platform
import pyarrow as pa
import pyarrow.compute as pc
import pandas as pd
import tracdap.rt.metadata as _meta
import tracdap.rt.exceptions as _ex
import tracdap.rt.impl.util as _util
@dc.dataclass(frozen=True)
class DataSpec:
data_item: str
data_def: _meta.DataDefinition
storage_def: _meta.StorageDefinition
schema_def: tp.Optional[_meta.SchemaDefinition]
@dc.dataclass(frozen=True)
class DataPartKey:
@classmethod
def for_root(cls) -> DataPartKey:
return DataPartKey(opaque_key='part_root')
opaque_key: str
@dc.dataclass(frozen=True)
class DataItem:
schema: pa.Schema
table: tp.Optional[pa.Table] = None
batches: tp.Optional[tp.List[pa.RecordBatch]] = None
pandas: tp.Optional[pd.DataFrame] = None
pyspark: tp.Any = None
@dc.dataclass(frozen=True)
class DataView:
trac_schema: _meta.SchemaDefinition
arrow_schema: pa.Schema
parts: tp.Dict[DataPartKey, tp.List[DataItem]]
@staticmethod
def for_trac_schema(trac_schema: _meta.SchemaDefinition):
arrow_schema = DataMapping.trac_to_arrow_schema(trac_schema)
return DataView(trac_schema, arrow_schema, dict())
class _DataInternal:
@staticmethod
def float_dtype_check():
if "Float64Dtype" not in pd.__dict__:
raise _ex.EStartup("TRAC D.A.P. requires Pandas >= 1.2")
class DataMapping:
"""
Map primary data between different supported data frameworks, preserving equivalent data types.
DataMapping is for primary data, to map metadata types and values use
:py:class:`TypeMapping <tracdap.rt.impl.type_system.TypeMapping>` and
:py:class:`TypeMapping <tracdap.rt.impl.type_system.MetadataCodec>`.
"""
__log = _util.logger_for_namespace(_DataInternal.__module__ + ".DataMapping")
# Matches TRAC_ARROW_TYPE_MAPPING in ArrowSchema, tracdap-lib-data
__TRAC_DECIMAL_PRECISION = 38
__TRAC_DECIMAL_SCALE = 12
__TRAC_TIMESTAMP_UNIT = "ms"
__TRAC_TIMESTAMP_ZONE = None
__TRAC_TO_ARROW_BASIC_TYPE_MAPPING = {
_meta.BasicType.BOOLEAN: pa.bool_(),
_meta.BasicType.INTEGER: pa.int64(),
_meta.BasicType.FLOAT: pa.float64(),
_meta.BasicType.DECIMAL: pa.decimal128(__TRAC_DECIMAL_PRECISION, __TRAC_DECIMAL_SCALE),
_meta.BasicType.STRING: pa.utf8(),
_meta.BasicType.DATE: pa.date32(),
_meta.BasicType.DATETIME: pa.timestamp(__TRAC_TIMESTAMP_UNIT, __TRAC_TIMESTAMP_ZONE)
}
# Check the Pandas dtypes for handling floats are available before setting up the type mapping
__PANDAS_FLOAT_DTYPE_CHECK = _DataInternal.float_dtype_check()
__PANDAS_DATETIME_TYPE = pd.to_datetime([]).dtype
# Only partial mapping is possible, decimal and temporal dtypes cannot be mapped this way
__ARROW_TO_PANDAS_TYPE_MAPPING = {
pa.bool_(): pd.BooleanDtype(),
pa.int8(): pd.Int8Dtype(),
pa.int16(): pd.Int16Dtype(),
pa.int32(): pd.Int32Dtype(),
pa.int64(): pd.Int64Dtype(),
pa.uint8(): pd.UInt8Dtype(),
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from matplotlib.font_manager import FontProperties
#------------------------------------------------------------------------------
# Code motivation -------------------------------------------------------------
'This version of the code derives the hourly and seasonal thresholds of the reflectances'
'in the selected pixels every 15 minutes, because it uses the 2018 GOES data set, which is'
'the most complete and allows the thresholds to be obtained seasonally. The old version of'
'this code, which derived them every 10 minutes over the experiment horizon, is kept in the'
'Backups_VersionesAtiguas_Codigos folder in case it needs to be consulted again.'
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---------------------------------HOURS OVER WHICH TO WORK---------------------------------- ##
HI = '06:00'; HF = '17:59'
#################################################################################################
## ------------------INCORPORATING THE RADIATION AND EXPERIMENT DATA-------------------------- ##
#################################################################################################
df_P975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018.txt', parse_dates=[2])
"""
We made this file to create the datasets. By reverse engineering, we worked out how the datasets were made.
Below are the functions to create the datasets. They are called when the 'recreated_data' parameter is 'yes' and the
datasets have not yet been recreated.
"""
import os
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
########################### GERMAN DATASET ###########################
def recreate_german_dataset():
file_path = os.path.join("recreated_data", "resources", "german.data")
data = pd.read_csv(file_path, delim_whitespace=True, header=None)
targets = data[data.columns[-1]] # TARGET labels
data = data.drop(20, axis=1) # drop targets before rescaling
## had to change the targets since the targets were [1,2]
targets = targets.replace({1:0, 2:1})
"""
Attribute 9 (in our dataset attribute and index 8, since we start at 0, which later becomes idx 0):
Personal status and sex
A91 : male : divorced/separated
A92 : female : divorced/separated/married
A93 : male : single
A94 : male : married/widowed
A95 : female : single
"""
## Sex attribute binary
data[8] = data[8].replace({"A91": 0, "A92": 1, "A93": 0, "A94": 0, "A95":1})
## Sensitive feature is sex - attribute 8, make that now index 0
sensitive_feature_idx = data.pop(8)
data.insert(0, 8, sensitive_feature_idx)
data = data.rename(columns={i:j for i,j in zip(data.columns, range(13))})
# One-hot encode all categorical variables
str_columns = []
not_str = []
for i in data.columns:
if type(data[i][0]) == str:
str_columns.append(i)
else:
not_str.append(i)
dummies = pd.get_dummies(data[str_columns])
data = pd.concat([data[not_str], dummies], axis=1, join='inner')
# First rescale to mean = 0 and std = 1, before adding targets to df (otherwise targets would be rescaled as well)
for i in data.columns:
data[i] = preprocessing.scale(data[i])
dataset = pd.concat([data, targets], axis=1, join='inner')
# Thereafter reshuffle whole dataframe
dataset = dataset.sample(frac=1, random_state=1).reset_index(drop=True)
group_label = dataset.iloc[:, -1:].to_numpy()
group_label = np.array([i[0] for i in group_label])
# Split dataframe in 80-20%
train, test = train_test_split(dataset, test_size=0.2, random_state=42)
# At last make x and y
X_train = train.iloc[:, :-1].to_numpy() # exclude targets
X_test = test.iloc[:, :-1].to_numpy()
y_train = train.iloc[:, -1:].to_numpy() # targets only
y_train = np.array([i[0] for i in y_train])
y_test = test.iloc[:, -1:].to_numpy() # targets only
y_test = np.array([i[0] for i in y_test])
# Just a check
# print(len(X_train), len(X_test), len(y_train), len(y_test), len(group_label) == len(y_train) + len(y_test))
np.savez(os.path.join("recreated_data", "data.npz"), X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test)
np.savez(os.path.join("recreated_data", "german_group_label.npz"), group_label=group_label)
######################################################################
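# Hedged dispatch sketch (not part of the original module): the module docstring says these
# builders run only when the 'recreated_data' parameter is 'yes' and the datasets are not yet
# recreated. The flag value and the .npz path checked below are assumptions for illustration.
def maybe_recreate_german_dataset(recreated_data: str) -> None:
    """Recreate the German dataset only if requested and not already on disk."""
    target = os.path.join("recreated_data", "data.npz")
    if recreated_data == "yes" and not os.path.exists(target):
        recreate_german_dataset()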
########################### COMPAS DATASET ###########################
def recreate_compas_dataset():
data = pd.read_csv(os.path.join("recreated_data", "resources", "compas-scores-two-years.csv"))
targets = data[data.columns[-1]]
# Used columns as specified in the paper
used_cols = ["sex", "juv_fel_count", "priors_count", "race", "age_cat",
"juv_misd_count", "c_charge_degree", "juv_other_count"]
data = data[used_cols]
# Manually change the values male to 0 and female to 1
data["sex"] = data["sex"].replace({"Male":0, "Female":1})
str_columns = [i for i in data.columns if type(data[i][0]) == str]
not_str = [i for i in data.columns if type(data[i][0]) != str]
dummies = pd.get_dummies(data[str_columns])
data = pd.concat([data[not_str], dummies], axis=1, join='inner')
# First rescale to mean = 0 and std = 1, before adding targets to df (otherwise targets would be rescaled as well)
for i in data.columns:
data[i] = preprocessing.scale(data[i])
# print("Column specifications (as on website):", [i for i in data.columns])
dataset = pd.concat([data, targets], axis=1, join='inner')
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lmfit import Model, Parameters, minimize, report_fit
from scipy.optimize import curve_fit
from scipy import stats
from utilities.statistical_tests import r_squared_calculator
from GEN_Utils import FileHandling
from loguru import logger
logger.info('Import ok')
# Define the fitting functions
def sigmoid(x, bottom, top, X50):
return bottom + ((top - bottom) / (1 + np.exp((X50 - x))))
def boltzmann(x, bottom, top, V50, slope):
return bottom + ((top - bottom) / (1 + np.exp((V50 - x)/slope)))
def denaturant(urea, top, bottom, cM, m):
# adapted from https://en.wikipedia.org/wiki/Equilibrium_unfolding, by keeping terms for bottom as in Savitski, subbing deltaG into standard equation, and reintroducing bottom term as per boltzmann
temp_constant = 298.15
gas_constant = 8.31446261815324
constant = temp_constant * gas_constant
y = bottom + ((top - bottom) / (1 + np.exp((m*(cM-urea)/constant))))
# deltaG can then be calculated as m(cM-urea) - generally calculated at 0M urea therefore m(cM)
return y
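# Hedged helper (not in the original script): per the comment in denaturant(), the free energy
# of unfolding is deltaG = m * (cM - urea), so the commonly reported value at 0 M urea is
# simply m * cM. Units follow whatever units m and cM were fitted in.
def delta_g(cM, m, urea=0.0):
    """Return deltaG = m * (cM - urea); at urea = 0 this reduces to m * cM."""
    return m * (cM - urea)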
def denaturant_fit(compiled, info_cols, quant_cols):
"""Attempts to fit a sigmoid to each row. Returns sample_params dict where keys are sequences"""
fit_params = {}
for info, quant_data in compiled.set_index(info_cols).iterrows():
# extract x and y vals for fitting
y_vals = np.array(list(quant_data[quant_cols]))
x_vals = np.array([float(x) for x in quant_cols])
# Attempt fitting
try:
model = Model(denaturant)
params = model.make_params(
bottom=-1, top=1, cM=3, m=-10000)
result = model.fit(y_vals, params, urea=x_vals)
r_squared = r_squared_calculator(
x_vals, y_vals, denaturant, result.values.values())
# Collect fitted parameters
fit_stats = pd.DataFrame()
for parameter, details in result.params.items():
fit_stats[f'{parameter}_value'] = [details.value]
fit_stats[f'{parameter}_stderr'] = [details.stderr]
fit_stats[f'{parameter}_relerr'] = fit_stats[f'{parameter}_stderr'].values[0] / \
fit_stats[f'{parameter}_value'].values[0] * 100
# add r-squared value, key info
fit_stats['r_squared'] = r_squared
fit_stats['key'] = [info]
fit_params[info] = fit_stats
except:
logger.info(f'No fit found for {info}')
return fit_params
def sigmoid_filter(summary, filter_R2=True, filter_range=True, filter_cM=True, filter_relerr=True, filter_direction=True):
# apply filtering criteria
filtered = summary.copy()
if filter_R2:
# Remove R2 < filter threshold
filtered['filter_R2'] = [1 if R2 > 0.75 else 0 for R2 in filtered['r_squared']]
logger.info(f"R2 filter: {filtered['filter_R2'].sum()}")
if filter_range:
# Remove top/bottom outside range - threshold = 10?
filtered = filtered[(abs(filtered['top_value']) < 10) & (abs(filtered['bottom_value']) < 10)]
filtered['filter_range'] = [1 if (abs(val_1) < 10) & (abs(val_2) < 10) else 0 for val_1, val_2 in filtered[['top_value', 'bottom_value']].values]
logger.info(f"Range filter: {filtered['filter_range'].sum()}")
if filter_cM:
# Remove cM outside range tested
filtered['filter_cM'] = [1 if (val < 6) & (val > 0) else 0 for val in filtered['cM_value']]
logger.info(f"cM filter: {filtered['filter_cM'].sum()}")
if filter_relerr:
# Remove fits with > 50% uncertainty in cM fit
filtered['filter_relerr'] = [1 if val < 50 else 0 for val in filtered['cM_relerr']]
logger.info(f"Relative cM error: {filtered['filter_relerr'].sum()}")
if filter_direction:
# Remove sigmoids that trend upward
filtered['filter_direction'] = [1 if val_0 > val_6 else 0 for val_0, val_6 in zip(filtered['0M_value'], filtered['6M_value'])]
logger.info(f"Sigmoid direction: {filtered['filter_direction'].sum()}")
filter_cols = [col for col in filtered.columns.tolist() if 'filter_' in str(col)]
filtered['filter_count'] = filtered[filter_cols].sum(axis=1)
filtered['filter_all'] = [1 if num == len(filter_cols) else 0 for num in filtered['filter_count']]
logger.info(f"All filters: {filtered['filter_all'].sum()}")
# add filtering info to original df
summary['filtered'] = filtered['filter_all']
return summary, filtered
if __name__ == '__main__':
filter_cols = []
input_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
output_folder = f'results/lysate_denaturation/sigmoid_fitting/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Read in cluster data
clusters_summary = pd.read_excel(input_path, sheet_name=None)
cluster_number = clusters_summary['summary']['cluster'].max()
clusters = clusters_summary['clustered'].copy()
clusters.drop([col for col in clusters.columns.tolist() if 'Unnamed: ' in str(col)], axis=1, inplace=True)
info_cols = ['Sequence', 'Proteins', 'PC1', 'PC2', f'score_{cluster_number}', f'member_{cluster_number}']
quant_cols = [col for col in clusters.columns.tolist() if type(col) == float]
clusters = clusters[info_cols+quant_cols].rename(columns={f'member_{cluster_number}': 'cluster', f'score_{cluster_number}': 'score'})
info_cols = ['Sequence', 'Proteins', 'PC1', 'PC2', 'cluster', 'score']
# complete denaturant fit
fit_params = denaturant_fit(clusters, info_cols=info_cols, quant_cols=quant_cols)
fitting_parameters = pd.concat(fit_params.values()).reset_index(drop=True)
# add back useful info
fitting_parameters[info_cols] = pd.DataFrame(fitting_parameters['key'].tolist(), index=fitting_parameters.index)
summary = pd.merge(clusters, fitting_parameters, on=info_cols, how='inner')
# generate "fitted" results
sigmoid_fitted_vals = {}
for sequence, df in summary.iterrows():
# generate fitted values
(bottom, top, cM, m, r_squared, cluster, protein, sequence) = tuple(df[['bottom_value', 'top_value', 'cM_value', 'm_value', 'r_squared', 'cluster', 'Proteins', 'Sequence']])
y_vals = denaturant(np.array(quant_cols), top, bottom, cM, m)
sigmoid_fitted_vals[sequence] = y_vals
sigmoid_fitted_vals = pd.DataFrame(sigmoid_fitted_vals)
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons Public API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import datetime
import json
from itertools import product
from . import _auth
import pandas as pd
_PLACES = ('City', 'County', 'State', 'Country', 'Continent')
_CLIENT_ID = ('66054275879-a0nalqfe2p9shlv4jpra5jekfkfnr8ug.apps.googleusercontent.com')
_CLIENT_SECRET = '<KEY>'
_API_ROOT = 'https://datcom-api.appspot.com'
_MICRO_SECONDS = 1000000
_EPOCH_START = datetime.datetime(year=1970, month=1, day=1)
def _year_epoch_micros(year):
"""Get the timestamp of the start of a year in micro seconds.
Args:
year: An integer number of the year.
Returns:
Timestamp of the start of a year in micro seconds.
"""
now = datetime.datetime(year=year, month=1, day=1)
return int((now - _EPOCH_START).total_seconds()) * _MICRO_SECONDS
def _date_epoch_micros(date_string):
"""Get the timestamp of the date string in micro seconds.
Args:
date_string: A date string in 'YYYY-mm-dd' form.
Returns:
Timestamp of the date in micro seconds.
"""
now = datetime.datetime.strptime(date_string, '%Y-%m-%d')
return int((now - _EPOCH_START).total_seconds()) * _MICRO_SECONDS
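# Hedged worked example (not in the original module): one day after the epoch start,
# _date_epoch_micros('1970-01-02') == 86400 * _MICRO_SECONDS == 86_400_000_000.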
class Client(object):
"""Provides Data Commons API."""
def __init__(self,
client_id=_CLIENT_ID,
client_secret=_CLIENT_SECRET,
api_root=_API_ROOT):
self._service = _auth.do_auth(client_id, client_secret, api_root)
response = self._service.get_prop_type(body={}).execute()
self._prop_type = defaultdict(dict)
self._inv_prop_type = defaultdict(dict)
for t in response.get('type_info', []):
self._prop_type[t['node_type']][t['prop_name']] = t['prop_type']
if t['prop_type'] != 'Text':
self._inv_prop_type[t['prop_type']][t['prop_name']] = t['node_type']
self._inited = True
def query(self, datalog_query, max_rows=100):
"""Performs a query returns results as a table.
Args:
datalog_query: string representing datalog query in [TODO(shanth): link]
max_rows: max number of returned rows.
Returns:
A pandas.DataFrame with the selected variables in the query as
the column names. If the query returns multiple values for a property then
the result is flattened into multiple rows.
Raises:
RuntimeError: some problem with executing query (hint in the string)
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
try:
response = self._service.query(body={
'query': datalog_query,
'options': {
'row_count_limit': max_rows
}
}).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to execute query: %s' % e)
header = response.get('header', [])
rows = response.get('rows', [])
result_dict = {header: [] for header in header}
for row in rows:
cells = row.get('cells', [])
if len(cells) != len(header):
raise RuntimeError(
'Response #cells mismatches #header: {}'.format(response))
cell_values = []
for key, cell in zip(header, cells):
if not cell:
cell_values.append([''])
else:
try:
cell_values.append(cell['value'])
except KeyError:
raise RuntimeError('No value in cell: {}'.format(row))
# Iterate through the cartesian product to flatten the query results.
for values in product(*cell_values):
for idx, key in enumerate(header):
result_dict[key].append(values[idx])
return pd.DataFrame(result_dict)[header]
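# Hedged usage sketch (not part of the original module); the datalog query text and the
# selected variable ?name are illustrative assumptions:
#
#   client = Client()
#   df = client.query('SELECT ?name, typeOf ?state State, name ?state ?name', max_rows=10)
#   # df has one column per selected variable in the query, here just 'name'.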
def expand(self,
pd_table,
arc_name,
seed_col_name,
new_col_name,
outgoing=True,
max_rows=100):
"""Create a new column with values for the given property.
The existing pandas dataframe should include a column containing entity IDs
for a certain schema.org type. This function populates a new column with
property values for the entities and adds additional rows if a property has
repeated values.
Args:
pd_table: Pandas dataframe that contains entity information.
arc_name: The property to add to the table.
seed_col_name: The column name that contains entity (ids) that the added
properties belong to.
new_col_name: New column name.
outgoing: Set this flag if the property points away from the entities
denoted by the seed column.
max_rows: The maximum number of rows returned by the query results.
Returns:
A pandas.DataFrame with the additional column and rows added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
assert seed_col_type != 'Text', 'Parent entity should not be Text'
# Determine the new column type
if outgoing:
if arc_name not in self._prop_type[seed_col_type]:
raise ValueError(
'%s does not have outgoing property %s' % (seed_col_type, arc_name))
new_col_type = self._prop_type[seed_col_type][arc_name]
else:
if arc_name not in self._inv_prop_type[seed_col_type]:
raise ValueError(
'%s does not have incoming property %s' % (seed_col_type, arc_name))
new_col_type = self._inv_prop_type[seed_col_type][arc_name]
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
# All entries in the seed column are empty strings. The new column should
# contain no entries.
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = new_col_type
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
if outgoing:
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'{arc_name} ?node ?{new_col_var}').format(
arc_name=arc_name,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
new_col_var=new_col_var,
dcids=dcids)
else:
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'{arc_name} ?{new_col_var} ?node').format(
arc_name=arc_name,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
new_col_var=new_col_var,
dcids=dcids)
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
new_col_type,
max_rows=max_rows)
# ----------------------- OBSERVATION QUERY FUNCTIONS -----------------------
def get_instances(self, col_name, instance_type, max_rows=100):
"""Get a list of instance dcids for a given type.
Args:
col_name: Column name for the returned column.
instance_type: String of the instance type.
max_rows: Max number of returned rows.
Returns:
A pandas.DataFrame with instance dcids.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
query = ('SELECT ?{col_name},'
'typeOf ?node {instance_type},'
'dcid ?node ?{col_name}').format(
col_name=col_name, instance_type=instance_type)
type_row = pd.DataFrame(data=[{col_name: instance_type}])
try:
dcid_column = self.query(query, max_rows)
except RuntimeError as e:
raise RuntimeError('Execute query\n%s\ngot an error:\n%s' % (query, e))
return pd.concat([type_row, dcid_column], ignore_index=True)
def get_populations(self,
pd_table,
seed_col_name,
new_col_name,
population_type,
max_rows=100,
**kwargs):
"""Create a new column with population dcid.
The existing pandas dataframe should include a column containing entity IDs
for geo entities. This function populates a new column with
population dcid corresponding to the geo entity.
Args:
pd_table: Pandas dataframe that contains geo entity dcids.
seed_col_name: The column name that contains entity (ids) that the added
properties belong to.
new_col_name: New column name.
population_type: Population type like "Person".
max_rows: The maximum number of rows returned by the query results.
**kwargs: keyword properties to define the population.
Returns:
A pandas.DataFrame with an additional column added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
assert seed_col_type != 'Text', 'Parent entity should not be Text'
# Create the datalog query for the requested observations
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = 'Population'
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'typeOf ?pop Population,'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'location ?pop ?node,'
'dcid ?pop ?{new_col_var},'
'populationType ?pop {population_type},').format(
new_col_var=new_col_var,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
dcids=dcids,
population_type=population_type)
pv_pairs = sorted(kwargs.items())
idx = 0
for idx, pv in enumerate(pv_pairs, 1):
query += 'p{} ?pop {},'.format(idx, pv[0])
query += 'v{} ?pop {},'.format(idx, pv[1])
query += 'numConstraints ?pop {}'.format(idx)
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
'Population',
max_rows=max_rows)
def get_observations(self,
pd_table,
seed_col_name,
new_col_name,
start_date,
end_date,
measured_property,
stats_type,
max_rows=100):
"""Create a new column with values for an observation of the given property.
The existing pandas dataframe should include a column containing entity IDs
for a certain schema.org type. This function populates a new column with
property values for the entities.
Args:
pd_table: Pandas dataframe that contains entity information.
seed_col_name: The column that contains the population dcid.
new_col_name: New column name.
start_date: The start date of the observation (in 'YYYY-mm-dd' form).
end_date: The end date of the observation (in 'YYYY-mm-dd' form).
measured_property: observation measured property.
stats_type: Statistical type like "Median"
max_rows: The maximum number of rows returned by the query results.
Returns:
A pandas.DataFrame with an additional column added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
assert seed_col_type == 'Population' or seed_col_type == 'City', (
'Parent entity should be Population or City')
# Create the datalog query for the requested observations
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = 'Observation'
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?pop {seed_col_type},'
'typeOf ?o Observation,'
'dcid ?pop {dcids},'
'dcid ?pop ?{seed_col_var},'
'observedNode ?o ?pop,'
'startTime ?o {start_time},'
'endTime ?o {end_time},'
'measuredProperty ?o {measured_property},'
'{stats_type}Value ?o ?{new_col_var},').format(
seed_col_type=seed_col_type,
new_col_var=new_col_var,
seed_col_var=seed_col_var,
dcids=dcids,
measured_property=measured_property,
stats_type=stats_type,
start_time=_date_epoch_micros(start_date),
end_time=_date_epoch_micros(end_date))
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
'Observation',
max_rows=max_rows)
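# Hedged end-to-end sketch (not part of the original module): chaining the helpers above as
# their docstrings describe. The column names, population constraint, dates and stats type are
# illustrative assumptions, not values confirmed by this file.
#
#   df = client.get_states('United States', 'state')
#   df = client.get_populations(df, 'state', 'person', 'Person', gender='Male')
#   df = client.get_observations(df, 'person', 'count', '2012-01-01', '2012-12-31',
#                                'count', 'measured')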
# -------------------------- CACHING FUNCTIONS --------------------------
def read_dataframe(self, file_name):
"""Read a previously saved pandas dataframe.
Users can only read previously saved data files created with the same authentication
email.
Args:
file_name: The saved file name.
Returns:
A pandas dataframe.
Raises:
RuntimeError: when failed to read the dataframe.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
try:
response = self._service.read_dataframe(file_name=file_name).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to read dataframe: {}'.format(e))
return pd.read_json(json.loads(response['data']), dtype=False)
def save_dataframe(self, pd_dataframe, file_name):
"""Saves pandas dataframe for later retrieving.
Each authentication email has its own scope for saved dataframes. Writing
with the same file_name overwrites the previously saved dataframe.
Args:
pd_dataframe: A pandas.DataFrame.
file_name: The saved file name.
Raises:
RuntimeError: when failed to save the dataframe.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
data = json.dumps(pd_dataframe.to_json())
try:
response = self._service.save_dataframe(body={
'data': data,
'file_name': file_name
}).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to save dataframe: {}'.format(e))
return response['file_name']
# -------------------------- OTHER QUERY FUNCTIONS --------------------------
def get_cities(self, state, new_col_name, max_rows=100):
"""Get a list of city dcids in a given state.
Args:
state: Name of the state.
new_col_name: Column name for the returned city column.
max_rows: Max number of returend rows.
Returns:
A pandas.DataFrame with city dcids.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
query = ('SELECT ?{new_col_name},'
'typeOf ?node City,'
'dcid ?node ?{new_col_name},'
'containedInPlace ?node ?county,'
'containedInPlace ?county ?state,'
'name ?state "{state}"').format(
new_col_name=new_col_name, state=state)
type_row = pd.DataFrame(data=[{new_col_name: 'City'}])
try:
dcid_column = self.query(query, max_rows)
except RuntimeError as e:
raise RuntimeError('Execute query\n%s\ngot an error:\n%s' % (query, e))
return pd.concat([type_row, dcid_column], ignore_index=True)
def get_states(self, country, new_col_name, max_rows=100):
"""Get a list of state dcids.
Args:
country: Name of the country that the states are contained in.
new_col_name: Column name for the returned state column.
max_rows: Max number of returned results.
Returns:
A pandas.DataFrame with state dcids.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
query = ('SELECT ?{new_col_name},'
'typeOf ?node State,'
'dcid ?node ?{new_col_name},'
'containedInPlace ?node ?country,'
'name ?country "{country}"').format(
new_col_name=new_col_name, country=country)
type_row = pd.DataFrame(data=[{new_col_name: 'State'}])
try:
dcid_column = self.query(query, max_rows)
except RuntimeError as e:
raise RuntimeError('Execute query %s got an error:\n%s' % (query, e))
return pd.concat([type_row, dcid_column], ignore_index=True)
def get_places_in(self, place_type, container_dcid, col_name, max_rows=100):
"""Get a list of places that are contained in a higher level geo places.
Args:
place_type: The place type, like "City".
container_dcid: The dcid of the container place.
col_name: Column name for the returned state column.
max_rows: Max number of returned results.
Returns:
A pandas.DataFrame with dcids of the contained place.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
assert place_type in _PLACES, 'Input place types are not supported'
# Get the type of the container place.
type_query = 'SELECT ?type, dcid ?node {dcid}, subType ?node ?type'.format(
dcid=container_dcid)
query_result = self.query(type_query)
assert query_result['type'].count() == 1, (
'Type of the container dcid not found')
container_type = query_result['type'][0]
# Sanity check the type information.
place_type_ind = _PLACES.index(place_type)
container_type_ind = _PLACES.index(container_type)
assert container_type_ind > place_type_ind, (
'Requested place type should be of a lower level than the container')
# Do the actual query.
query = ('SELECT ?{col_name},'
'typeOf ?node_{place_type} {place_type},'
'dcid ?node_{place_type} ?{col_name},').format(
col_name=col_name,
place_type=place_type)
for i in range(place_type_ind, container_type_ind):
query += 'containedInPlace ?node_{child} ?node_{parent},'.format(
child=_PLACES[i], parent=_PLACES[i+1])
query += 'dcid ?node_{container_type} "{container_dcid}"'.format(
container_type=container_type, container_dcid=container_dcid)
try:
dcid_column = self.query(query, max_rows)
except RuntimeError as e:
raise RuntimeError('Execute query %s got an error:\n%s' % (query, e))
type_row = pd.DataFrame(data=[{col_name: place_type}])
import os
import sys
import tensorflow as tf
import random
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import config as cf
class UTILS(object):
######################################
# load all data files #
######################################
def __init__(self, batch_size = 32, dim_sentence = 80, dim_aspect = 3, isSample = True):
# data file paths
self.dataPath = cf.ROOT_PATH + cf.DATA_PATH
if isSample:
self.trainPath = self.dataPath + 'rest_train_sample.csv'
self.testPath = self.dataPath + 'rest_test_sample.csv'
self.trainEncodePath = self.dataPath + 'train_sample.npy'
self.testEncodePath = self.dataPath + 'test_sample.npy'
else:
self.trainPath = self.dataPath + 'rest_train_2014_processed.csv'
self.testPath = self.dataPath + 'rest_test_2014_processed.csv'
self.trainEncodePath = self.dataPath + 'train.npy'
self.testEncodePath = self.dataPath + 'test.npy'
self.glovePath = self.dataPath + 'glove.npy'
# hyperparameters of model
self.batch_size = batch_size
self.dim_sentence = dim_sentence
self.dim_aspect = dim_aspect
self.loadData()
# loading all data
def loadData(self):
self.trainData = pd.read_csv(self.trainPath)
self.testData = pd.read_csv(self.testPath)
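# Hedged usage sketch (not part of the original class); the inspected attributes are
# assumptions about what a caller might look at:
#
#   utils = UTILS(batch_size=32, dim_sentence=80, dim_aspect=3, isSample=True)
#   print(utils.trainData.shape, utils.testData.shape)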